diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 94fa4a8de2ca5d..1f77c99e2cba87 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5005,8 +5005,7 @@ usbcore.old_scheme_first= [USB] Start with the old device initialization - scheme, applies only to low and full-speed devices - (default 0 = off). + scheme (default 0 = off). usbcore.usbfs_memory_mb= [USB] Memory limit (in MB) for buffers allocated by diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 5a09661330fccf..59daa4c21816b8 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -88,6 +88,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Makefile b/Makefile index 6055a94aa4ce37..989e7d64963368 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 4 -SUBLEVEL = 35 +SUBLEVEL = 38 EXTRAVERSION = NAME = Kleptomaniac Octopus diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 90125ce19a1b3b..50c64146d49269 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -488,6 +488,7 @@ "dsi0_ddr2", "dsi0_ddr"; + status = "disabled"; }; thermal: thermal@7e212000 { diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 4c3d709de5e9fb..23241e18d8645e 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -118,8 +118,10 @@ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o obj-$(CONFIG_SOC_IMX7ULP) += suspend-imx7ulp.o endif +ifeq ($(CONFIG_ARM_CPU_SUSPEND),y) AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += resume-imx6.o +endif obj-$(CONFIG_SOC_IMX6) += pm-imx6.o obj-$(CONFIG_SOC_IMX1) += mach-imx1.o diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 36570a257ccde6..d806f5d05cef7b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -559,6 +559,22 @@ config ARM64_ERRATUM_1463225 If unsure, say Y. +config ARM64_ERRATUM_1542419 + bool "Neoverse-N1: workaround mis-ordering of instruction fetches" + default y + help + This option adds a workaround for ARM Neoverse-N1 erratum + 1542419. + + Affected Neoverse-N1 cores could execute a stale instruction when + modified by another CPU. The workaround depends on a firmware + counterpart. + + Workaround the issue by hiding the DIC feature from EL0. This + forces user-space to perform cache maintenance. + + If unsure, say Y. 
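For context on the "forces user-space to perform cache maintenance" behaviour described in the help text above, the sketch below (an illustration only, not part of this patch) shows how a user-space JIT typically consults CTR_EL0 before synchronising newly written code. With DIC hidden and the CTR_EL0 read trapped by the workaround, such code always falls back to explicit maintenance, which the kernel-side hunks later in this series can intercept. The helper names and the reliance on __builtin___clear_cache are illustrative assumptions, not anything defined by this patch.

#include <stdint.h>

#define CTR_DIC_BIT	(1UL << 29)	/* CTR_EL0.DIC: I-cache invalidation not required */

static inline uint64_t read_ctr_el0(void)
{
	uint64_t ctr;

	/* Traps to EL1 when SCTLR_EL1.UCT is clear (the workaround case). */
	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
	return ctr;
}

/* Hypothetical JIT helper: make freshly written instructions visible. */
static void sync_new_code(char *start, char *end)
{
	if (!(read_ctr_el0() & CTR_DIC_BIT))
		__builtin___clear_cache(start, end);	/* D-clean + I-invalidate to PoU */
}

On an affected CPU the CTR_EL0 read above traps into ctr_read_handler(), where this series clears the DIC bit and fakes IminLine, as shown in the arch/arm64/kernel/traps.c hunk further down.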
+ config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 43da6dd2959207..806e9dc2a852a4 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -11,6 +11,7 @@ #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 #define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 @@ -18,7 +19,7 @@ #define CTR_DIC_SHIFT 29 #define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index b0d53a265f1d51..7b4172ce497c20 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -4,6 +4,9 @@ */ #ifndef __ASM_COMPAT_H #define __ASM_COMPAT_H + +#include + #ifdef CONFIG_COMPAT /* @@ -13,8 +16,6 @@ #include #include -#include - #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ #define COMPAT_UTS_MACHINE "armv8b\0\0" diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index ac1dbca3d0cd3d..1dc3c762fdcb9d 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -54,7 +54,8 @@ #define ARM64_WORKAROUND_1463225 44 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 +#define ARM64_WORKAROUND_1542419 47 -#define ARM64_NCAPS 47 +#define ARM64_NCAPS 48 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43dd6..9b68f1b3915ec3 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -49,7 +49,9 @@ #ifndef CONFIG_BROKEN_GAS_INST #ifdef __ASSEMBLY__ -#define __emit_inst(x) .inst (x) +// The space separator is omitted so that __emit_inst(x) can be parsed as +// either an assembler directive or an assembler macro argument. +#define __emit_inst(x) .inst(x) #else #define __emit_inst(x) ".inst " __stringify((x)) "\n\t" #endif diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 96f576e9ea4633..0b2830379fe032 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -88,13 +88,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, } static void -cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) +cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) { u64 mask = arm64_ftr_reg_ctrel0.strict_mask; + bool enable_uct_trap = false; /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ if ((read_cpuid_cachetype() & mask) != (arm64_ftr_reg_ctrel0.sys_val & mask)) + enable_uct_trap = true; + + /* ... 
or if the system is affected by an erratum */ + if (cap->capability == ARM64_WORKAROUND_1542419) + enable_uct_trap = true; + + if (enable_uct_trap) sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } @@ -651,6 +659,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, return false; } +static bool __maybe_unused +has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT); + const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && has_dic; +} + #ifdef CONFIG_HARDEN_EL2_VECTORS static const struct midr_range arm64_harden_el2_vectors[] = { @@ -927,6 +947,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1542419 + { + /* we depend on the firmware portion for correctness */ + .desc = "ARM erratum 1542419 (kernel portion)", + .capability = ARM64_WORKAROUND_1542419, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_neoverse_n1_erratum_1542419, + .cpu_enable = cpu_enable_trap_ctr_access, + }, #endif { } diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index f1cb649594271c..3c18c2454089b4 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include +#include #include static long @@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end) if (fatal_signal_pending(current)) return 0; + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* + * The workaround requires an inner-shareable tlbi. + * We pick the reserved-ASID to minimise the impact. + */ + __tlbi(aside1is, __TLBI_VADDR(0, 0)); + dsb(ish); + } + ret = __flush_cache_user_range(start, start + chunk); if (ret) return ret; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 7d31ce7d75627d..ace993b5664c4e 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -501,6 +501,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) int rt = ESR_ELx_SYS64_ISS_RT(esr); unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* Hide DIC so that we can trap the unnecessary maintenance...*/ + val &= ~BIT(CTR_DIC_SHIFT); + + /* ... and fake IminLine to reduce the number of traps. 
*/ + val &= ~CTR_IMINLINE_MASK; + val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK; + } + pt_regs_write_reg(regs, rt, val); arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 13f699256258e1..f29bb176381f19 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -705,7 +705,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE) stw r10,_CCR(r1) stw r1,KSP(r3) /* Set old stack pointer */ - kuap_check r2, r4 + kuap_check r2, r0 #ifdef CONFIG_SMP /* We need a sync somewhere here to make sure that if the * previous task gets rescheduled on another CPU, it sees all diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index d7ff21316cfaa7..e50fbed3665167 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -541,6 +541,8 @@ static bool __init parse_cache_info(struct device_node *np, lsizep = of_get_property(np, propnames[3], NULL); if (bsizep == NULL) bsizep = lsizep; + if (lsizep == NULL) + lsizep = bsizep; if (lsizep != NULL) lsize = be32_to_cpu(*lsizep); if (bsizep != NULL) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 11301a1187f337..0e0a2227af7d74 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void) "i" (offsetof(struct paca_struct, irq_work_pending))); } -void arch_irq_work_raise(void) -{ - preempt_disable(); - set_irq_work_pending_flag(); - /* - * Non-nmi code running with interrupts disabled will replay - * irq_happened before it re-enables interrupts, so setthe - * decrementer there instead of causing a hardware exception - * which would immediately hit the masked interrupt handler - * and have the net effect of setting the decrementer in - * irq_happened. - * - * NMI interrupts can not check this when they return, so the - * decrementer hardware exception is raised, which will fire - * when interrupts are next enabled. - * - * BookE does not support this yet, it must audit all NMI - * interrupt handlers to ensure they call nmi_enter() so this - * check would be correct. - */ - if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) { - set_dec(1); - } else { - hard_irq_disable(); - local_paca->irq_happened |= PACA_IRQ_DEC; - } - preempt_enable(); -} - #else /* 32-bit */ DEFINE_PER_CPU(u8, irq_work_pending); @@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending); #define test_irq_work_pending() __this_cpu_read(irq_work_pending) #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) +#endif /* 32 vs 64 bit */ + void arch_irq_work_raise(void) { + /* + * 64-bit code that uses irq soft-mask can just cause an immediate + * interrupt here that gets soft masked, if this is called under + * local_irq_disable(). It might be possible to prevent that happening + * by noticing interrupts are disabled and setting decrementer pending + * to be replayed when irqs are enabled. The problem there is that + * tracing can call irq_work_raise, including in code that does low + * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on) + * which could get tangled up if we're messing with the same state + * here. 
+ */ preempt_disable(); set_irq_work_pending_flag(); set_dec(1); preempt_enable(); } -#endif /* 32 vs 64 bit */ - #else /* CONFIG_IRQ_WORK */ #define test_irq_work_pending() 0 diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 12543e53fa96a5..f0330ce498d1e1 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -389,7 +389,7 @@ config PPC_KUAP config PPC_KUAP_DEBUG bool "Extra debugging for Kernel Userspace Access Protection" - depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32) + depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32) help Add extra debugging for Kernel Userspace Access Protection (KUAP) If you're unsure, say N. diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 3acdcc3bb908c6..753adeb624f23e 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -683,6 +683,17 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) #endif out: + /* + * Enable translation as we will be accessing per-cpu variables + * in save_mce_event() which may fall outside RMO region, also + * leave it enabled because subsequently we will be queuing work + * to workqueues where again per-cpu variables accessed, besides + * fwnmi_release_errinfo() crashes when called in realmode on + * pseries. + * Note: All the realmode handling like flushing SLB entries for + * SLB multihit is done by now. + */ + mtmsr(mfmsr() | MSR_IR | MSR_DR); save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED, &mce_err, regs->nip, eaddr, paddr); diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 61f2b0412345af..ccba63aaeb470b 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -133,7 +133,7 @@ void diag_stat_inc(enum diag_stat_enum nr) } EXPORT_SYMBOL(diag_stat_inc); -void diag_stat_inc_norecursion(enum diag_stat_enum nr) +void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr) { this_cpu_inc(diag_stat.counter[nr]); trace_s390_diagnose_norecursion(diag_map[nr].code); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index f468a10e520624..66bf050d785cf9 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -403,7 +403,7 @@ int smp_find_processor_id(u16 address) return -1; } -bool arch_vcpu_is_preempted(int cpu) +bool notrace arch_vcpu_is_preempted(int cpu) { if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) return false; @@ -413,7 +413,7 @@ bool arch_vcpu_is_preempted(int cpu) } EXPORT_SYMBOL(arch_vcpu_is_preempted); -void smp_yield_cpu(int cpu) +void notrace smp_yield_cpu(int cpu) { if (MACHINE_HAS_DIAG9C) { diag_stat_inc_norecursion(DIAG_STAT_X09C); diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c index 490b52e8501453..11a669f3cc93c1 100644 --- a/arch/s390/kernel/trace.c +++ b/arch/s390/kernel/trace.c @@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose); static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); -void trace_s390_diagnose_norecursion(int diag_nr) +void notrace trace_s390_diagnose_norecursion(int diag_nr) { unsigned long flags; unsigned int *depth; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 756c627f7e5408..6fb17b550a80fa 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1932,6 +1932,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } + if (start >= slots->used_slots) + return slots->used_slots - 1; + if (gfn >= 
memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index c4f8039a35e8dd..0267405ab7c69f 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void) { mm_segment_t old_fs; unsigned long asce, cr; + unsigned long flags; old_fs = current->thread.mm_segment; if (old_fs & 1) return old_fs; + /* protect against a concurrent page table upgrade */ + local_irq_save(flags); current->thread.mm_segment |= 1; asce = S390_lowcore.kernel_asce; if (likely(old_fs == USER_DS)) { @@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void) __ctl_load(asce, 7, 7); set_cpu_flag(CIF_ASCE_SECONDARY); } + local_irq_restore(flags); return old_fs; } EXPORT_SYMBOL(enable_sacf_uaccess); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 3dd253f81a7715..46071be897ab0f 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; - if (current->active_mm == mm) - set_user_asce(mm); + /* we must change all active ASCEs to avoid the creation of new TLBs */ + if (current->active_mm == mm) { + S390_lowcore.user_asce = mm->context.asce; + if (current->thread.mm_segment == USER_DS) { + __ctl_load(S390_lowcore.user_asce, 1, 1); + /* Mark user-ASCE present in CR1 */ + clear_cpu_flag(CIF_ASCE_PRIMARY); + } + if (current->thread.mm_segment == USER_DS_SACF) { + __ctl_load(S390_lowcore.user_asce, 7, 7); + /* enable_sacf_uaccess does all or nothing */ + WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY)); + } + } __tlb_flush_local(); } diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index fbe97ab2e22869..743f257cf2cbdd 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -115,7 +115,6 @@ static struct irq_chip zpci_irq_chip = { .name = "PCI-MSI", .irq_unmask = pci_msi_unmask_irq, .irq_mask = pci_msi_mask_irq, - .irq_set_affinity = zpci_set_irq_affinity, }; static void zpci_handle_cpu_local_irq(bool rescan) @@ -276,7 +275,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) rc = -EIO; if (hwirq - bit >= msi_vecs) break; - irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity); + irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, + (irq_delivery == DIRECTED) ? + msi->affinity : NULL); if (irq < 0) return -ENOMEM; rc = irq_set_msi_desc(irq, msi); diff --git a/arch/um/Makefile b/arch/um/Makefile index d2daa206872da4..275f5ffdf6f0ad 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -140,6 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) # When cleaning we don't include .config, so we don't include # TT or skas makefiles and don't clean skas_ptregs.h. CLEAN_FILES += linux x.i gmon.out +MRPROPER_DIRS += arch/$(SUBARCH)/include/generated archclean: @find . 
\( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index fc8814faae62ca..1c2f9baf848320 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -227,8 +227,8 @@ static void __init ms_hyperv_init_platform(void) ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); - pr_info("Hyper-V: features 0x%x, hints 0x%x\n", - ms_hyperv.features, ms_hyperv.hints); + pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n", + ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features); ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS); ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 39a116d4399390..72f51275247edf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4566,7 +4566,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, */ static void kvm_machine_check(void) { -#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) +#if defined(CONFIG_X86_MCE) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 991549a1c5f3ae..18936533666e39 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -138,6 +138,19 @@ static bool is_ereg(u32 reg) BIT(BPF_REG_AX)); } +/* + * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 + * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte + * of encoding. al,cl,dl,bl have simpler encoding. + */ +static bool is_ereg_8l(u32 reg) +{ + return is_ereg(reg) || + (1 << reg) & (BIT(BPF_REG_1) | + BIT(BPF_REG_2) | + BIT(BPF_REG_FP)); +} + static bool is_axreg(u32 reg) { return reg == BPF_REG_0; @@ -748,9 +761,8 @@ st: if (is_imm8(insn->off)) /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: /* Emit 'mov byte ptr [rax + off], al' */ - if (is_ereg(dst_reg) || is_ereg(src_reg) || - /* We have to add extra byte for x86 SIL, DIL regs */ - src_reg == BPF_REG_1 || src_reg == BPF_REG_2) + if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) + /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); else EMIT1(0x88); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 4d2a7a76460262..66cd150b7e541d 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1847,14 +1847,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_B: case BPF_H: case BPF_W: - if (!bpf_prog->aux->verifier_zext) + if (bpf_prog->aux->verifier_zext) break; if (dstk) { EMIT3(0xC7, add_1reg(0x40, IA32_EBP), STACK_VAR(dst_hi)); EMIT(0x0, 4); } else { - EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0); + /* xor dst_hi,dst_hi */ + EMIT2(0x33, + add_2reg(0xC0, dst_hi, dst_hi)); } break; case BPF_DW: @@ -2013,8 +2015,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_X: { bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; - u8 dreg_lo = dstk ? IA32_EAX : dst_lo; - u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 dreg_lo = IA32_EAX; + u8 dreg_hi = IA32_EDX; u8 sreg_lo = sstk ? IA32_ECX : src_lo; u8 sreg_hi = sstk ? 
IA32_EBX : src_hi; @@ -2026,6 +2028,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst_hi)); + } else { + /* mov dreg_lo,dst_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo)); + if (is_jmp64) + /* mov dreg_hi,dst_hi */ + EMIT2(0x89, + add_2reg(0xC0, dreg_hi, dst_hi)); } if (sstk) { @@ -2050,8 +2059,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP32 | BPF_JSET | BPF_K: { bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; - u8 dreg_lo = dstk ? IA32_EAX : dst_lo; - u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 dreg_lo = IA32_EAX; + u8 dreg_hi = IA32_EDX; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; u32 hi; @@ -2064,6 +2073,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst_hi)); + } else { + /* mov dreg_lo,dst_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo)); + if (is_jmp64) + /* mov dreg_hi,dst_hi */ + EMIT2(0x89, + add_2reg(0xC0, dreg_hi, dst_hi)); } /* mov ecx,imm32 */ diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 9a599cc28c290c..2dc5dc54e257fd 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1594,7 +1594,7 @@ static void ioc_timer_fn(struct timer_list *timer) vrate_min, vrate_max); } - trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct, + trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, nr_surpluses); @@ -1603,7 +1603,7 @@ static void ioc_timer_fn(struct timer_list *timer) ioc->period_us * vrate * INUSE_MARGIN_PCT, 100); } else if (ioc->busy_level != prev_busy_level || nr_lagging) { trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), - &missed_ppm, rq_wait_pct, nr_lagging, + missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, nr_surpluses); } diff --git a/block/blk-mq.c b/block/blk-mq.c index a8c1a45cedde09..757c0fd9f0cc22 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1232,8 +1232,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, rq = list_first_entry(list, struct request, queuelist); hctx = rq->mq_hctx; - if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) + if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) { + blk_mq_put_driver_tag(rq); break; + } if (!blk_mq_get_driver_tag(rq)) { /* diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0e99a760aebd07..8646147dc19463 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -726,7 +726,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) if (is_async(dev)) { get_device(dev); - async_schedule(func, dev); + async_schedule_dev(func, dev); return true; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ef6e251857c8c5..57ed6b70d2950f 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -427,11 +427,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, * information. */ struct file *file = lo->lo_backing_file; + struct request_queue *q = lo->lo_queue; int ret; mode |= FALLOC_FL_KEEP_SIZE; - if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { + if (!blk_queue_discard(q)) { ret = -EOPNOTSUPP; goto out; } @@ -863,28 +864,47 @@ static void loop_config_discard(struct loop_device *lo) struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + /* + * If the backing device is a block device, mirror its zeroing + * capability. 
Set the discard sectors to the block device's zeroing + * capabilities because loop discards result in blkdev_issue_zeroout(), + * not blkdev_issue_discard(). This maintains consistent behavior with + * file-backed loop devices: discarded regions read back as zero. + */ + if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) { + struct request_queue *backingq; + + backingq = bdev_get_queue(inode->i_bdev); + blk_queue_max_discard_sectors(q, + backingq->limits.max_write_zeroes_sectors); + + blk_queue_max_write_zeroes_sectors(q, + backingq->limits.max_write_zeroes_sectors); + /* * We use punch hole to reclaim the free space used by the * image a.k.a. discard. However we do not support discard if * encryption is enabled, because it may give an attacker * useful information. */ - if ((!file->f_op->fallocate) || - lo->lo_encrypt_key_size) { + } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { q->limits.discard_granularity = 0; q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); - return; - } - q->limits.discard_granularity = inode->i_sb->s_blocksize; - q->limits.discard_alignment = 0; + } else { + q->limits.discard_granularity = inode->i_sb->s_blocksize; + q->limits.discard_alignment = 0; - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + blk_queue_max_discard_sectors(q, UINT_MAX >> 9); + blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + } + + if (q->limits.max_write_zeroes_sectors) + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + else + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c2ed3e9128e3af..a55383b139df92 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -345,9 +345,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, if (err == -ENOSPC) blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - if (err == -ENOMEM || err == -ENOSPC) + switch (err) { + case -ENOSPC: return BLK_STS_DEV_RESOURCE; - return BLK_STS_IOERR; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index d7a3888ad80f0a..b86ee5b1885547 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -322,7 +322,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, for (i = 0; i < chip->nr_allocated_banks; i++) { if (digests[i].alg_id != chip->allocated_banks[i].alg_id) { - rc = EINVAL; + rc = -EINVAL; goto out; } } diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 78cc5269017705..e82013d587b466 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2012 IBM Corporation + * Copyright (C) 2012-2020 IBM Corporation * * Author: Ashley Lai * @@ -133,6 +133,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) return len; } +/** + * ibmvtpm_crq_send_init - Send a CRQ initialize message + * @ibmvtpm: vtpm device struct + * + * Return: + * 0 on success. + * Non-zero on failure. 
+ */ +static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) +{ + int rc; + + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "%s failed rc=%d\n", __func__, rc); + + return rc; +} + +/** + * tpm_ibmvtpm_resume - Resume from suspend + * + * @dev: device struct + * + * Return: Always 0. + */ +static int tpm_ibmvtpm_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + int rc = 0; + + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_ENABLE_CRQ, + ibmvtpm->vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) { + dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); + return rc; + } + + rc = vio_enable_interrupts(ibmvtpm->vdev); + if (rc) { + dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); + return rc; + } + + rc = ibmvtpm_crq_send_init(ibmvtpm); + if (rc) + dev_err(dev, "Error send_init rc=%d\n", rc); + + return rc; +} + /** * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct @@ -146,6 +204,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + bool retry = true; int rc, sig; if (!ibmvtpm->rtce_buf) { @@ -179,18 +238,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) */ ibmvtpm->tpm_processing_cmd = true; +again: rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { + /* + * H_CLOSED can be returned after LPM resume. Call + * tpm_ibmvtpm_resume() to re-enable the CRQ then retry + * ibmvtpm_send_crq() once before failing. + */ + if (rc == H_CLOSED && retry) { + tpm_ibmvtpm_resume(ibmvtpm->dev); + retry = false; + goto again; + } dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); - rc = 0; ibmvtpm->tpm_processing_cmd = false; - } else - rc = 0; + } spin_unlock(&ibmvtpm->rtce_lock); - return rc; + return 0; } static void tpm_ibmvtpm_cancel(struct tpm_chip *chip) @@ -268,26 +336,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) return rc; } -/** - * ibmvtpm_crq_send_init - Send a CRQ initialize message - * @ibmvtpm: vtpm device struct - * - * Return: - * 0 on success. - * Non-zero on failure. - */ -static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) -{ - int rc; - - rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); - if (rc != H_SUCCESS) - dev_err(ibmvtpm->dev, - "ibmvtpm_crq_send_init failed rc=%d\n", rc); - - return rc; -} - /** * tpm_ibmvtpm_remove - ibm vtpm remove entry point * @vdev: vio device struct @@ -400,44 +448,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); } -/** - * tpm_ibmvtpm_resume - Resume from suspend - * - * @dev: device struct - * - * Return: Always 0. 
- */ -static int tpm_ibmvtpm_resume(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - int rc = 0; - - do { - if (rc) - msleep(100); - rc = plpar_hcall_norets(H_ENABLE_CRQ, - ibmvtpm->vdev->unit_address); - } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); - - if (rc) { - dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); - return rc; - } - - rc = vio_enable_interrupts(ibmvtpm->vdev); - if (rc) { - dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); - return rc; - } - - rc = ibmvtpm_crq_send_init(ibmvtpm); - if (rc) - dev_err(dev, "Error send_init rc=%d\n", rc); - - return rc; -} - static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) { return (status == 0); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index c3181ea9f27132..bdcf8f25cd0d06 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -433,6 +433,9 @@ static void disable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; + if (priv->irq == 0) + return; + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) intmask = 0; @@ -983,9 +986,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); - if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) + if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); + + disable_interrupts(chip); + } } else { tpm_tis_probe_irq(chip, intmask); } diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index 00b113f4b95884..5c23a9a56921b6 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -42,6 +42,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses"); * @base: base port address of the IIO device */ struct quad8_iio { + struct mutex lock; struct counter_device counter; unsigned int preset[QUAD8_NUM_COUNTERS]; unsigned int count_mode[QUAD8_NUM_COUNTERS]; @@ -116,6 +117,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev, /* Borrow XOR Carry effectively doubles count range */ *val = (borrow ^ carry) << 24; + mutex_lock(&priv->lock); + /* Reset Byte Pointer; transfer Counter to Output Latch */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, base_offset + 1); @@ -123,6 +126,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev, for (i = 0; i < 3; i++) *val |= (unsigned int)inb(base_offset) << (8 * i); + mutex_unlock(&priv->lock); + return IIO_VAL_INT; case IIO_CHAN_INFO_ENABLE: *val = priv->ab_enable[chan->channel]; @@ -153,6 +158,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); @@ -176,12 +183,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev, /* Reset Error flag */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; case IIO_CHAN_INFO_ENABLE: /* only boolean values accepted */ if (val < 0 || val > 1) return -EINVAL; + mutex_lock(&priv->lock); + priv->ab_enable[chan->channel] = val; ior_cfg = val | priv->preset_enable[chan->channel] << 1; @@ -189,11 +200,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev, /* Load I/O control configuration */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; case 
IIO_CHAN_INFO_SCALE: + mutex_lock(&priv->lock); + /* Quadrature scaling only available in quadrature mode */ - if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1)) + if (!priv->quadrature_mode[chan->channel] && + (val2 || val != 1)) { + mutex_unlock(&priv->lock); return -EINVAL; + } /* Only three gain states (1, 0.5, 0.25) */ if (val == 1 && !val2) @@ -207,11 +225,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev, priv->quadrature_scale[chan->channel] = 2; break; default: + mutex_unlock(&priv->lock); return -EINVAL; } - else + else { + mutex_unlock(&priv->lock); return -EINVAL; + } + mutex_unlock(&priv->lock); return 0; } @@ -248,6 +270,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private, if (preset > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + priv->preset[chan->channel] = preset; /* Reset Byte Pointer */ @@ -257,6 +281,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private, for (i = 0; i < 3; i++) outb(preset >> (8 * i), base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -286,6 +312,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, /* Preset enable is active low in Input/Output Control register */ preset_enable = !preset_enable; + mutex_lock(&priv->lock); + priv->preset_enable[chan->channel] = preset_enable; ior_cfg = priv->ab_enable[chan->channel] | @@ -294,6 +322,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, /* Load I/O control configuration to Input / Output Control Register */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -351,6 +381,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev, unsigned int mode_cfg = cnt_mode << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + mutex_lock(&priv->lock); + priv->count_mode[chan->channel] = cnt_mode; /* Add quadrature mode configuration */ @@ -360,6 +392,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -387,19 +421,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int synchronous_mode) { struct quad8_iio *const priv = iio_priv(indio_dev); - const unsigned int idr_cfg = synchronous_mode | - priv->index_polarity[chan->channel] << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int idr_cfg = synchronous_mode; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->index_polarity[chan->channel] << 1; /* Index function must be non-synchronous in non-quadrature mode */ - if (synchronous_mode && !priv->quadrature_mode[chan->channel]) + if (synchronous_mode && !priv->quadrature_mode[chan->channel]) { + mutex_unlock(&priv->lock); return -EINVAL; + } priv->synchronous_mode[chan->channel] = synchronous_mode; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -427,8 +468,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int quadrature_mode) { struct quad8_iio *const priv = iio_priv(indio_dev); - unsigned int mode_cfg = priv->count_mode[chan->channel] << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int mode_cfg; + + mutex_lock(&priv->lock); + + mode_cfg = 
priv->count_mode[chan->channel] << 1; if (quadrature_mode) mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3; @@ -446,6 +491,8 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -473,15 +520,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int index_polarity) { struct quad8_iio *const priv = iio_priv(indio_dev); - const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] | - index_polarity << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int idr_cfg = index_polarity << 1; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->synchronous_mode[chan->channel]; priv->index_polarity[chan->channel] = index_polarity; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -585,7 +637,7 @@ static int quad8_signal_read(struct counter_device *counter, static int quad8_count_read(struct counter_device *counter, struct counter_count *count, struct counter_count_read_value *val) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int base_offset = priv->base + 2 * count->id; unsigned int flags; unsigned int borrow; @@ -600,6 +652,8 @@ static int quad8_count_read(struct counter_device *counter, /* Borrow XOR Carry effectively doubles count range */ position = (unsigned long)(borrow ^ carry) << 24; + mutex_lock(&priv->lock); + /* Reset Byte Pointer; transfer Counter to Output Latch */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, base_offset + 1); @@ -609,13 +663,15 @@ static int quad8_count_read(struct counter_device *counter, counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position); + mutex_unlock(&priv->lock); + return 0; } static int quad8_count_write(struct counter_device *counter, struct counter_count *count, struct counter_count_write_value *val) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int base_offset = priv->base + 2 * count->id; int err; unsigned long position; @@ -630,6 +686,8 @@ static int quad8_count_write(struct counter_device *counter, if (position > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); @@ -653,6 +711,8 @@ static int quad8_count_write(struct counter_device *counter, /* Reset Error flag */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; } @@ -673,13 +733,13 @@ static enum counter_count_function quad8_count_functions_list[] = { static int quad8_function_get(struct counter_device *counter, struct counter_count *count, size_t *function) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int id = count->id; - const unsigned int quadrature_mode = priv->quadrature_mode[id]; - const unsigned int scale = priv->quadrature_scale[id]; - if (quadrature_mode) - switch (scale) { + mutex_lock(&priv->lock); + + if (priv->quadrature_mode[id]) + switch (priv->quadrature_scale[id]) { case 0: *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1; break; @@ -693,6 +753,8 @@ static int quad8_function_get(struct counter_device *counter, else *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION; + 
mutex_unlock(&priv->lock); + return 0; } @@ -703,10 +765,15 @@ static int quad8_function_set(struct counter_device *counter, const int id = count->id; unsigned int *const quadrature_mode = priv->quadrature_mode + id; unsigned int *const scale = priv->quadrature_scale + id; - unsigned int mode_cfg = priv->count_mode[id] << 1; unsigned int *const synchronous_mode = priv->synchronous_mode + id; - const unsigned int idr_cfg = priv->index_polarity[id] << 1; const int base_offset = priv->base + 2 * id + 1; + unsigned int mode_cfg; + unsigned int idr_cfg; + + mutex_lock(&priv->lock); + + mode_cfg = priv->count_mode[id] << 1; + idr_cfg = priv->index_polarity[id] << 1; if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) { *quadrature_mode = 0; @@ -742,6 +809,8 @@ static int quad8_function_set(struct counter_device *counter, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -858,15 +927,20 @@ static int quad8_index_polarity_set(struct counter_device *counter, { struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id - 16; - const unsigned int idr_cfg = priv->synchronous_mode[channel_id] | - index_polarity << 1; const int base_offset = priv->base + 2 * channel_id + 1; + unsigned int idr_cfg = index_polarity << 1; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->synchronous_mode[channel_id]; priv->index_polarity[channel_id] = index_polarity; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -893,19 +967,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter, { struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id - 16; - const unsigned int idr_cfg = synchronous_mode | - priv->index_polarity[channel_id] << 1; const int base_offset = priv->base + 2 * channel_id + 1; + unsigned int idr_cfg = synchronous_mode; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->index_polarity[channel_id] << 1; /* Index function must be non-synchronous in non-quadrature mode */ - if (synchronous_mode && !priv->quadrature_mode[channel_id]) + if (synchronous_mode && !priv->quadrature_mode[channel_id]) { + mutex_unlock(&priv->lock); return -EINVAL; + } priv->synchronous_mode[channel_id] = synchronous_mode; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -970,6 +1051,8 @@ static int quad8_count_mode_set(struct counter_device *counter, break; } + mutex_lock(&priv->lock); + priv->count_mode[count->id] = cnt_mode; /* Set count mode configuration value */ @@ -982,6 +1065,8 @@ static int quad8_count_mode_set(struct counter_device *counter, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -1023,6 +1108,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter, if (err) return err; + mutex_lock(&priv->lock); + priv->ab_enable[count->id] = ab_enable; ior_cfg = ab_enable | priv->preset_enable[count->id] << 1; @@ -1030,6 +1117,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter, /* Load I/O control configuration */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); + mutex_unlock(&priv->lock); + return len; } @@ -1058,14 +1147,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter, return sprintf(buf, 
"%u\n", priv->preset[count->id]); } +static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id, + unsigned int preset) +{ + const unsigned int base_offset = quad8iio->base + 2 * id; + int i; + + quad8iio->preset[id] = preset; + + /* Reset Byte Pointer */ + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + + /* Set Preset Register */ + for (i = 0; i < 3; i++) + outb(preset >> (8 * i), base_offset); +} + static ssize_t quad8_count_preset_write(struct counter_device *counter, struct counter_count *count, void *private, const char *buf, size_t len) { struct quad8_iio *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id; unsigned int preset; int ret; - int i; ret = kstrtouint(buf, 0, &preset); if (ret) @@ -1075,14 +1178,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, if (preset > 0xFFFFFF) return -EINVAL; - priv->preset[count->id] = preset; + mutex_lock(&priv->lock); - /* Reset Byte Pointer */ - outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + quad8_preset_register_set(priv, count->id, preset); - /* Set Preset Register */ - for (i = 0; i < 3; i++) - outb(preset >> (8 * i), base_offset); + mutex_unlock(&priv->lock); return len; } @@ -1090,15 +1190,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, static ssize_t quad8_count_ceiling_read(struct counter_device *counter, struct counter_count *count, void *private, char *buf) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; + + mutex_lock(&priv->lock); /* Range Limit and Modulo-N count modes use preset value as ceiling */ switch (priv->count_mode[count->id]) { case 1: case 3: - return quad8_count_preset_read(counter, count, private, buf); + mutex_unlock(&priv->lock); + return sprintf(buf, "%u\n", priv->preset[count->id]); } + mutex_unlock(&priv->lock); + /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ return sprintf(buf, "33554431\n"); } @@ -1107,15 +1212,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter, struct counter_count *count, void *private, const char *buf, size_t len) { struct quad8_iio *const priv = counter->priv; + unsigned int ceiling; + int ret; + + ret = kstrtouint(buf, 0, &ceiling); + if (ret) + return ret; + + /* Only 24-bit values are supported */ + if (ceiling > 0xFFFFFF) + return -EINVAL; + + mutex_lock(&priv->lock); /* Range Limit and Modulo-N count modes use preset value as ceiling */ switch (priv->count_mode[count->id]) { case 1: case 3: - return quad8_count_preset_write(counter, count, private, buf, - len); + quad8_preset_register_set(priv, count->id, ceiling); + break; } + mutex_unlock(&priv->lock); + return len; } @@ -1143,6 +1262,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, /* Preset enable is active low in Input/Output Control register */ preset_enable = !preset_enable; + mutex_lock(&priv->lock); + priv->preset_enable[count->id] = preset_enable; ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1; @@ -1150,6 +1271,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, /* Load I/O control configuration to Input / Output Control Register */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -1320,6 +1443,9 @@ static int quad8_probe(struct device *dev, unsigned int id) quad8iio->counter.priv = quad8iio; quad8iio->base = base[id]; + /* Initialize mutex */ + 
mutex_init(&quad8iio->lock); + /* Reset all counters and disable interrupt function */ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP); /* Set initial configuration for all counters */ diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 029a7354f5416c..5c16f368879bf7 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -125,8 +125,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx) atomic_set(&dev->inflight, 0); mutex_lock(&drv_data.drv_mutex); list_add_tail(&u_ctx->entry, &drv_data.inact_dev); - if (!drv_data.last_dev) - drv_data.last_dev = u_ctx; mutex_unlock(&drv_data.drv_mutex); } diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 89ca292236ad86..538755062ab7ca 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -248,11 +248,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) return ret; ret = pci_enable_sriov(pcidev, num_vfs); - if (ret) + if (ret) { dfl_fpga_cdev_config_ports_pf(cdev); + return ret; + } } - return ret; + return num_vfs; } static void cci_pci_remove(struct pci_dev *pcidev) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4704aac336c293..2028dc017f7a09 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, int i = 0; bool ret = false; + stream->adjust = *adjust; + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -1180,6 +1182,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } +static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) +{ + int i; + struct pipe_ctx *pipe; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (pipe->plane_state->status.is_flip_pending) + return true; + } + return false; +} + bool dc_post_update_surfaces_to_stream(struct dc *dc) { int i; @@ -1190,6 +1212,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) post_surface_trace(dc); + if (is_flip_pending_in_pipes(dc, context)) + return true; + for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { @@ -2152,7 +2177,7 @@ void dc_commit_updates_for_stream(struct dc *dc, enum surface_update_type update_type; struct dc_state *context; struct dc_context *dc_ctx = dc->ctx; - int i; + int i, j; stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -2190,6 +2215,17 @@ void dc_commit_updates_for_stream(struct dc *dc, copy_surface_update_to_plane(surface, &srf_updates[i]); + if (update_type >= UPDATE_TYPE_MED) { + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = + &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->plane_state != surface) + continue; + + resource_build_scaling_params(pipe_ctx); + } + } } copy_stream_update_to_stream(dc, context, stream, stream_update); diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index f2d81b0558e569..e3f1ebee71306b 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, 
const struct i2c_device_id *id) } data->config = config; - hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42", data, &jc42_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 1de23b4f3809c5..92d2c706c2a752 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -384,7 +384,6 @@ static int altr_i2c_probe(struct platform_device *pdev) struct altr_i2c_dev *idev = NULL; struct resource *res; int irq, ret; - u32 val; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) @@ -411,17 +410,17 @@ static int altr_i2c_probe(struct platform_device *pdev) init_completion(&idev->msg_complete); spin_lock_init(&idev->lock); - val = device_property_read_u32(idev->dev, "fifo-size", + ret = device_property_read_u32(idev->dev, "fifo-size", &idev->fifo_size); - if (val) { + if (ret) { dev_err(&pdev->dev, "FIFO size set to default of %d\n", ALTR_I2C_DFLT_FIFO_SZ); idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; } - val = device_property_read_u32(idev->dev, "clock-frequency", + ret = device_property_read_u32(idev->dev, "clock-frequency", &idev->bus_clk_rate); - if (val) { + if (ret) { dev_err(&pdev->dev, "Default to 100kHz\n"); idev->bus_clk_rate = 100000; /* default clock rate */ } diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index bbc41ecf0d2ffd..6ed6d141020167 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -541,7 +541,7 @@ static const struct iio_info ad7797_info = { .read_raw = &ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, - .attrs = &ad7793_attribute_group, + .attrs = &ad7797_attribute_group, .validate_trigger = ad_sd_validate_trigger, }; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 73aee5949b6b3c..9f63ceb158657c 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1367,8 +1367,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc) static void stm32_adc_dma_buffer_done(void *data) { struct iio_dev *indio_dev = data; + struct stm32_adc *adc = iio_priv(indio_dev); + int residue = stm32_adc_dma_residue(adc); + + /* + * In DMA mode the trigger services of IIO are not used + * (e.g. no call to iio_trigger_poll). + * Calling irq handler associated to the hardware trigger is not + * relevant as the conversions have already been done. Data + * transfers are performed directly in DMA callback instead. + * This implementation avoids to call trigger irq handler that + * may sleep, in an atomic context (DMA irq handler context). 
+ */ + dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi); - iio_trigger_poll_chained(indio_dev->trig); + while (residue >= indio_dev->scan_bytes) { + u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi]; + + iio_push_to_buffers(indio_dev, buffer); + + residue -= indio_dev->scan_bytes; + adc->bufi += indio_dev->scan_bytes; + if (adc->bufi >= adc->rx_buf_sz) + adc->bufi = 0; + } } static int stm32_adc_dma_start(struct iio_dev *indio_dev) @@ -1778,6 +1800,7 @@ static int stm32_adc_probe(struct platform_device *pdev) { struct iio_dev *indio_dev; struct device *dev = &pdev->dev; + irqreturn_t (*handler)(int irq, void *p) = NULL; struct stm32_adc *adc; int ret; @@ -1843,9 +1866,11 @@ static int stm32_adc_probe(struct platform_device *pdev) if (ret < 0) return ret; + if (!adc->dma_chan) + handler = &stm32_adc_trigger_handler; + ret = iio_triggered_buffer_setup(indio_dev, - &iio_pollfunc_store_time, - &stm32_adc_trigger_handler, + &iio_pollfunc_store_time, handler, &stm32_adc_buffer_setup_ops); if (ret) { dev_err(&pdev->dev, "buffer setup failed\n"); diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c index 9a460807d46d13..abe4b56c847c7d 100644 --- a/drivers/iio/adc/ti-ads8344.c +++ b/drivers/iio/adc/ti-ads8344.c @@ -29,7 +29,7 @@ struct ads8344 { struct mutex lock; u8 tx_buf ____cacheline_aligned; - u16 rx_buf; + u8 rx_buf[3]; }; #define ADS8344_VOLTAGE_CHANNEL(chan, si) \ @@ -89,11 +89,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel, udelay(9); - ret = spi_read(spi, &adc->rx_buf, 2); + ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf)); if (ret) return ret; - return adc->rx_buf; + return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7; } static int ads8344_read_raw(struct iio_dev *iio, diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 4fd389678dba27..3f0b88b13dd359 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500; #define XADC_FLAGS_BUFFERED BIT(0) +/* + * The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does + * not have a hardware FIFO. Which means an interrupt is generated for each + * conversion sequence. At 1MSPS sample rate the CPU in ZYNQ7000 is completely + * overloaded by the interrupts that it soft-lockups. For this reason the driver + * limits the maximum samplerate 150kSPS. At this rate the CPU is fairly busy, + * but still responsive. + */ +#define XADC_MAX_SAMPLERATE 150000 + static void xadc_write_reg(struct xadc *xadc, unsigned int reg, uint32_t val) { @@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state) spin_lock_irqsave(&xadc->lock, flags); xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val); - xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS); + xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS); if (state) val |= XADC_AXI_INT_EOS; else @@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode) { uint16_t val; + /* Powerdown the ADC-B when it is not needed. */ switch (seq_mode) { case XADC_CONF1_SEQ_SIMULTANEOUS: case XADC_CONF1_SEQ_INDEPENDENT: - val = XADC_CONF2_PD_ADC_B; + val = 0; break; default: - val = 0; + val = XADC_CONF2_PD_ADC_B; break; } @@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev) if (ret) goto err; + /* + * In simultaneous mode the upper and lower aux channels are samples at + * the same time. 
In this mode the upper 8 bits in the sequencer + * register are don't care and the lower 8 bits control two channels + * each. As such we must set the bit if either the channel in the lower + * group or the upper group is enabled. + */ + if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS) + scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000; + ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16); if (ret) goto err; @@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = { .postdisable = &xadc_postdisable, }; +static int xadc_read_samplerate(struct xadc *xadc) +{ + unsigned int div; + uint16_t val16; + int ret; + + ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); + if (ret) + return ret; + + div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; + if (div < 2) + div = 2; + + return xadc_get_dclk_rate(xadc) / div / 26; +} + static int xadc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct xadc *xadc = iio_priv(indio_dev); - unsigned int div; uint16_t val16; int ret; @@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev, *val = -((273150 << 12) / 503975); return IIO_VAL_INT; case IIO_CHAN_INFO_SAMP_FREQ: - ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); - if (ret) + ret = xadc_read_samplerate(xadc); + if (ret < 0) return ret; - div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; - if (div < 2) - div = 2; - - *val = xadc_get_dclk_rate(xadc) / div / 26; - + *val = ret; return IIO_VAL_INT; default: return -EINVAL; } } -static int xadc_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, int val, int val2, long info) +static int xadc_write_samplerate(struct xadc *xadc, int val) { - struct xadc *xadc = iio_priv(indio_dev); unsigned long clk_rate = xadc_get_dclk_rate(xadc); unsigned int div; if (!clk_rate) return -EINVAL; - if (info != IIO_CHAN_INFO_SAMP_FREQ) - return -EINVAL; - if (val <= 0) return -EINVAL; /* Max. 150 kSPS */ - if (val > 150000) - val = 150000; + if (val > XADC_MAX_SAMPLERATE) + val = XADC_MAX_SAMPLERATE; val *= 26; @@ -927,7 +954,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev, * limit. */ div = clk_rate / val; - if (clk_rate / div / 26 > 150000) + if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE) div++; if (div < 2) div = 2; @@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev, div << XADC_CONF2_DIV_OFFSET); } +static int xadc_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, long info) +{ + struct xadc *xadc = iio_priv(indio_dev); + + if (info != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + + return xadc_write_samplerate(xadc, val); +} + static const struct iio_event_spec xadc_temp_events[] = { { .type = IIO_EV_TYPE_THRESH, @@ -1225,6 +1263,21 @@ static int xadc_probe(struct platform_device *pdev) if (ret) goto err_free_samplerate_trigger; + /* + * Make sure not to exceed the maximum samplerate since otherwise the + * resulting interrupt storm will soft-lock the system. 
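
The helpers above derive the sample rate as dclk / div / 26, the driver's fixed cost of 26 ADCCLK cycles per conversion. A worked sketch of the write-side rounding, assuming a hypothetical 104 MHz DCLK (the real value comes from xadc_get_dclk_rate()):

#include <stdio.h>

#define XADC_MAX_SAMPLERATE	150000

int main(void)
{
	unsigned long clk_rate = 104000000;	/* assumed DCLK, not from the patch */
	unsigned long val = XADC_MAX_SAMPLERATE * 26UL;
	unsigned int div = clk_rate / val;	/* 26 */

	if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
		div++;				/* 153.8 kSPS is still too fast */
	if (div < 2)
		div = 2;

	printf("div=%u -> %lu SPS\n", div, clk_rate / div / 26);	/* 27 -> 148148 */
	return 0;
}

With that clock the initial divider of 26 would still give ~153.8 kSPS, so it is bumped to 27 for ~148 kSPS, just under the XADC_MAX_SAMPLERATE cap that the probe path now also enforces.
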
+ */ + if (xadc->ops->flags & XADC_FLAGS_BUFFERED) { + ret = xadc_read_samplerate(xadc); + if (ret < 0) + goto err_free_samplerate_trigger; + if (ret > XADC_MAX_SAMPLERATE) { + ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE); + if (ret < 0) + goto err_free_samplerate_trigger; + } + } + ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0, dev_name(&pdev->dev), indio_dev); if (ret) diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 4a3064fb6cd9c3..364683783ae52f 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -80,7 +80,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); - if (!sdata->sensor_settings->odr.addr) + if (!sdata->sensor_settings->odr.mask) return 0; err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index d618650533b659..14850b7fe6d7fd 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1441,6 +1441,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) reg |= ARLTBL_RW; else reg &= ~ARLTBL_RW; + if (dev->vlan_enabled) + reg &= ~ARLTBL_IVL_SVL_SELECT; + else + reg |= ARLTBL_IVL_SVL_SELECT; b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); return b53_arl_op_wait(dev); @@ -1450,6 +1454,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, u16 vid, struct b53_arl_entry *ent, u8 *idx, bool is_valid) { + DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); unsigned int i; int ret; @@ -1457,6 +1462,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, if (ret) return ret; + bitmap_zero(free_bins, dev->num_arl_entries); + /* Read the bins */ for (i = 0; i < dev->num_arl_entries; i++) { u64 mac_vid; @@ -1468,13 +1475,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); b53_arl_to_entry(ent, mac_vid, fwd_entry); - if (!(fwd_entry & ARLTBL_VALID)) + if (!(fwd_entry & ARLTBL_VALID)) { + set_bit(i, free_bins); continue; + } if ((mac_vid & ARLTBL_MAC_MASK) != mac) continue; + if (dev->vlan_enabled && + ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) + continue; *idx = i; + return 0; } + if (bitmap_weight(free_bins, dev->num_arl_entries) == 0) + return -ENOSPC; + + *idx = find_first_bit(free_bins, dev->num_arl_entries); + return -ENOENT; } @@ -1504,15 +1522,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, if (op) return ret; - /* We could not find a matching MAC, so reset to a new entry */ - if (ret) { + switch (ret) { + case -ENOSPC: + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", + addr, vid); + return is_valid ? 
ret : 0; + case -ENOENT: + /* We could not find a matching MAC, so reset to a new entry */ + dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", + addr, vid, idx); fwd_entry = 0; - idx = 1; + break; + default: + dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", + addr, vid, idx); + break; } memset(&ent, 0, sizeof(ent)); ent.port = port; - ent.is_valid = is_valid; ent.vid = vid; ent.is_static = true; memcpy(ent.mac, addr, ETH_ALEN); diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 2a9f421680aad6..c90985c294a2e7 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -292,6 +292,7 @@ /* ARL Table Read/Write Register (8 bit) */ #define B53_ARLTBL_RW_CTRL 0x00 #define ARLTBL_RW BIT(0) +#define ARLTBL_IVL_SVL_SELECT BIT(6) #define ARLTBL_START_DONE BIT(7) /* MAC Address Index Register (48 bit) */ @@ -304,7 +305,7 @@ * * BCM5325 and BCM5365 share most definitions below */ -#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) +#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) #define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 #define ARLTBL_VID_MASK_25 0xff @@ -316,13 +317,16 @@ #define ARLTBL_VALID_25 BIT(63) /* ARL Table Data Entry N Registers (32 bit) */ -#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08) +#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18) #define ARLTBL_DATA_PORT_ID_MASK 0x1ff #define ARLTBL_TC(tc) ((3 & tc) << 11) #define ARLTBL_AGE BIT(14) #define ARLTBL_STATIC BIT(15) #define ARLTBL_VALID BIT(16) +/* Maximum number of bin entries in the ARL for all switches */ +#define B53_ARLTBL_MAX_BIN_ENTRIES 4 + /* ARL Search Control Register (8 bit) */ #define B53_ARL_SRCH_CTL 0x50 #define B53_ARL_SRCH_CTL_25 0x20 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 8f909d57501ffe..ff09ee777b2bf9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -995,6 +995,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, if (netif_running(dev)) bcmgenet_update_mib_counters(priv); + dev->netdev_ops->ndo_get_stats(dev); + for (i = 0; i < BCMGENET_STATS_LEN; i++) { const struct bcmgenet_stats *s; char *p; @@ -3204,6 +3206,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) dev->stats.rx_packets = rx_packets; dev->stats.rx_errors = rx_errors; dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; return &dev->stats; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c2e92786608b42..7bcdce182ee5c0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1054,9 +1054,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, } } -static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init, - struct cudbg_error *cudbg_err, - u8 mem_type) +static int cudbg_mem_region_size(struct cudbg_init *pdbg_init, + struct cudbg_error *cudbg_err, + u8 mem_type, unsigned long *region_size) { struct adapter *padap = pdbg_init->adap; struct cudbg_meminfo mem_info; @@ -1065,15 +1065,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init, memset(&mem_info, 0, sizeof(struct cudbg_meminfo)); rc = cudbg_fill_meminfo(padap, &mem_info); - if (rc) + if (rc) { + cudbg_err->sys_err = rc; return rc; + } cudbg_t4_fwcache(pdbg_init, cudbg_err); rc = 
cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx); - if (rc) + if (rc) { + cudbg_err->sys_err = rc; return rc; + } + + if (region_size) + *region_size = mem_info.avail[mc_idx].limit - + mem_info.avail[mc_idx].base; - return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base; + return 0; } static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, @@ -1081,7 +1089,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, struct cudbg_error *cudbg_err, u8 mem_type) { - unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type); + unsigned long size = 0; + int rc; + + rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size); + if (rc) + return rc; return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size, cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index af1f40cbccc882..f5bc996ac77d5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) */ static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct adapter *adapter = (struct adapter *)container_of(ptp, - struct adapter, ptp_clock_info); - struct fw_ptp_cmd c; + struct adapter *adapter = container_of(ptp, struct adapter, + ptp_clock_info); u64 ns; - int err; - - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | - FW_CMD_REQUEST_F | - FW_CMD_READ_F | - FW_PTP_CMD_PORTID_V(0)); - c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); - c.u.ts.sc = FW_PTP_SC_GET_TIME; - err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c); - if (err < 0) { - dev_err(adapter->pdev_dev, - "PTP: %s error %d\n", __func__, -err); - return err; - } + ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A)); + ns |= (u64)t4_read_reg(adapter, + T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32; /* convert to timespec*/ - ns = be64_to_cpu(c.u.ts.tm); *ts = ns_to_timespec64(ns); - - return err; + return 0; } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 3f6813daf3c104..31fcfc58e3373a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3748,7 +3748,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION)); ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); - if (ret < 0) + if (ret) return ret; *phy_fw_ver = val; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a957a6e4d4c428..b0519c326692c2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1900,6 +1900,9 @@ #define MAC_PORT_CFG2_A 0x818 +#define MAC_PORT_PTP_SUM_LO_A 0x990 +#define MAC_PORT_PTP_SUM_HI_A 0x994 + #define MPS_CMN_CTL_A 0x9000 #define COUNTPAUSEMCRX_S 5 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4d5ca302c06712..a30edb436f4af1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "mlx4_en.h" @@ -261,6 +262,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, } } +INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct 
mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u64 timestamp, + int napi_mode)); u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, @@ -329,6 +334,11 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, return tx_info->nr_txbb; } +INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u64 timestamp, + int napi_mode)); + u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int index, u64 timestamp, @@ -449,7 +459,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ - last_nr_txbb = ring->free_tx_desc( + last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc, + mlx4_en_free_tx_desc, + mlx4_en_recycle_tx_desc, priv, ring, ring_index, timestamp, napi_budget); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 94d7b69a95c746..eb2e57ff08a60b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) return NULL; } - tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL); if (!tracer) return ERR_PTR(-ENOMEM); @@ -982,7 +982,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) tracer->dev = NULL; destroy_workqueue(tracer->work_queue); free_tracer: - kfree(tracer); + kvfree(tracer); return ERR_PTR(err); } @@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) mlx5_fw_tracer_destroy_log_buf(tracer); flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); - kfree(tracer); + kvfree(tracer); } static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 11426f94c90c69..38aa55638bbed8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -367,6 +367,7 @@ enum { MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, + MLX5E_SQ_STATE_PENDING_XSK_TX, }; struct mlx5e_sq_wqe_info { @@ -948,7 +949,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); -void mlx5e_poll_ico_cq(struct mlx5e_cq *cq); +int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index fe2d596cb361fa..3bcdb5b2fc2034 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) return 0; + if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state)) + return 0; + spin_lock(&c->xskicosq_lock); mlx5e_trigger_irq(&c->xskicosq); spin_unlock(&c->xskicosq_lock); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 88ea279c29bb8b..0e340893ca0029 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3579,7 +3579,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; - if (!mlx5e_monitor_counter_supported(priv)) { + /* In switchdev mode, monitor counters doesn't monitor + * rx/tx stats of 802_3. The update stats mechanism + * should keep the 802_3 layout counters updated + */ + if (!mlx5e_monitor_counter_supported(priv) || + mlx5e_is_uplink_rep(priv)) { /* update HW stats in background for next time */ mlx5e_queue_update_stats(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1d295a7afc8ce2..c4eed5bbcd4547 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -587,7 +587,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) return !!err; } -void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) +int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); struct mlx5_cqe64 *cqe; @@ -595,11 +595,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) int i; if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return; + return 0; cqe = mlx5_cqwq_get_cqe(&cq->wq); if (likely(!cqe)) - return; + return 0; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), * otherwise a cq overrun may occur @@ -646,6 +646,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) sq->cc = sqcc; mlx5_cqwq_update_db_record(&cq->wq); + + return i; } bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 800d34ed8a96c5..76efa9579215c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -145,7 +145,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) busy |= rq->post_wqes(rq); if (xsk_open) { - mlx5e_poll_ico_cq(&c->xskicosq.cq); + if (mlx5e_poll_ico_cq(&c->xskicosq.cq)) + /* Don't clear the flag if nothing was polled to prevent + * queueing more WQEs and overflowing XSKICOSQ. 
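
The new MLX5E_SQ_STATE_PENDING_XSK_TX bit makes the wakeup a one-shot: mlx5e_xsk_wakeup() rings the ICOSQ doorbell only if the bit was clear, and NAPI clears it only after mlx5e_poll_ico_cq() has actually consumed completions, so repeated wakeups cannot keep posting NOP WQEs and overflow the XSKICOSQ. A user-space analogue of that handshake (the names and the atomic_flag are stand-ins, not driver API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag pending = ATOMIC_FLAG_INIT;

static void xsk_wakeup(void)
{
	if (atomic_flag_test_and_set(&pending))
		return;				/* a kick is already outstanding */
	puts("ring doorbell");			/* stands in for mlx5e_trigger_irq() */
}

static void poll_cq(int completions)
{
	if (completions)			/* only re-arm once work was observed */
		atomic_flag_clear(&pending);
}

int main(void)
{
	xsk_wakeup();	/* rings */
	xsk_wakeup();	/* suppressed */
	poll_cq(1);	/* completions seen, re-arm */
	xsk_wakeup();	/* rings again */
	return 0;
}
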
+ */ + clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state); busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index c51b2adfc1e19f..2cbfa5cfefabc8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&block->resource_list); block->afa = mlxsw_afa; @@ -344,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); - return NULL; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(mlxsw_afa_block_create); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 6c66a0f1b79e38..ad69913f19c10c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, * to be written using PEFA register to all indexes for all regions. */ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) { - err = -ENOMEM; + if (IS_ERR(afa_block)) { + err = PTR_ERR(afa_block); goto err_afa_block; } err = mlxsw_afa_block_continue(afa_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 3d3cca5961163a..d77cdcb5c642d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -444,7 +444,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl, rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) - return NULL; + return ERR_PTR(-ENOMEM); if (afa_block) { rulei->act_block = afa_block; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 346f4a5fe053bc..221aa6a474eb10 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, int err; afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) - return ERR_PTR(-ENOMEM); + if (IS_ERR(afa_block)) + return afa_block; err = mlxsw_afa_block_append_allocated_counter(afa_block, counter_index); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index a1ebc2b1ca0bdf..0bf91df80d47fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4648,26 +4648,20 @@ static void qed_chain_free_single(struct qed_dev *cdev, static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) { - void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl; u32 page_cnt = p_chain->page_cnt, i, pbl_size; - u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; - if (!pp_virt_addr_tbl) + if (!pp_addr_tbl) return; - if (!p_pbl_virt) - goto out; - for (i = 0; i < page_cnt; i++) { - if (!pp_virt_addr_tbl[i]) + if (!pp_addr_tbl[i].virt_addr || 
!pp_addr_tbl[i].dma_map) break; dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, - pp_virt_addr_tbl[i], - *(dma_addr_t *)p_pbl_virt); - - p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + pp_addr_tbl[i].virt_addr, + pp_addr_tbl[i].dma_map); } pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; @@ -4677,9 +4671,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) pbl_size, p_chain->pbl_sp.p_virt_table, p_chain->pbl_sp.p_phys_table); -out: - vfree(p_chain->pbl.pp_virt_addr_tbl); - p_chain->pbl.pp_virt_addr_tbl = NULL; + + vfree(p_chain->pbl.pp_addr_tbl); + p_chain->pbl.pp_addr_tbl = NULL; } void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) @@ -4780,19 +4774,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, { u32 page_cnt = p_chain->page_cnt, size, i; dma_addr_t p_phys = 0, p_pbl_phys = 0; - void **pp_virt_addr_tbl = NULL; + struct addr_tbl_entry *pp_addr_tbl; u8 *p_pbl_virt = NULL; void *p_virt = NULL; - size = page_cnt * sizeof(*pp_virt_addr_tbl); - pp_virt_addr_tbl = vzalloc(size); - if (!pp_virt_addr_tbl) + size = page_cnt * sizeof(*pp_addr_tbl); + pp_addr_tbl = vzalloc(size); + if (!pp_addr_tbl) return -ENOMEM; /* The allocation of the PBL table is done with its full size, since it * is expected to be successive. * qed_chain_init_pbl_mem() is called even in a case of an allocation - * failure, since pp_virt_addr_tbl was previously allocated, and it + * failure, since tbl was previously allocated, and it * should be saved to allow its freeing during the error flow. */ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; @@ -4806,8 +4800,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, p_chain->b_external_pbl = true; } - qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, - pp_virt_addr_tbl); + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl); if (!p_pbl_virt) return -ENOMEM; @@ -4826,7 +4819,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, /* Fill the PBL table with the physical address of the page */ *(dma_addr_t *)p_pbl_virt = p_phys; /* Keep the virtual address of the page */ - p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt; + p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys; p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 38f7f40b3a4d0d..e72f9f1d2e94d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1087,9 +1087,6 @@ static void qed_update_pf_params(struct qed_dev *cdev, #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 #define QED_PERIODIC_DB_REC_INTERVAL \ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) -#define QED_PERIODIC_DB_REC_WAIT_COUNT 10 -#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ - (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, enum qed_slowpath_wq_flag wq_flag, @@ -1123,7 +1120,7 @@ void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) static void qed_slowpath_wq_stop(struct qed_dev *cdev) { - int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; + int i; if (IS_VF(cdev)) return; @@ -1135,13 +1132,7 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev) /* Stop queuing new delayed works */ cdev->hwfns[i].slowpath_wq_active = false; - /* Wait until the last periodic doorbell recovery is executed */ - while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, - &cdev->hwfns[i].slowpath_task_flags) && - sleep_count--) - 
msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); - - flush_workqueue(cdev->hwfns[i].slowpath_wq); + cancel_delayed_work(&cdev->hwfns[i].slowpath_task); destroy_workqueue(cdev->hwfns[i].slowpath_wq); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 33ce139f090fb4..d1d6ba9cdccdd3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -119,6 +119,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { .div = 5, .val = 5, }, { .div = 6, .val = 6, }, { .div = 7, .val = 7, }, + { /* end of array */ } }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index e0212d2fc2a12c..fa32cd5b418ef9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -241,6 +241,8 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val) switch (phymode) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; break; case PHY_INTERFACE_MODE_MII: diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 730ab57201bdb2..aa101f72d40556 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1207,7 +1207,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]); if (df < 0 || df > GENEVE_DF_MAX) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF], "Invalid DF attribute"); return -EINVAL; } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 32c627702ac55f..a0abc729f0cac7 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3226,11 +3226,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); + rx_handler_func_t *rx_handler; + u8 icv_len = DEFAULT_ICV_LEN; struct net_device *real_dev; - int err; + int err, mtu; sci_t sci; - u8 icv_len = DEFAULT_ICV_LEN; - rx_handler_func_t *rx_handler; if (!tb[IFLA_LINK]) return -EINVAL; @@ -3246,7 +3246,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); - dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + if (mtu < 0) + dev->mtu = 0; + else + dev->mtu = mtu; rx_handler = rtnl_dereference(real_dev->rx_handler); if (rx_handler && rx_handler != macsec_handle_frame) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 26f6be4796c753..0ce1004a8d0db5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1704,7 +1704,7 @@ static int macvlan_device_event(struct notifier_block *unused, struct macvlan_dev, list); - if (macvlan_sync_address(vlan->dev, dev->dev_addr)) + if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) return NOTIFY_BAD; break; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 4004f98e50d9fc..04845a4017f935 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -468,6 +468,9 @@ static const struct team_mode *team_mode_get(const char *kind) struct team_mode_item *mitem; const struct 
team_mode *mode = NULL; + if (!try_module_get(THIS_MODULE)) + return NULL; + spin_lock(&mode_list_lock); mitem = __find_mode(kind); if (!mitem) { @@ -483,6 +486,7 @@ static const struct team_mode *team_mode_get(const char *kind) } spin_unlock(&mode_list_lock); + module_put(THIS_MODULE); return mode; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index b8228f50bc9412..6716deeb35e33c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -188,8 +188,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, fl6.flowi6_proto = iph->nexthdr; fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; - dst = ip6_route_output(net, NULL, &fl6); - if (dst == dst_null) + dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst) || dst == dst_null) goto err; skb_dst_drop(skb); @@ -474,7 +474,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); return vrf_ip6_out_redirect(vrf_dev, skb); @@ -686,7 +687,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); return vrf_ip_out_redirect(vrf_dev, skb); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 93690f77ec9c56..ae59fca9603220 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3144,7 +3144,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); if (id >= VXLAN_N_VID) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID], "VXLAN ID must be lower than 16777216"); return -ERANGE; } @@ -3155,7 +3155,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], = nla_data(data[IFLA_VXLAN_PORT_RANGE]); if (ntohs(p->high) < ntohs(p->low)) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE], "Invalid source port range"); return -EINVAL; } @@ -3165,7 +3165,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]); if (df < 0 || df > VXLAN_DF_MAX) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF], "Invalid DF attribute"); return -EINVAL; } diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index 6209f85a71ddb3..0af9e997c9f676 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c @@ -374,7 +374,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) } static void * -il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +il3945_rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 7c6e2c8634974d..0a02d8aca32061 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -2474,7 +2474,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, } static void * -il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 
+il4965_rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index 74229fcb63a912..e68a13c33c4574 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -3019,7 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, cpu_to_le16(priv->lib->bt_params->agg_time_limit); } -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 73196cbc7fbefd..75d958bab0e380 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -8,7 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -99,7 +99,7 @@ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, IWL_MVM_DQA_MIN_DATA_QUEUE = 10, - IWL_MVM_DQA_MAX_DATA_QUEUE = 31, + IWL_MVM_DQA_MAX_DATA_QUEUE = 30, }; enum iwl_mvm_tx_fifo { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 5d546dac78142c..022f2faccab411 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -525,8 +525,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | - IEEE80211_HE_MAC_CAP2_ACK_EN, + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, @@ -610,8 +609,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_BSR | - IEEE80211_HE_MAC_CAP2_ACK_EN, + IEEE80211_HE_MAC_CAP2_BSR, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 42d525e46e805b..9af657820b3883 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3663,7 +3663,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); } -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 5ee33c8ae9d24c..77b8def26edb22 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -566,6 +566,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_mvm_stat_data { struct iwl_mvm *mvm; + __le32 flags; __le32 mac_id; u8 beacon_filter_average_energy; void *general; @@ -606,6 +607,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, -general->beacon_average_energy[vif_id]; } + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + if (mvmvif->id != id) return; @@ -763,6 +771,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, flags = stats->flag; } + data.flags = flags; iwl_mvm_rx_stats_check_trigger(mvm, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 8ad2d889179c64..71d339e90a9e79 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -722,6 +722,11 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, lockdep_assert_held(&mvm->mutex); + if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, + "max queue %d >= num_of_queues (%d)", maxq, + mvm->trans->trans_cfg->base_params->num_of_queues)) + maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; + /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -ENOSPC; @@ -1164,9 +1169,9 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) inactive_tid_bitmap, &unshare_queues, &changetid_queues); - if (ret >= 0 && free_queue < 0) { + if (ret && free_queue < 0) { queue_owner = sta; - free_queue = ret; + free_queue = i; } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index ff4c34d7b74f79..248d42bf00c154 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1283,6 +1283,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) iwl_pcie_gen2_txq_unmap(trans, queue); + iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]); + trans_pcie->txq[queue] = NULL; + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 
0c7d74902d33b7..4b5ea0ec910936 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -261,7 +261,7 @@ static void rtl_rate_update(void *ppriv, { } -static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *rtl_rate_alloc(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); return rtlpriv; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index b8fe42f4b3c5bf..f97c48fd3edae7 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -1244,6 +1245,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->async_event_work); } +/* + * Convert integer values from ioctl structures to user pointers, silently + * ignoring the upper bits in the compat case to match behaviour of 32-bit + * kernels. + */ +static void __user *nvme_to_user_ptr(uintptr_t ptrval) +{ + if (in_compat_syscall()) + ptrval = (compat_uptr_t)ptrval; + return (void __user *)ptrval; +} + static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) { struct nvme_user_io io; @@ -1267,7 +1280,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) length = (io.nblocks + 1) << ns->lba_shift; meta_len = (io.nblocks + 1) * ns->ms; - metadata = (void __user *)(uintptr_t)io.metadata; + metadata = nvme_to_user_ptr(io.metadata); if (ns->ext) { length += meta_len; @@ -1290,7 +1303,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.appmask = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, - (void __user *)(uintptr_t)io.addr, length, + nvme_to_user_ptr(io.addr), length, metadata, meta_len, lower_32_bits(io.slba), NULL, 0); } @@ -1410,9 +1423,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, effects = nvme_passthru_start(ctrl, ns, cmd.opcode); status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, - (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, - cmd.metadata_len, 0, &result, timeout); + nvme_to_user_ptr(cmd.addr), cmd.data_len, + nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, + 0, &result, timeout); nvme_passthru_end(ctrl, effects); if (status >= 0) { @@ -1457,8 +1470,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, effects = nvme_passthru_start(ctrl, ns, cmd.opcode); status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, - (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, + nvme_to_user_ptr(cmd.addr), cmd.data_len, + nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 0, &cmd.result, timeout); nvme_passthru_end(ctrl, effects); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index aed6354cb27178..56caddeabb5e5c 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -510,7 +510,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (!nr_nsids) return 0; - down_write(&ctrl->namespaces_rwsem); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { unsigned nsid = le32_to_cpu(desc->nsids[n]); @@ -521,7 +521,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (++n == nr_nsids) break; } - up_write(&ctrl->namespaces_rwsem); + up_read(&ctrl->namespaces_rwsem); return 0; } diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 244984420b41b6..11e84ed4de361e 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -164,16 +164,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req) static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req) { struct request *rq; - unsigned int bytes; if (unlikely(nvme_tcp_async_req(req))) return false; /* async events don't have a request */ rq = blk_mq_rq_from_pdu(req); - bytes = blk_rq_payload_bytes(rq); - return rq_data_dir(rq) == WRITE && bytes && - bytes <= nvme_tcp_inline_data_size(req->queue); + return rq_data_dir(rq) == WRITE && req->data_len && + req->data_len <= nvme_tcp_inline_data_size(req->queue); } static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req) @@ -2090,7 +2088,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, c->common.flags |= NVME_CMD_SGL_METABUF; - if (rq_data_dir(rq) == WRITE && req->data_len && + if (!blk_rq_nr_phys_segments(rq)) + nvme_tcp_set_sg_null(c); + else if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(queue)) nvme_tcp_set_sg_inline(queue, c, req->data_len); else @@ -2117,7 +2117,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, req->data_sent = 0; req->pdu_len = 0; req->pdu_sent = 0; - req->data_len = blk_rq_payload_bytes(rq); + req->data_len = blk_rq_nr_phys_segments(rq) ? 
+ blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; if (rq_data_dir(rq) == WRITE && diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 882ce82c46990e..aa61d4c219d7b6 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -174,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn); void pciehp_get_latch_status(struct controller *ctrl, u8 *status); int pciehp_query_power_fault(struct controller *ctrl); -bool pciehp_card_present(struct controller *ctrl); -bool pciehp_card_present_or_link_active(struct controller *ctrl); +int pciehp_card_present(struct controller *ctrl); +int pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); -bool pciehp_check_link_active(struct controller *ctrl); +int pciehp_check_link_active(struct controller *ctrl); void pciehp_release_ctrl(struct controller *ctrl); int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 56daad828c9e00..312cc45c44c780 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; + int ret; pci_config_pm_runtime_get(pdev); - *value = pciehp_card_present_or_link_active(ctrl); + ret = pciehp_card_present_or_link_active(ctrl); pci_config_pm_runtime_put(pdev); + if (ret < 0) + return ret; + + *value = ret; return 0; } @@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) */ static void pciehp_check_presence(struct controller *ctrl) { - bool occupied; + int occupied; down_read(&ctrl->reset_lock); mutex_lock(&ctrl->state_lock); occupied = pciehp_card_present_or_link_active(ctrl); - if ((occupied && (ctrl->state == OFF_STATE || + if ((occupied > 0 && (ctrl->state == OFF_STATE || ctrl->state == BLINKINGON_STATE)) || (!occupied && (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE))) diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index dd8e4a5fb28260..6503d15effbbd3 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl) void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) { - bool present, link_active; + int present, link_active; /* * If the slot is on and presence or link has changed, turn it off. 
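
The presence/link helpers above move from bool to a tri-state int: 1 for occupied, 0 for empty, a negative errno when the hotplug controller itself has gone away. Because -ENODEV is non-zero, a plain truth test would mistake a dead controller for an occupied slot, which is why the callers switch to explicit comparisons (ret < 0, occupied > 0). A small stand-alone illustration (card_present() here is a stub, not the driver function):

#include <errno.h>
#include <stdio.h>

static int card_present(int controller_gone)
{
	if (controller_gone)
		return -ENODEV;		/* register read came back as all 1s */
	return 1;			/* or 0 for an empty slot */
}

int main(void)
{
	int present = card_present(1);

	if (present)			/* wrong: -ENODEV also lands here */
		puts("bool test: slot looks occupied");
	if (present > 0)		/* what the updated callers do instead */
		puts("tri-state test: slot occupied");
	return 0;
}
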
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) mutex_lock(&ctrl->state_lock); present = pciehp_card_present(ctrl); link_active = pciehp_check_link_active(ctrl); - if (!present && !link_active) { + if (present <= 0 && link_active <= 0) { mutex_unlock(&ctrl->state_lock); return; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index d74a71712cde41..356786a3b7f4b7 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -201,17 +201,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask) pcie_do_write_cmd(ctrl, cmd, mask, false); } -bool pciehp_check_link_active(struct controller *ctrl) +/** + * pciehp_check_link_active() - Is the link active + * @ctrl: PCIe hotplug controller + * + * Check whether the downstream link is currently active. Note it is + * possible that the card is removed immediately after this so the + * caller may need to take it into account. + * + * If the hotplug controller itself is not available anymore returns + * %-ENODEV. + */ +int pciehp_check_link_active(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 lnk_status; - bool ret; + int ret; - pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); - ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); + ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); + if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0) + return -ENODEV; - if (ret) - ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); + ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); + ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); return ret; } @@ -373,13 +385,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status) *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); } -bool pciehp_card_present(struct controller *ctrl) +/** + * pciehp_card_present() - Is the card present + * @ctrl: PCIe hotplug controller + * + * Function checks whether the card is currently present in the slot and + * in that case returns true. Note it is possible that the card is + * removed immediately after the check so the caller may need to take + * this into account. + * + * It the hotplug controller itself is not available anymore returns + * %-ENODEV. + */ +int pciehp_card_present(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; + int ret; - pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); - return slot_status & PCI_EXP_SLTSTA_PDS; + ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); + if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0) + return -ENODEV; + + return !!(slot_status & PCI_EXP_SLTSTA_PDS); } /** @@ -390,10 +418,19 @@ bool pciehp_card_present(struct controller *ctrl) * Presence Detect State bit, this helper also returns true if the Link Active * bit is set. This is a concession to broken hotplug ports which hardwire * Presence Detect State to zero, such as Wilocity's [1ae9:0200]. + * + * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug + * port is not present anymore returns %-ENODEV. 
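
Both helpers treat two conditions as "controller gone": pcie_capability_read_word() failing with PCIBIOS_DEVICE_NOT_FOUND, and a read that succeeds but returns all 1s, which is what config reads to a surprise-removed device produce. A short sketch of just the all-ones check (values are made up):

#include <stdint.h>
#include <stdio.h>

/* Reads from a removed device return all 1s on a 16-bit register. */
static int reg_looks_dead(uint16_t val)
{
	return val == (uint16_t)~0;
}

int main(void)
{
	printf("%d\n", reg_looks_dead(0x0040));	/* SLTSTA with only PDS set -> 0 */
	printf("%d\n", reg_looks_dead(0xffff));	/* device gone              -> 1 */
	return 0;
}
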
*/ -bool pciehp_card_present_or_link_active(struct controller *ctrl) +int pciehp_card_present_or_link_active(struct controller *ctrl) { - return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl); + int ret; + + ret = pciehp_card_present(ctrl); + if (ret) + return ret; + + return pciehp_check_link_active(ctrl); } int pciehp_query_power_fault(struct controller *ctrl) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 0c3086793e4ec6..5ea612a15550ee 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -919,6 +919,8 @@ static int pci_pm_resume_noirq(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; + pci_power_t prev_state = pci_dev->current_state; + bool skip_bus_pm = pci_dev->skip_bus_pm; if (dev_pm_may_skip_resume(dev)) return 0; @@ -937,12 +939,15 @@ static int pci_pm_resume_noirq(struct device *dev) * configuration here and attempting to put them into D0 again is * pointless, so avoid doing that. */ - if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform())) + if (!(skip_bus_pm && pm_suspend_no_platform())) pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); pcie_pme_root_status_cleanup(pci_dev); + if (!skip_bus_pm && prev_state == PCI_D3cold) + pci_bridge_wait_for_secondary_bus(pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -1333,6 +1338,7 @@ static int pci_pm_runtime_resume(struct device *dev) int rc = 0; struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + pci_power_t prev_state = pci_dev->current_state; /* * Restoring config space is necessary even if the device is not bound @@ -1348,6 +1354,9 @@ static int pci_pm_runtime_resume(struct device *dev) pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); + if (prev_state == PCI_D3cold) + pci_bridge_wait_for_secondary_bus(pci_dev); + if (pm && pm->runtime_resume) rc = pm->runtime_resume(dev); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 981ae16f935bce..779132aef0fb93 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1019,8 +1019,6 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) * because have already delayed for the bridge. */ if (dev->runtime_d3cold) { - if (dev->d3cold_delay && !dev->imm_ready) - msleep(dev->d3cold_delay); /* * When powering on a bridge from D3cold, the * whole hierarchy may be powered on into @@ -4605,14 +4603,17 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS); } + /** - * pcie_wait_for_link - Wait until link is active or inactive + * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? + * @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. */ -bool pcie_wait_for_link(struct pci_dev *pdev, bool active) +static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, + int delay) { int timeout = 1000; bool ret; @@ -4649,13 +4650,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active) timeout -= 10; } if (active && ret) - msleep(100); + msleep(delay); else if (ret != active) pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", active ? 
"set" : "cleared"); return ret == active; } +/** + * pcie_wait_for_link - Wait until link is active or inactive + * @pdev: Bridge device + * @active: waiting for active or inactive? + * + * Use this to wait till link becomes active or inactive. + */ +bool pcie_wait_for_link(struct pci_dev *pdev, bool active) +{ + return pcie_wait_for_link_delay(pdev, active, 100); +} + +/* + * Find maximum D3cold delay required by all the devices on the bus. The + * spec says 100 ms, but firmware can lower it and we allow drivers to + * increase it as well. + * + * Called with @pci_bus_sem locked for reading. + */ +static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) +{ + const struct pci_dev *pdev; + int min_delay = 100; + int max_delay = 0; + + list_for_each_entry(pdev, &bus->devices, bus_list) { + if (pdev->d3cold_delay < min_delay) + min_delay = pdev->d3cold_delay; + if (pdev->d3cold_delay > max_delay) + max_delay = pdev->d3cold_delay; + } + + return max(min_delay, max_delay); +} + +/** + * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible + * @dev: PCI bridge + * + * Handle necessary delays before access to the devices on the secondary + * side of the bridge are permitted after D3cold to D0 transition. + * + * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For + * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section + * 4.3.2. + */ +void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) +{ + struct pci_dev *child; + int delay; + + if (pci_dev_is_disconnected(dev)) + return; + + if (!pci_is_bridge(dev) || !dev->bridge_d3) + return; + + down_read(&pci_bus_sem); + + /* + * We only deal with devices that are present currently on the bus. + * For any hot-added devices the access delay is handled in pciehp + * board_added(). In case of ACPI hotplug the firmware is expected + * to configure the devices before OS is notified. + */ + if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { + up_read(&pci_bus_sem); + return; + } + + /* Take d3cold_delay requirements into account */ + delay = pci_bus_max_d3cold_delay(dev->subordinate); + if (!delay) { + up_read(&pci_bus_sem); + return; + } + + child = list_first_entry(&dev->subordinate->devices, struct pci_dev, + bus_list); + up_read(&pci_bus_sem); + + /* + * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before + * accessing the device after reset (that is 1000 ms + 100 ms). In + * practice this should not be needed because we don't do power + * management for them (see pci_bridge_d3_possible()). + */ + if (!pci_is_pcie(dev)) { + pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); + msleep(1000 + delay); + return; + } + + /* + * For PCIe downstream and root ports that do not support speeds + * greater than 5 GT/s need to wait minimum 100 ms. For higher + * speeds (gen3) we need to wait first for the data link layer to + * become active. + * + * However, 100 ms is the minimum and the PCIe spec says the + * software must allow at least 1s before it can determine that the + * device that did not respond is a broken device. There is + * evidence that 100 ms is not always enough, for example certain + * Titan Ridge xHCI controller does not always respond to + * configuration requests if we only wait for 100 ms (see + * https://bugzilla.kernel.org/show_bug.cgi?id=203885). + * + * Therefore we wait for 100 ms and check for the device presence. + * If it is still not present give it an additional 100 ms. 
+ */ + if (!pcie_downstream_port(dev)) + return; + + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); + msleep(delay); + } else { + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", + delay); + if (!pcie_wait_for_link_delay(dev, true, delay)) { + /* Did not train, no need to wait any further */ + return; + } + } + + if (!pci_device_is_present(child)) { + pci_dbg(child, "waiting additional %d ms to become accessible\n", delay); + msleep(delay); + } +} + void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 273d60cb0762d4..a5adc2e2c351d2 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -107,6 +107,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev); void pci_free_cap_save_buffers(struct pci_dev *dev); bool pci_bridge_d3_possible(struct pci_dev *dev); void pci_bridge_d3_update(struct pci_dev *dev); +void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev); static inline void pci_wakeup_event(struct pci_dev *dev) { diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 32c34330e5a676..5a1bbf2cb7e989 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -64,6 +64,7 @@ struct pcie_link_state { u32 clkpm_capable:1; /* Clock PM capable? */ u32 clkpm_enabled:1; /* Current Clock PM state */ u32 clkpm_default:1; /* Default Clock PM state by BIOS */ + u32 clkpm_disable:1; /* Clock PM disabled */ /* Exit latencies */ struct aspm_latency latency_up; /* Upstream direction exit latency */ @@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) static void pcie_set_clkpm(struct pcie_link_state *link, int enable) { - /* Don't enable Clock PM if the link is not Clock PM capable */ - if (!link->clkpm_capable) + /* + * Don't enable Clock PM if the link is not Clock PM capable + * or Clock PM is disabled + */ + if (!link->clkpm_capable || link->clkpm_disable) enable = 0; /* Need nothing if the specified equals to current state */ if (link->clkpm_enabled == enable) @@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) } link->clkpm_enabled = enabled; link->clkpm_default = enabled; - link->clkpm_capable = (blacklist) ? 0 : capable; + link->clkpm_capable = capable; + link->clkpm_disable = blacklist ? 
1 : 0; } static bool pcie_retrain_link(struct pcie_link_state *link) @@ -1097,10 +1102,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) link->aspm_disable |= ASPM_STATE_L1; pcie_config_aspm_link(link, policy_to_aspm_state(link)); - if (state & PCIE_LINK_STATE_CLKPM) { - link->clkpm_capable = 0; - pcie_set_clkpm(link, 0); - } + if (state & PCIE_LINK_STATE_CLKPM) + link->clkpm_disable = 1; + pcie_set_clkpm(link, policy_to_clkpm_state(link)); mutex_unlock(&aspm_lock); if (sem) up_read(&pci_bus_sem); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6465b014dea04e..029add7cf75d12 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4361,6 +4361,47 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +/* + * pci_acs_ctrl_enabled - compare desired ACS controls with those provided + * by a device + * @acs_ctrl_req: Bitmask of desired ACS controls + * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by + * the hardware design + * + * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included + * in @acs_ctrl_ena, i.e., the device provides all the access controls the + * caller desires. Return 0 otherwise. + */ +static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) +{ + if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req) + return 1; + return 0; +} + +/* + * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability. + * But the implementation could block peer-to-peer transactions between them + * and provide ACS-like functionality. + */ +static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) +{ + if (!pci_is_pcie(dev) || + ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) + return -ENOTTY; + + switch (dev->device) { + case 0x0710 ... 0x071e: + case 0x0721: + case 0x0723 ... 0x0732: + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + return false; +} + /* * AMD has indicated that the devices below do not support peer-to-peer * in any system where they are found in the southbridge with an AMD @@ -4404,7 +4445,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); - return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR); #else return -ENODEV; #endif @@ -4431,20 +4472,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { + if (!pci_quirk_cavium_acs_match(dev)) + return -ENOTTY; + /* - * Cavium root ports don't advertise an ACS capability. However, + * Cavium Root Ports don't advertise an ACS capability. However, * the RTL internally implements similar protection as if ACS had - * Request Redirection, Completion Redirection, Source Validation, + * Source Validation, Request Redirection, Completion Redirection, * and Upstream Forwarding features enabled. Assert that the * hardware implements and enables equivalent ACS functionality for * these flags. */ - acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); - - if (!pci_quirk_cavium_acs_match(dev)) - return -ENOTTY; - - return acs_flags ? 
0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) @@ -4454,13 +4494,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) * transactions with others, allowing masking out these bits as if they * were unimplemented in the ACS capability. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); - - return acs_flags ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* - * Many Intel PCH root ports do provide ACS-like features to disable peer + * Many Intel PCH Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. This is the list of device IDs known to fall * into that category as provided by Intel in Red Hat bugzilla 1037684. @@ -4508,37 +4547,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) return false; } -#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV) - static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) { - u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ? - INTEL_PCH_ACS_FLAGS : 0; - if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; - return acs_flags & ~flags ? 0 : 1; + if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK) + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return pci_acs_ctrl_enabled(acs_flags, 0); } /* - * These QCOM root ports do provide ACS-like features to disable peer + * These QCOM Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. Hardware supports source validation but it * will report the issue as Completer Abort instead of ACS Violation. - * Hardware doesn't support peer-to-peer and each root port is a root - * complex with unique segment numbers. It is not possible for one root - * port to pass traffic to another root port. All PCIe transactions are - * terminated inside the root port. + * Hardware doesn't support peer-to-peer and each Root Port is a Root + * Complex with unique segment numbers. It is not possible for one Root + * Port to pass traffic to another Root Port. All PCIe transactions are + * terminated inside the Root Port. */ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) { - u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); - int ret = acs_flags & ~flags ? 0 : 1; - - pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret); - - return ret; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) @@ -4639,7 +4673,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); - return acs_flags & ~ctrl ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, ctrl); } static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) @@ -4653,10 +4687,9 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) * perform peer-to-peer with other functions, allowing us to mask out * these bits as if they were unimplemented in the ACS capability. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); - - return acs_flags ? 
0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); } static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) @@ -4667,9 +4700,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) * Allow each Root Port to be in a separate IOMMU group by masking * SV/RR/CR/UF bits. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); - - return acs_flags ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static const struct pci_dev_acs_enabled { @@ -4768,9 +4800,26 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, + /* Zhaoxin multi-function devices */ + { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, + /* Zhaoxin Root/Downstream Ports */ + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, { 0 } }; +/* + * pci_dev_specific_acs_enabled - check whether device provides ACS controls + * @dev: PCI device + * @acs_flags: Bitmask of desired ACS controls + * + * Returns: + * -ENOTTY: No quirk applies to this device; we can't tell whether the + * device provides the desired controls + * 0: Device does not provide all the desired controls + * >0: Device provides all the controls in @acs_flags + */ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { const struct pci_dev_acs_enabled *i; @@ -5499,3 +5548,21 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, PCI_CLASS_DISPLAY_VGA, 8, quirk_reset_lenovo_thinkpad_p50_nvgpu); + +/* + * Device [1b21:2142] + * When in D0, PME# doesn't get asserted when plugging USB 3.0 device. 
+ */ +static void pci_fixup_no_d0_pme(struct pci_dev *dev) +{ + pci_info(dev, "PME# does not work under D0, disabling it\n"); + dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); + +static void apex_pci_fixup_class(struct pci_dev *pdev) +{ + pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; +} +DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a, + PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c index 91e24f01b54ed0..d78f86f8e46210 100644 --- a/drivers/pwm/pwm-bcm2835.c +++ b/drivers/pwm/pwm-bcm2835.c @@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &bcm2835_pwm_ops; + pc->chip.base = -1; pc->chip.npwm = 2; pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 852eb2347954d1..b98ec8847b4885 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -228,24 +228,28 @@ static int rcar_pwm_probe(struct platform_device *pdev) rcar_pwm->chip.base = -1; rcar_pwm->chip.npwm = 1; + pm_runtime_enable(&pdev->dev); + ret = pwmchip_add(&rcar_pwm->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret); + pm_runtime_disable(&pdev->dev); return ret; } - pm_runtime_enable(&pdev->dev); - return 0; } static int rcar_pwm_remove(struct platform_device *pdev) { struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&rcar_pwm->chip); pm_runtime_disable(&pdev->dev); - return pwmchip_remove(&rcar_pwm->chip); + return ret; } static const struct of_device_id rcar_pwm_of_table[] = { diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 4a855a21b782de..8032acc84161a9 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -415,16 +415,17 @@ static int tpu_probe(struct platform_device *pdev) tpu->chip.base = -1; tpu->chip.npwm = TPU_CHANNEL_MAX; + pm_runtime_enable(&pdev->dev); + ret = pwmchip_add(&tpu->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM chip\n"); + pm_runtime_disable(&pdev->dev); return ret; } dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id); - pm_runtime_enable(&pdev->dev); - return 0; } @@ -434,12 +435,10 @@ static int tpu_remove(struct platform_device *pdev) int ret; ret = pwmchip_remove(&tpu->chip); - if (ret) - return ret; pm_runtime_disable(&pdev->dev); - return 0; + return ret; } #ifdef CONFIG_OF diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index b542debbc6f038..010f541a500262 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -400,7 +400,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) void rproc_free_vring(struct rproc_vring *rvring) { struct rproc *rproc = rvring->rvdev->rproc; - int idx = rvring->rvdev->vring - rvring; + int idx = rvring - rvring->rvdev->vring; struct fw_rsc_vdev *rsc; idr_remove(&rproc->notifyids, rvring->notifyid); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 0c6245fc770694..983f9c9e08debe 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -849,8 +849,10 @@ static void io_subchannel_register(struct ccw_device *cdev) * Now we know this subchannel will stay, we can throw * our delayed uevent. 
*/ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } /* make it known to the system */ ret = ccw_device_add(cdev); if (ret) { @@ -1058,8 +1060,11 @@ static int io_subchannel_probe(struct subchannel *sch) * Throw the delayed uevent for the subchannel, register * the ccw_device and exit. */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + if (dev_get_uevent_suppress(&sch->dev)) { + /* should always be the case for the console */ + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } cdev = sch_get_cdev(sch); rc = ccw_device_add(cdev); if (rc) { diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index e401a3d0aa570f..339a6bc0339b0f 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -167,6 +167,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_disable; + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } + VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", sch->schid.cssid, sch->schid.ssid, sch->schid.sch_no); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index da6e97d8dc3bb8..6bb8917b99a19b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1208,9 +1208,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, rjt = fc_frame_payload_get(fp, sizeof(*rjt)); if (!rjt) FC_RPORT_DBG(rdata, "PRLI bad response\n"); - else + else { FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n", rjt->er_reason, rjt->er_explan); + if (rjt->er_reason == ELS_RJT_UNAB && + rjt->er_explan == ELS_EXPL_PLOGI_REQD) { + fc_rport_enter_plogi(rdata); + goto out; + } + } fc_rport_error_retry(rdata, FC_EX_ELS_RJT); } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index a227e36cbdc2b0..5a86a1ee0de3b1 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -342,13 +342,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) { ndlp->nrport = NULL; ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG; - } - spin_unlock_irq(&vport->phba->hbalock); + spin_unlock_irq(&vport->phba->hbalock); - /* Remove original register reference. The host transport - * won't reference this rport/remoteport any further. - */ - lpfc_nlp_put(ndlp); + /* Remove original register reference. The host transport + * won't reference this rport/remoteport any further. 
+ */ + lpfc_nlp_put(ndlp); + } else { + spin_unlock_irq(&vport->phba->hbalock); + } rport_err: return; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 0717e850bcbfdf..a951e1c8165ed1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2481,6 +2481,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) !pmb->u.mb.mbxStatus) { rpi = pmb->u.mb.un.varWords[0]; vpi = pmb->u.mb.un.varRegLogin.vpi; + if (phba->sli_rev == LPFC_SLI_REV4) + vpi -= phba->sli4_hba.max_cfg_param.vpi_base; lpfc_unreg_login(phba, vpi, rpi, pmb); pmb->vport = vport; pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -4011,6 +4013,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) struct lpfc_iocbq *piocb, *next_iocb; spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_IOQ_FLUSH || + !phba->sli4_hba.hdwq) { + spin_unlock_irq(&phba->hbalock); + return; + } /* Indicate the I/O queues are flushed */ phba->hba_flag |= HBA_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 271afea654e2ba..a5c78b38d3022b 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2012,7 +2012,7 @@ static void __iscsi_unbind_session(struct work_struct *work) if (session->target_id == ISCSI_MAX_TARGET) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - return; + goto unbind_session_exit; } target_id = session->target_id; @@ -2024,6 +2024,8 @@ static void __iscsi_unbind_session(struct work_struct *work) ida_simple_remove(&iscsi_sess_ida, target_id); scsi_remove_target(&session->dev); + +unbind_session_exit: iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); } diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 79d2af36f65526..7a3a942b40df07 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -907,7 +907,6 @@ struct pqi_scsi_dev { u8 scsi3addr[8]; __be64 wwid; u8 volume_id[16]; - u8 unique_id[16]; u8 is_physical_device : 1; u8 is_external_raid_device : 1; u8 is_expander_smp_device : 1; @@ -1130,8 +1129,9 @@ struct pqi_ctrl_info { struct mutex ofa_mutex; /* serialize ofa */ bool controller_online; bool block_requests; - bool in_shutdown; + bool block_device_reset; bool in_ofa; + bool in_shutdown; u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; @@ -1173,6 +1173,7 @@ struct pqi_ctrl_info { struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; dma_addr_t pqi_ofa_mem_dma_handle; void **pqi_ofa_chunk_virt_addr; + atomic_t sync_cmds_outstanding; }; enum pqi_ctrl_mode { @@ -1423,6 +1424,11 @@ static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) return ctrl_info->block_requests; } +static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->block_device_reset; +} + void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index ea5409bebf578d..5ae074505386a2 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -249,6 +249,11 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) scsi_unblock_requests(ctrl_info->scsi_host); } +static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) +{ 
+ ctrl_info->block_device_reset = true; +} + static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, unsigned long timeout_msecs) { @@ -331,6 +336,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, return device->in_remove && !ctrl_info->in_shutdown; } +static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->in_shutdown = true; +} + +static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->in_shutdown; +} + static inline void pqi_schedule_rescan_worker_with_delay( struct pqi_ctrl_info *ctrl_info, unsigned long delay) { @@ -360,6 +375,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) cancel_delayed_work_sync(&ctrl_info->rescan_work); } +static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info) +{ + cancel_work_sync(&ctrl_info->event_work); +} + static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) { if (!ctrl_info->heartbeat_counter) @@ -628,79 +648,6 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT); } -static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info, - u8 *scsi3addr, u16 vpd_page) -{ - int rc; - int i; - int pages; - unsigned char *buf, bufsize; - - buf = kzalloc(256, GFP_KERNEL); - if (!buf) - return false; - - /* Get the size of the page list first */ - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES, - buf, SCSI_VPD_HEADER_SZ); - if (rc != 0) - goto exit_unsupported; - - pages = buf[3]; - if ((pages + SCSI_VPD_HEADER_SZ) <= 255) - bufsize = pages + SCSI_VPD_HEADER_SZ; - else - bufsize = 255; - - /* Get the whole VPD page list */ - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES, - buf, bufsize); - if (rc != 0) - goto exit_unsupported; - - pages = buf[3]; - for (i = 1; i <= pages; i++) - if (buf[3 + i] == vpd_page) - goto exit_supported; - -exit_unsupported: - kfree(buf); - return false; - -exit_supported: - kfree(buf); - return true; -} - -static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info, - u8 *scsi3addr, u8 *device_id, int buflen) -{ - int rc; - unsigned char *buf; - - if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID)) - return 1; /* function not supported */ - - buf = kzalloc(64, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_DEVICE_ID, - buf, 64); - if (rc == 0) { - if (buflen > 16) - buflen = 16; - memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen); - } - - kfree(buf); - - return rc; -} - static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct bmic_identify_physical_device *buffer, @@ -1385,14 +1332,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, } } - if (pqi_get_device_id(ctrl_info, device->scsi3addr, - device->unique_id, sizeof(device->unique_id)) < 0) - dev_warn(&ctrl_info->pci_dev->dev, - "Can't get device id for scsi %d:%d:%d:%d\n", - ctrl_info->scsi_host->host_no, - device->bus, device->target, - device->lun); - out: kfree(buffer); @@ -4122,6 +4061,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, goto out; } + atomic_inc(&ctrl_info->sync_cmds_outstanding); + io_request = pqi_alloc_io_request(ctrl_info); put_unaligned_le16(io_request->index, @@ -4168,6 +4109,7 @@ static int 
pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, pqi_free_io_request(io_request); + atomic_dec(&ctrl_info->sync_cmds_outstanding); out: up(&ctrl_info->sync_request_sem); @@ -5402,7 +5344,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, pqi_ctrl_busy(ctrl_info); if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || - pqi_ctrl_in_ofa(ctrl_info)) { + pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -5650,6 +5592,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, return 0; } +static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) +{ + while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + usleep_range(1000, 2000); + } + + return 0; +} + static void pqi_lun_reset_complete(struct pqi_io_request *io_request, void *context) { @@ -5787,17 +5741,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) shost->host_no, device->bus, device->target, device->lun); pqi_check_ctrl_health(ctrl_info); - if (pqi_ctrl_offline(ctrl_info)) { - dev_err(&ctrl_info->pci_dev->dev, - "controller %u offlined - cannot send device reset\n", - ctrl_info->ctrl_id); + if (pqi_ctrl_offline(ctrl_info) || + pqi_device_reset_blocked(ctrl_info)) { rc = FAILED; goto out; } pqi_wait_until_ofa_finished(ctrl_info); + atomic_inc(&ctrl_info->sync_cmds_outstanding); rc = pqi_device_reset(ctrl_info, device); + atomic_dec(&ctrl_info->sync_cmds_outstanding); out: dev_err(&ctrl_info->pci_dev->dev, @@ -6119,7 +6073,8 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, ctrl_info = shost_to_hba(sdev->host); - if (pqi_ctrl_in_ofa(ctrl_info)) + if (pqi_ctrl_in_ofa(ctrl_info) || + pqi_ctrl_in_shutdown(ctrl_info)) return -EBUSY; switch (cmd) { @@ -6283,7 +6238,7 @@ static ssize_t pqi_unique_id_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - unsigned char uid[16]; + u8 unique_id[16]; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -6296,16 +6251,22 @@ static ssize_t pqi_unique_id_show(struct device *dev, flags); return -ENODEV; } - memcpy(uid, device->unique_id, sizeof(uid)); + + if (device->is_physical_device) { + memset(unique_id, 0, 8); + memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); + } else { + memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); + } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return snprintf(buffer, PAGE_SIZE, "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", - uid[0], uid[1], uid[2], uid[3], - uid[4], uid[5], uid[6], uid[7], - uid[8], uid[9], uid[10], uid[11], - uid[12], uid[13], uid[14], uid[15]); + unique_id[0], unique_id[1], unique_id[2], unique_id[3], + unique_id[4], unique_id[5], unique_id[6], unique_id[7], + unique_id[8], unique_id[9], unique_id[10], unique_id[11], + unique_id[12], unique_id[13], unique_id[14], unique_id[15]); } static ssize_t pqi_lunid_show(struct device *dev, @@ -7074,13 +7035,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) return pqi_revert_to_sis_mode(ctrl_info); } +#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 + static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; - rc = pqi_force_sis_mode(ctrl_info); - if (rc) - return rc; + if (reset_devices) { + sis_soft_reset(ctrl_info); + msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); + } else { + rc = 
pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; + } /* * Wait until the controller is ready to start accepting SIS @@ -7514,6 +7482,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) INIT_WORK(&ctrl_info->event_work, pqi_event_worker); atomic_set(&ctrl_info->num_interrupts, 0); + atomic_set(&ctrl_info->sync_cmds_outstanding, 0); INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); @@ -7787,8 +7756,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) 0, NULL, NO_TIMEOUT); } -#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 - static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) { msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); @@ -7956,28 +7923,74 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) pqi_remove_ctrl(ctrl_info); } +static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_io_request *io_request; + struct scsi_cmnd *scmd; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + io_request = &ctrl_info->io_request_pool[i]; + if (atomic_read(&io_request->refcount) == 0) + continue; + scmd = io_request->scmd; + WARN_ON(scmd != NULL); /* IO command from SML */ + WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ + } +} + static void pqi_shutdown(struct pci_dev *pci_dev) { int rc; struct pqi_ctrl_info *ctrl_info; ctrl_info = pci_get_drvdata(pci_dev); - if (!ctrl_info) - goto error; + if (!ctrl_info) { + dev_err(&pci_dev->dev, + "cache could not be flushed\n"); + return; + } + + pqi_disable_events(ctrl_info); + pqi_wait_until_ofa_finished(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_cancel_event_worker(ctrl_info); + + pqi_ctrl_shutdown_start(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + + rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); + if (rc) { + dev_err(&pci_dev->dev, + "wait for pending I/O failed\n"); + return; + } + + pqi_ctrl_block_device_reset(ctrl_info); + pqi_wait_until_lun_reset_finished(ctrl_info); /* * Write all data in the controller's battery-backed cache to * storage. 
*/ rc = pqi_flush_cache(ctrl_info, SHUTDOWN); - pqi_free_interrupts(ctrl_info); - pqi_reset(ctrl_info); - if (rc == 0) + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache\n"); + + pqi_ctrl_block_requests(ctrl_info); + + rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info); + if (rc) { + dev_err(&pci_dev->dev, + "wait for pending sync cmds failed\n"); return; + } + + pqi_crash_if_pending_command(ctrl_info); + pqi_reset(ctrl_info); -error: - dev_warn(&pci_dev->dev, - "unable to flush controller cache\n"); } static void pqi_process_lockup_action_param(void) diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index 6776dfc1d317c3..b7e28b9f8589f4 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy) struct sas_phy *phy = pqi_sas_phy->phy; sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (pqi_sas_phy->added_to_port) list_del(&pqi_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(pqi_sas_phy); } diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig index 01e76b58dd78af..3fa162c1fde730 100644 --- a/drivers/soc/xilinx/Kconfig +++ b/drivers/soc/xilinx/Kconfig @@ -19,7 +19,7 @@ config XILINX_VCU config ZYNQMP_POWER bool "Enable Xilinx Zynq MPSoC Power Management driver" - depends on PM && ARCH_ZYNQMP + depends on PM && ZYNQMP_FIRMWARE default y help Say yes to enable power management support for ZyqnMP SoC. @@ -31,7 +31,7 @@ config ZYNQMP_POWER config ZYNQMP_PM_DOMAINS bool "Enable Zynq MPSoC generic PM domains" default y - depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE + depends on PM && ZYNQMP_FIRMWARE select PM_GENERIC_DOMAINS help Say yes to enable device power management through PM domains diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 08d1bbbebf2d7b..e84b4fb493d627 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2725,8 +2725,10 @@ static int comedi_open(struct inode *inode, struct file *file) } cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); - if (!cfp) + if (!cfp) { + comedi_dev_put(dev); return -ENOMEM; + } cfp->dev = dev; diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c index 83026ba63d1cc5..78a7c1b3448ab1 100644 --- a/drivers/staging/comedi/drivers/dt2815.c +++ b/drivers/staging/comedi/drivers/dt2815.c @@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, int ret; for (i = 0; i < insn->n; i++) { + /* FIXME: lo bit 0 chooses voltage output or current output */ lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01; hi = (data[i] & 0xff0) >> 4; @@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, if (ret) return ret; + outb(hi, dev->iobase + DT2815_DATA); + devpriv->ao_readback[chan] = data[i]; } return i; diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c index 46199c8ca441b6..f12f81c8dd2fb6 100644 --- a/drivers/staging/gasket/apex_driver.c +++ b/drivers/staging/gasket/apex_driver.c @@ -570,13 +570,6 @@ static const struct pci_device_id apex_pci_ids[] = { { PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 } }; -static void apex_pci_fixup_class(struct pci_dev *pdev) -{ - pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; -} 
-DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID, - PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); - static int apex_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c index a2d67c28f530c8..5f0e089573a29f 100644 --- a/drivers/staging/gasket/gasket_sysfs.c +++ b/drivers/staging/gasket/gasket_sysfs.c @@ -228,8 +228,7 @@ int gasket_sysfs_create_entries(struct device *device, } mutex_lock(&mapping->mutex); - for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER); - i++) { + for (i = 0; attrs[i].attr.attr.name != NULL; i++) { if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) { dev_err(device, "Maximum number of sysfs nodes reached for device\n"); diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h index 1d0eed66a7f49f..ab5aa351d55500 100644 --- a/drivers/staging/gasket/gasket_sysfs.h +++ b/drivers/staging/gasket/gasket_sysfs.h @@ -30,10 +30,6 @@ */ #define GASKET_SYSFS_MAX_NODES 196 -/* End markers for sysfs struct arrays. */ -#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END -#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN) - /* * Terminator struct for a gasket_sysfs_attr array. Must be at the end of * all gasket_sysfs_attribute arrays. diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c index af215860be4c6c..ac563e23868edf 100644 --- a/drivers/staging/vt6656/int.c +++ b/drivers/staging/vt6656/int.c @@ -145,7 +145,8 @@ void vnt_int_process_data(struct vnt_private *priv) priv->wake_up_count = priv->hw->conf.listen_interval; - --priv->wake_up_count; + if (priv->wake_up_count) + --priv->wake_up_count; /* Turn on wake up to listen next beacon */ if (priv->wake_up_count == 1) diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index dcd933a6b66e1d..40c58ac4e209f3 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -83,9 +83,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, case VNT_KEY_PAIRWISE: key_mode |= mode; key_inx = 4; - /* Don't save entry for pairwise key for station mode */ - if (priv->op_mode == NL80211_IFTYPE_STATION) - clear_bit(entry, &priv->key_entry_inuse); break; default: return -EINVAL; @@ -109,7 +106,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_vif *vif, struct ieee80211_key_conf *key) { - struct ieee80211_bss_conf *conf = &vif->bss_conf; struct vnt_private *priv = hw->priv; u8 *mac_addr = NULL; u8 key_dec_mode = 0; @@ -151,16 +147,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE, key_dec_mode, true); - } else { - vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY, + else + vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - vnt_set_keymode(hw, (u8 *)conf->bssid, key, - VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - } - return 0; } diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 69a48383611f71..48db31238d56b3 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -633,8 +633,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif) priv->op_mode = vif->type; - vnt_set_bss_mode(priv); - /* LED blink on TX */ vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER); @@ -721,7 +719,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->basic_rates = conf->basic_rates; vnt_update_top_rates(priv); - vnt_set_bss_mode(priv); dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates); } @@ -750,11 +747,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->short_slot_time = false; vnt_set_short_slot_time(priv); - vnt_update_ifs(priv); vnt_set_vga_gain_offset(priv, priv->bb_vga[0]); vnt_update_pre_ed_threshold(priv, false); } + if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT)) + vnt_set_bss_mode(priv); + if (changed & BSS_CHANGED_TXPOWER) vnt_rf_setpower(priv, priv->current_rate, conf->chandef.chan->hw_value); @@ -778,12 +778,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); - vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, - conf->sync_tsf, priv->current_tsf); - vnt_mac_set_beacon_interval(priv, conf->beacon_int); vnt_reset_next_tbtt(priv, conf->beacon_int); + + vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, + conf->sync_tsf, priv->current_tsf); + + vnt_update_next_tbtt(priv, + conf->sync_tsf, conf->beacon_int); } else { vnt_clear_current_tsf(priv); @@ -818,15 +821,11 @@ static void vnt_configure(struct ieee80211_hw *hw, { struct vnt_private *priv = hw->priv; u8 rx_mode = 0; - int rc; *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; - rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, - MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); - - if (!rc) - rx_mode = RCR_MULTICAST | RCR_BROADCAST; + vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, + MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode); @@ -867,8 +866,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; break; case DISABLE_KEY: - if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) + if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) { clear_bit(key->hw_key_idx, &priv->key_entry_inuse); + + vnt_mac_disable_keyentry(priv, key->hw_key_idx); + } + default: break; } diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 6b4b354c88aa09..b5c970faf5854e 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -63,7 +63,7 @@ static int fc_get_pr_transport_id( * encoded TransportID. 
*/ ptr = &se_nacl->initiatorname[0]; - for (i = 0; i < 24; ) { + for (i = 0; i < 23; ) { if (!strncmp(&ptr[i], ":", 1)) { i++; continue; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 35be1be87d2a17..9425354aef99cd 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -2073,6 +2073,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mb->cmd_tail = 0; mb->cmd_head = 0; tcmu_flush_dcache_range(mb, sizeof(*mb)); + clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); del_timer(&udev->cmd_timer); diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 27284a2dcd2b62..436cc51c92c397 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) vtermnos[index] = vtermno; cons_ops[index] = ops; - /* reserve all indices up to and including this index */ - if (last_hvc < index) - last_hvc = index; - /* check if we need to re-register the kernel console */ hvc_check_console(index); @@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, cons_ops[i] == hp->ops) break; - /* no matching slot, just use a counter */ - if (i >= MAX_NR_HVC_CONSOLES) - i = ++last_hvc; + if (i >= MAX_NR_HVC_CONSOLES) { + + /* find 'empty' slot for console */ + for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) { + } + + /* no matching slot, just use a counter */ + if (i == MAX_NR_HVC_CONSOLES) + i = ++last_hvc + MAX_NR_HVC_CONSOLES; + } hp->index = i; - cons_ops[i] = ops; - vtermnos[i] = vtermno; + if (i < MAX_NR_HVC_CONSOLES) { + cons_ops[i] = ops; + vtermnos[i] = vtermno; + } list_add_tail(&(hp->next), &hvc_structs); mutex_unlock(&hvc_structs_mutex); diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 5ba6816ebf8110..bbaad2887ce76b 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) tty_port_init(&info->port); info->port.ops = &rocket_port_ops; info->flags &= ~ROCKET_MODE_MASK; - switch (pc104[board][line]) { - case 422: - info->flags |= ROCKET_MODE_RS422; - break; - case 485: - info->flags |= ROCKET_MODE_RS485; - break; - case 232: - default: + if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1)) + switch (pc104[board][line]) { + case 422: + info->flags |= ROCKET_MODE_RS422; + break; + case 485: + info->flags |= ROCKET_MODE_RS485; + break; + case 232: + default: + info->flags |= ROCKET_MODE_RS232; + break; + } + else info->flags |= ROCKET_MODE_RS232; - break; - } info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) { diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index d2d8b34946851d..c55c8507713c32 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -680,6 +680,12 @@ static int owl_uart_probe(struct platform_device *pdev) return PTR_ERR(owl_port->clk); } + ret = clk_prepare_enable(owl_port->clk); + if (ret) { + dev_err(&pdev->dev, "could not enable clk\n"); + return ret; + } + owl_port->port.dev = &pdev->dev; owl_port->port.line = pdev->id; owl_port->port.type = PORT_OWL; @@ -712,6 +718,7 @@ static int owl_uart_remove(struct platform_device *pdev) uart_remove_one_port(&owl_uart_driver, &owl_port->port); owl_uart_ports[pdev->id] = NULL; + clk_disable_unprepare(owl_port->clk); return 0; } diff --git 
a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 22e5d4e13714e8..7d1529b11ae9ce 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -873,9 +873,16 @@ static void sci_receive_chars(struct uart_port *port) tty_insert_flip_char(tport, c, TTY_NORMAL); } else { for (i = 0; i < count; i++) { - char c = serial_port_in(port, SCxRDR); - - status = serial_port_in(port, SCxSR); + char c; + + if (port->type == PORT_SCIF || + port->type == PORT_HSCIF) { + status = serial_port_in(port, SCxSR); + c = serial_port_in(port, SCxRDR); + } else { + c = serial_port_in(port, SCxRDR); + status = serial_port_in(port, SCxSR); + } if (uart_handle_sysrq_char(port, c)) { count--; i--; continue; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 4e55bc327a5475..fe098cf14e6a21 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -30,13 +30,15 @@ #define CDNS_UART_TTY_NAME "ttyPS" #define CDNS_UART_NAME "xuartps" +#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */ +#define CDNS_UART_MINOR 0 /* works best with devtmpfs */ +#define CDNS_UART_NR_PORTS 16 #define CDNS_UART_FIFO_SIZE 64 /* FIFO size */ #define CDNS_UART_REGISTER_SPACE 0x1000 #define TX_TIMEOUT 500000 /* Rx Trigger level */ static int rx_trigger_level = 56; -static int uartps_major; module_param(rx_trigger_level, uint, 0444); MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes"); @@ -182,7 +184,6 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255"); * @pclk: APB clock * @cdns_uart_driver: Pointer to UART driver * @baud: Current baud rate - * @id: Port ID * @clk_rate_change_nb: Notifier block for clock changes * @quirks: Flags for RXBS support. */ @@ -192,7 +193,6 @@ struct cdns_uart { struct clk *pclk; struct uart_driver *cdns_uart_driver; unsigned int baud; - int id; struct notifier_block clk_rate_change_nb; u32 quirks; bool cts_override; @@ -1119,6 +1119,8 @@ static const struct uart_ops cdns_uart_ops = { #endif }; +static struct uart_driver cdns_uart_uart_driver; + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /** * cdns_uart_console_putchar - write the character to the FIFO buffer @@ -1258,6 +1260,16 @@ static int cdns_uart_console_setup(struct console *co, char *options) return uart_set_options(port, co, baud, parity, bits, flow); } + +static struct console cdns_uart_console = { + .name = CDNS_UART_TTY_NAME, + .write = cdns_uart_console_write, + .device = uart_console_device, + .setup = cdns_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. console=ttyPS ) */ + .data = &cdns_uart_uart_driver, +}; #endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */ #ifdef CONFIG_PM_SLEEP @@ -1389,89 +1401,8 @@ static const struct of_device_id cdns_uart_of_match[] = { }; MODULE_DEVICE_TABLE(of, cdns_uart_of_match); -/* - * Maximum number of instances without alias IDs but if there is alias - * which target "< MAX_UART_INSTANCES" range this ID can't be used. 
- */ -#define MAX_UART_INSTANCES 32 - -/* Stores static aliases list */ -static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES); -static int alias_bitmap_initialized; - -/* Stores actual bitmap of allocated IDs with alias IDs together */ -static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES); -/* Protect bitmap operations to have unique IDs */ -static DEFINE_MUTEX(bitmap_lock); - -static int cdns_get_id(struct platform_device *pdev) -{ - int id, ret; - - mutex_lock(&bitmap_lock); - - /* Alias list is stable that's why get alias bitmap only once */ - if (!alias_bitmap_initialized) { - ret = of_alias_get_alias_list(cdns_uart_of_match, "serial", - alias_bitmap, MAX_UART_INSTANCES); - if (ret && ret != -EOVERFLOW) { - mutex_unlock(&bitmap_lock); - return ret; - } - - alias_bitmap_initialized++; - } - - /* Make sure that alias ID is not taken by instance without alias */ - bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES); - - dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n", - MAX_UART_INSTANCES, bitmap); - - /* Look for a serialN alias */ - id = of_alias_get_id(pdev->dev.of_node, "serial"); - if (id < 0) { - dev_warn(&pdev->dev, - "No serial alias passed. Using the first free id\n"); - - /* - * Start with id 0 and check if there is no serial0 alias - * which points to device which is compatible with this driver. - * If alias exists then try next free position. - */ - id = 0; - - for (;;) { - dev_info(&pdev->dev, "Checking id %d\n", id); - id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id); - - /* No free empty instance */ - if (id == MAX_UART_INSTANCES) { - dev_err(&pdev->dev, "No free ID\n"); - mutex_unlock(&bitmap_lock); - return -EINVAL; - } - - dev_dbg(&pdev->dev, "The empty id is %d\n", id); - /* Check if ID is empty */ - if (!test_and_set_bit(id, bitmap)) { - /* Break the loop if bit is taken */ - dev_dbg(&pdev->dev, - "Selected ID %d allocation passed\n", - id); - break; - } - dev_dbg(&pdev->dev, - "Selected ID %d allocation failed\n", id); - /* if taking bit fails then try next one */ - id++; - } - } - - mutex_unlock(&bitmap_lock); - - return id; -} +/* Temporary variable for storing number of instances */ +static int instances; /** * cdns_uart_probe - Platform driver probe @@ -1481,16 +1412,11 @@ static int cdns_get_id(struct platform_device *pdev) */ static int cdns_uart_probe(struct platform_device *pdev) { - int rc, irq; + int rc, id, irq; struct uart_port *port; struct resource *res; struct cdns_uart *cdns_uart_data; const struct of_device_id *match; - struct uart_driver *cdns_uart_uart_driver; - char *driver_name; -#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE - struct console *cdns_uart_console; -#endif cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), GFP_KERNEL); @@ -1500,64 +1426,35 @@ static int cdns_uart_probe(struct platform_device *pdev) if (!port) return -ENOMEM; - cdns_uart_uart_driver = devm_kzalloc(&pdev->dev, - sizeof(*cdns_uart_uart_driver), - GFP_KERNEL); - if (!cdns_uart_uart_driver) - return -ENOMEM; - - cdns_uart_data->id = cdns_get_id(pdev); - if (cdns_uart_data->id < 0) - return cdns_uart_data->id; + /* Look for a serialN alias */ + id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (id < 0) + id = 0; - /* There is a need to use unique driver name */ - driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d", - CDNS_UART_NAME, cdns_uart_data->id); - if (!driver_name) { - rc = -ENOMEM; - goto err_out_id; + if (id >= CDNS_UART_NR_PORTS) { + dev_err(&pdev->dev, "Cannot get uart_port structure\n"); + return -ENODEV; } - 
cdns_uart_uart_driver->owner = THIS_MODULE; - cdns_uart_uart_driver->driver_name = driver_name; - cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME; - cdns_uart_uart_driver->major = uartps_major; - cdns_uart_uart_driver->minor = cdns_uart_data->id; - cdns_uart_uart_driver->nr = 1; - + if (!cdns_uart_uart_driver.state) { + cdns_uart_uart_driver.owner = THIS_MODULE; + cdns_uart_uart_driver.driver_name = CDNS_UART_NAME; + cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME; + cdns_uart_uart_driver.major = CDNS_UART_MAJOR; + cdns_uart_uart_driver.minor = CDNS_UART_MINOR; + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE - cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console), - GFP_KERNEL); - if (!cdns_uart_console) { - rc = -ENOMEM; - goto err_out_id; - } - - strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME, - sizeof(cdns_uart_console->name)); - cdns_uart_console->index = cdns_uart_data->id; - cdns_uart_console->write = cdns_uart_console_write; - cdns_uart_console->device = uart_console_device; - cdns_uart_console->setup = cdns_uart_console_setup; - cdns_uart_console->flags = CON_PRINTBUFFER; - cdns_uart_console->data = cdns_uart_uart_driver; - cdns_uart_uart_driver->cons = cdns_uart_console; + cdns_uart_uart_driver.cons = &cdns_uart_console; #endif - rc = uart_register_driver(cdns_uart_uart_driver); - if (rc < 0) { - dev_err(&pdev->dev, "Failed to register driver\n"); - goto err_out_id; + rc = uart_register_driver(&cdns_uart_uart_driver); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to register driver\n"); + return rc; + } } - cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver; - - /* - * Setting up proper name_base needs to be done after uart - * registration because tty_driver structure is not filled. - * name_base is 0 by default. - */ - cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id; + cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver; match = of_match_node(cdns_uart_of_match, pdev->dev.of_node); if (match && match->data) { @@ -1634,6 +1531,7 @@ static int cdns_uart_probe(struct platform_device *pdev) port->flags = UPF_BOOT_AUTOCONF; port->ops = &cdns_uart_ops; port->fifosize = CDNS_UART_FIFO_SIZE; + port->line = id; /* * Register the port. 
@@ -1665,7 +1563,7 @@ static int cdns_uart_probe(struct platform_device *pdev) console_port = port; #endif - rc = uart_add_one_port(cdns_uart_uart_driver, port); + rc = uart_add_one_port(&cdns_uart_uart_driver, port); if (rc) { dev_err(&pdev->dev, "uart_add_one_port() failed; err=%i\n", rc); @@ -1675,13 +1573,15 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver->cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) console_port = NULL; #endif - uartps_major = cdns_uart_uart_driver->tty_driver->major; cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, "cts-override"); + + instances++; + return 0; err_out_pm_disable: @@ -1697,12 +1597,8 @@ static int cdns_uart_probe(struct platform_device *pdev) err_out_clk_dis_pclk: clk_disable_unprepare(cdns_uart_data->pclk); err_out_unregister_driver: - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); -err_out_id: - mutex_lock(&bitmap_lock); - if (cdns_uart_data->id < MAX_UART_INSTANCES) - clear_bit(cdns_uart_data->id, bitmap); - mutex_unlock(&bitmap_lock); + if (!instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } @@ -1725,10 +1621,6 @@ static int cdns_uart_remove(struct platform_device *pdev) #endif rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port); port->mapbase = 0; - mutex_lock(&bitmap_lock); - if (cdns_uart_data->id < MAX_UART_INSTANCES) - clear_bit(cdns_uart_data->id, bitmap); - mutex_unlock(&bitmap_lock); clk_disable_unprepare(cdns_uart_data->uartclk); clk_disable_unprepare(cdns_uart_data->pclk); pm_runtime_disable(&pdev->dev); @@ -1741,13 +1633,8 @@ static int cdns_uart_remove(struct platform_device *pdev) console_port = NULL; #endif - /* If this is last instance major number should be initialized */ - mutex_lock(&bitmap_lock); - if (bitmap_empty(bitmap, MAX_UART_INSTANCES)) - uartps_major = 0; - mutex_unlock(&bitmap_lock); - - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); + if (!--instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index fa9433e6cdc79a..8b3ecef50394a0 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include #include @@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) /* allocate everything in one go */ memsize = cols * rows * sizeof(char32_t); memsize += rows * sizeof(char32_t *); - p = kmalloc(memsize, GFP_KERNEL); + p = vmalloc(memsize); if (!p) return NULL; @@ -366,7 +367,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) { - kfree(vc->vc_uni_screen); + vfree(vc->vc_uni_screen); vc->vc_uni_screen = new_uniscr; } @@ -1206,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > (4 << 20)) + if (new_screen_size > KMALLOC_MAX_SIZE) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 84d6f7df09a4ef..8ca72d80501dd4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -412,9 +412,12 @@ 
static void acm_ctrl_irq(struct urb *urb) exit: retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval && retval != -EPERM) + if (retval && retval != -EPERM && retval != -ENODEV) dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", __func__, retval); + else + dev_vdbg(&acm->control->dev, + "control resubmission terminated %d\n", retval); } static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) @@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); + } else { + dev_vdbg(&acm->data->dev, "intended failure %d\n", res); } set_bit(index, &acm->read_urbs_free); return res; @@ -471,6 +476,7 @@ static void acm_read_bulk_callback(struct urb *urb) int status = urb->status; bool stopped = false; bool stalled = false; + bool cooldown = false; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); @@ -497,6 +503,14 @@ static void acm_read_bulk_callback(struct urb *urb) __func__, status); stopped = true; break; + case -EOVERFLOW: + case -EPROTO: + dev_dbg(&acm->data->dev, + "%s - cooling babbling device\n", __func__); + usb_mark_last_busy(acm->dev); + set_bit(rb->index, &acm->urbs_in_error_delay); + cooldown = true; + break; default: dev_dbg(&acm->data->dev, "%s - nonzero urb status received: %d\n", @@ -518,9 +532,11 @@ static void acm_read_bulk_callback(struct urb *urb) */ smp_mb__after_atomic(); - if (stopped || stalled) { + if (stopped || stalled || cooldown) { if (stalled) schedule_work(&acm->work); + else if (cooldown) + schedule_delayed_work(&acm->dwork, HZ / 2); return; } @@ -557,14 +573,20 @@ static void acm_softint(struct work_struct *work) struct acm *acm = container_of(work, struct acm, work); if (test_bit(EVENT_RX_STALL, &acm->flags)) { - if (!(usb_autopm_get_interface(acm->data))) { + smp_mb(); /* against acm_suspend() */ + if (!acm->susp_count) { for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); usb_clear_halt(acm->dev, acm->in); acm_submit_read_urbs(acm, GFP_KERNEL); - usb_autopm_put_interface(acm->data); + clear_bit(EVENT_RX_STALL, &acm->flags); } - clear_bit(EVENT_RX_STALL, &acm->flags); + } + + if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { + for (i = 0; i < ACM_NR; i++) + if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) + acm_submit_read_urb(acm, i, GFP_NOIO); } if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) @@ -1333,6 +1355,7 @@ static int acm_probe(struct usb_interface *intf, acm->readsize = readsize; acm->rx_buflimit = num_rx_buf; INIT_WORK(&acm->work, acm_softint); + INIT_DELAYED_WORK(&acm->dwork, acm_softint); init_waitqueue_head(&acm->wioctl); spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); @@ -1542,6 +1565,7 @@ static void acm_disconnect(struct usb_interface *intf) acm_kill_urbs(acm); cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); tty_unregister_device(acm_tty_driver, acm->minor); @@ -1584,6 +1608,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) acm_kill_urbs(acm); cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); + acm->urbs_in_error_delay = 0; return 0; } diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index ca1c026382c2e1..cd5e9d8ab23758 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -109,8 +109,11 @@ struct acm { # define EVENT_TTY_WAKEUP 0 # define EVENT_RX_STALL 1 # define ACM_THROTTLED 2 +# 
define ACM_ERROR_DELAY 3 + unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */ struct usb_cdc_line_coding line; /* bits, stop, parity */ - struct work_struct work; /* work queue entry for line discipline waking up */ + struct work_struct work; /* work queue entry for various purposes*/ + struct delayed_work dwork; /* for cool downs needed in error recovery */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ unsigned int ctrlout; /* output control lines (DTR, RTS) */ struct async_icount iocount; /* counters for control line changes */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 6852e3f6ee10e8..64679adb22b3ec 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1222,6 +1222,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) #ifdef CONFIG_PM udev->reset_resume = 1; #endif + /* Don't set the change_bits when the device + * was powered off. + */ + if (test_bit(port1, hub->power_bits)) + set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ @@ -2722,13 +2727,11 @@ static bool use_new_scheme(struct usb_device *udev, int retry, { int old_scheme_first_port = port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME; - int quick_enumeration = (udev->speed == USB_SPEED_HIGH); if (udev->speed >= USB_SPEED_SUPER) return false; - return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first - || quick_enumeration); + return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first); } /* Is a USB 3.0 port in the Inactive or Compliance Mode state? @@ -3087,6 +3090,15 @@ static int check_port_resume_type(struct usb_device *udev, if (portchange & USB_PORT_STAT_C_ENABLE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); + + /* + * Whatever made this reset-resume necessary may have + * turned on the port1 bit in hub->change_bits. But after + * a successful reset-resume we want the bit to be clear; + * if it was on it would indicate that something happened + * following the reset-resume. 
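For orientation, the cdc-acm hunks above park a babbling device's read URBs and resubmit them from delayed work roughly half a second later. The fragment below is only a kernel-style sketch of that mark, delay, then resubmit shape; the structure and function names are invented for illustration and are not the driver's own.

#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

struct example_dev {
        unsigned long           urbs_in_error_delay;    /* one bit per read URB */
        struct delayed_work     recovery_work;          /* runs after the cool-down */
        /* probe would do: INIT_DELAYED_WORK(&dev->recovery_work, example_recovery); */
};

/* Completion path: do not resubmit immediately, just note the index. */
static void example_note_babble(struct example_dev *dev, int urb_index)
{
        set_bit(urb_index, &dev->urbs_in_error_delay);
        /* HZ / 2 gives the device ~500 ms to calm down before we retry */
        schedule_delayed_work(&dev->recovery_work, HZ / 2);
}

/* Work handler: resubmit only the URBs that were parked. */
static void example_recovery(struct work_struct *work)
{
        struct example_dev *dev =
                container_of(work, struct example_dev, recovery_work.work);
        int i;

        for (i = 0; i < BITS_PER_LONG; i++)
                if (test_and_clear_bit(i, &dev->urbs_in_error_delay))
                        ;       /* resubmit URB i here, e.g. with GFP_NOIO */
}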
+ */ + clear_bit(port1, hub->change_bits); } return status; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 5adf489428aad5..02eaac7e1e34ec 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io) int i, retval; spin_lock_irqsave(&io->lock, flags); - if (io->status) { + if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; + io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { @@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } + + spin_lock_irqsave(&io->lock, flags); + io->count--; + if (!io->count) + complete(&io->complete); + spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index da30b5664ff3d5..3e8efe759c3e69 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 RGB RAPDIFIRE */ + { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f969b8791dfe23..c8f0224db9a892 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1725,7 +1725,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) u32 reg; u8 link_state; - u8 speed; /* * According to the Databook Remote wakeup request should @@ -1735,16 +1734,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) */ reg = dwc3_readl(dwc->regs, DWC3_DSTS); - speed = reg & DWC3_DSTS_CONNECTSPD; - if ((speed == DWC3_DSTS_SUPERSPEED) || - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) - return 0; - link_state = DWC3_DSTS_USBLNKST(reg); switch (link_state) { + case DWC3_LINK_STATE_RESET: case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ + case DWC3_LINK_STATE_RESUME: break; default: return -EINVAL; @@ -2481,14 +2477,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { - /* - * For OUT direction, host may send less than the setup - * length. Return true for all OUT requests. 
- */ - if (!req->direction) - return true; - - return req->request.actual == req->request.length; + return req->num_pending_sgs == 0; } static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, @@ -2512,8 +2501,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, req->request.actual = req->request.length - req->remaining; - if (!dwc3_gadget_ep_request_completed(req) || - req->num_pending_sgs) { + if (!dwc3_gadget_ep_request_completed(req)) { __dwc3_gadget_kick_transfer(dep); goto out; } diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index cac991173ac042..5a462a1d189692 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -728,19 +728,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb) case COMP_USB_TRANSACTION_ERROR: case COMP_STALL_ERROR: default: - if (ep_id == XDBC_EPID_OUT) + if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) xdbc.flags |= XDBC_FLAGS_OUT_STALL; - if (ep_id == XDBC_EPID_IN) + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) xdbc.flags |= XDBC_FLAGS_IN_STALL; xdbc_trace("endpoint %d stalled\n", ep_id); break; } - if (ep_id == XDBC_EPID_IN) { + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) { xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS; xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); - } else if (ep_id == XDBC_EPID_OUT) { + } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) { xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS; } else { xdbc_trace("invalid endpoint id %d\n", ep_id); diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h index 673686eeddd74e..6e2b7266a69525 100644 --- a/drivers/usb/early/xhci-dbc.h +++ b/drivers/usb/early/xhci-dbc.h @@ -120,8 +120,22 @@ struct xdbc_ring { u32 cycle_state; }; -#define XDBC_EPID_OUT 2 -#define XDBC_EPID_IN 3 +/* + * These are the "Endpoint ID" (also known as "Context Index") values for the + * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data + * structure. + * According to the "eXtensible Host Controller Interface for Universal Serial + * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer + * Rings", these should be 0 and 1, and those are the values AMD machines give + * you; but Intel machines seem to use the formula from section "4.5.1 Device + * Context Index", which is supposed to be used for the Device Context only. + * Luckily the values from Intel don't overlap with those from AMD, so we can + * just test for both. 
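The defines that close this comment keep both encodings side by side: the spec-mandated 0/1 and the 2/3 that Intel parts report. Below is a small self-contained sketch of the "accept either value" test the handler now performs; the enum and helper names are mine, chosen only to mirror the idea.

#include <stdbool.h>
#include <stdio.h>

/* Values mirroring the patch: spec-style (AMD) and Device-Context-style (Intel). */
enum { EPID_OUT = 0, EPID_IN = 1, EPID_OUT_INTEL = 2, EPID_IN_INTEL = 3 };

static bool epid_is_out(int ep_id)
{
        return ep_id == EPID_OUT || ep_id == EPID_OUT_INTEL;
}

static bool epid_is_in(int ep_id)
{
        return ep_id == EPID_IN || ep_id == EPID_IN_INTEL;
}

int main(void)
{
        int id;

        /* Both encodings land in the right bucket, and they never collide. */
        for (id = 0; id <= 3; id++)
                printf("ep_id %d: %s\n", id, epid_is_in(id) ? "IN" : "OUT");
        return 0;
}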
+ */ +#define XDBC_EPID_OUT 0 +#define XDBC_EPID_IN 1 +#define XDBC_EPID_OUT_INTEL 2 +#define XDBC_EPID_IN_INTEL 3 struct xdbc_state { u16 vendor; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 87fdeb042c6783..f8bcfc506f4a3e 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1828,6 +1828,10 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs->state = FFS_READ_DESCRIPTORS; ffs->setup_state = FFS_NO_SETUP; ffs->flags = 0; + + ffs->ms_os_descs_ext_prop_count = 0; + ffs->ms_os_descs_ext_prop_name_len = 0; + ffs->ms_os_descs_ext_prop_data_len = 0; } diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 1d0d8952a74bfa..58e5b015d40e64 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1950,10 +1950,10 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid) usba_start(udc); } else { udc->suspended = false; - usba_stop(udc); - if (udc->driver->disconnect) udc->driver->disconnect(&udc->gadget); + + usba_stop(udc); } udc->vbus_prev = vbus; } diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index a4d9b5e1e50ea0..d49c6dc1082dc9 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req, { struct bdc *bdc = ep->bdc; - if (req == NULL || &req->queue == NULL || &req->usb_req == NULL) + if (req == NULL) return; dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 6c8a684bcd946f..879a8540414bfe 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1306,7 +1306,47 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, wIndex, link_state); goto error; } + + /* + * set link to U0, steps depend on current link state. + * U3: set link to U0 and wait for u3exit completion. + * U1/U2: no PLC complete event, only set link to U0. 
+ * Resume/Recovery: device initiated U0, only wait for + * completion + */ + if (link_state == USB_SS_PORT_LS_U0) { + u32 pls = temp & PORT_PLS_MASK; + bool wait_u0 = false; + + /* already in U0 */ + if (pls == XDEV_U0) + break; + if (pls == XDEV_U3 || + pls == XDEV_RESUME || + pls == XDEV_RECOVERY) { + wait_u0 = true; + reinit_completion(&bus_state->u3exit_done[wIndex]); + } + if (pls <= XDEV_U3) /* U1, U2, U3 */ + xhci_set_link_state(xhci, ports[wIndex], + USB_SS_PORT_LS_U0); + if (!wait_u0) { + if (pls > XDEV_U3) + goto error; + break; + } + spin_unlock_irqrestore(&xhci->lock, flags); + if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], + msecs_to_jiffies(100))) + xhci_dbg(xhci, "missing U0 port change event for port %d\n", + wIndex); + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); + break; + } + if (link_state == USB_SS_PORT_LS_U3) { + int retries = 16; slot_id = xhci_find_slot_id_by_port(hcd, xhci, wIndex + 1); if (slot_id) { @@ -1317,17 +1357,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_stop_device(xhci, slot_id, 1); spin_lock_irqsave(&xhci->lock, flags); } - } - - xhci_set_link_state(xhci, ports[wIndex], link_state); - - spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); /* wait device to enter */ - spin_lock_irqsave(&xhci->lock, flags); - - temp = readl(ports[wIndex]->addr); - if (link_state == USB_SS_PORT_LS_U3) + xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3); + spin_unlock_irqrestore(&xhci->lock, flags); + while (retries--) { + usleep_range(4000, 8000); + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PLS_MASK) == XDEV_U3) + break; + } + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); bus_state->suspended_ports |= 1 << wIndex; + } break; case USB_PORT_FEAT_POWER: /* @@ -1537,6 +1578,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) } if ((temp & PORT_RC)) reset_change = true; + if (temp & PORT_OC) + status = 1; } if (!status && !reset_change) { xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); @@ -1602,6 +1645,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) port_index); goto retry; } + /* bail out if port detected a over-current condition */ + if (t1 & PORT_OC) { + bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n"); + return -EBUSY; + } /* suspend ports in U0, or bail out for new connect changes */ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { if ((t1 & PORT_CSC) && wake_enabled) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 884c601bfa15f8..9764122c9cdf2d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2552,6 +2552,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->usb3_rhub.bus_state.resume_done[i] = 0; /* Only the USB 2.0 completions will ever be used. */ init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]); + init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]); } if (scratchpad_alloc(xhci, flags)) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index cd8e49f6202d7c..414677a028f6ec 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -541,6 +541,23 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, stream_id); return; } + /* + * A cancelled TD can complete with a stall if HW cached the trb. 
+ * In this case driver can't find cur_td, but if the ring is empty we + * can move the dequeue pointer to the current enqueue position. + */ + if (!cur_td) { + if (list_empty(&ep_ring->td_list)) { + state->new_deq_seg = ep_ring->enq_seg; + state->new_deq_ptr = ep_ring->enqueue; + state->new_cycle_state = ep_ring->cycle_state; + goto done; + } else { + xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n"); + return; + } + } + /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Finding endpoint context"); @@ -586,6 +603,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = new_seg; state->new_deq_ptr = new_deq; +done: /* Don't update the ring cycle state for the producer (us). */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Cycle state = 0x%x", state->new_cycle_state); @@ -1673,6 +1691,7 @@ static void handle_port_status(struct xhci_hcd *xhci, (portsc & PORT_PLS_MASK) == XDEV_U1 || (portsc & PORT_PLS_MASK) == XDEV_U2)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); + complete(&bus_state->u3exit_done[hcd_portnum]); /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. If it's a device @@ -1847,8 +1866,8 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, if (reset_type == EP_HARD_RESET) { ep->ep_state |= EP_HARD_CLEAR_TOGGLE; - xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td); - xhci_clear_hub_tt_buffer(xhci, td, ep); + xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, + td); } xhci_ring_cmd_db(xhci); } @@ -1969,11 +1988,18 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { - /* Issue a reset endpoint command to clear the host side - * halt, followed by a set dequeue command to move the - * dequeue pointer past the TD. - * The class driver clears the device side halt later. + /* + * xhci internal endpoint state will go to a "halt" state for + * any stall, including default control pipe protocol stall. + * To clear the host side halt we need to issue a reset endpoint + * command, followed by a set dequeue command to move past the + * TD. + * Class drivers clear the device side halt from a functional + * stall later. Hub TT buffer should only be cleared for FS/LS + * devices behind HS hubs for functional stalls. */ + if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) + xhci_clear_hub_tt_buffer(xhci, td, ep); xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ep_ring->stream_id, td, EP_HARD_RESET); } else { @@ -2523,6 +2549,15 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_dbg(xhci, "td_list is empty while skip flag set. 
Clear skip flag for slot %u ep %u.\n", slot_id, ep_index); } + if (trb_comp_code == COMP_STALL_ERROR || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + xhci_cleanup_halted_endpoint(xhci, slot_id, + ep_index, + ep_ring->stream_id, + NULL, + EP_HARD_RESET); + } goto cleanup; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index e9da9a49e914f0..ff04d8e69bbb50 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3031,19 +3031,19 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, added_ctxs, added_ctxs); } -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td) +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td) { struct xhci_dequeue_state deq_state; - struct usb_device *udev = td->urb->dev; xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Cleaning up stalled endpoint ring"); /* We need to move the HW's dequeue pointer past this TD, * or it will attempt to resend it on the next doorbell ring. */ - xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, stream_id, td, &deq_state); + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td, + &deq_state); if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) return; @@ -3054,7 +3054,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Queueing new dequeue state"); - xhci_queue_new_dequeue_state(xhci, udev->slot_id, + xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, &deq_state); } else { /* Better hope no one uses the input context between now and the @@ -3065,7 +3065,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Setting up input context for " "configure endpoint command"); - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, + xhci_setup_input_ctx_for_quirk(xhci, slot_id, ep_index, &deq_state); } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 59e762fd0f2502..5af41ae9ac16e4 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1694,6 +1694,7 @@ struct xhci_bus_state { /* Which ports are waiting on RExit to U0 transition. 
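The hunks above pair a waiter in xhci_hub_control() with a complete() call from handle_port_status(), and the array member backing it is added just below. As a reminder of the shape of that handshake, here is a bare-bones sketch using the ordinary completion API; it is a simplification, not the driver code (the real driver keeps one completion per port and re-reads the port register afterwards).

#include <linux/completion.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(u3exit_done);         /* one per port in the real driver */

/* Requester side: arm the completion before kicking the link transition. */
static bool wait_for_u0(void)
{
        reinit_completion(&u3exit_done);
        /* ...write the port link state register here... */
        return wait_for_completion_timeout(&u3exit_done,
                                           msecs_to_jiffies(100)) != 0;
}

/* Event/IRQ side: called when the port change event reports U0. */
static void port_reached_u0(void)
{
        complete(&u3exit_done);
}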
*/ unsigned long rexit_ports; struct completion rexit_done[USB_MAXCHILDREN]; + struct completion u3exit_done[USB_MAXCHILDREN]; }; @@ -2117,8 +2118,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td); +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td); void xhci_stop_endpoint_command_watchdog(struct timer_list *t); void xhci_handle_command_timeout(struct work_struct *work); diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 2ab9600d0898bc..fc8a5da4a07c9a 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, /* High level: Gfx (indexed) register access */ #ifdef CONFIG_USB_SISUSBVGA_CON -int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data) +int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data) { return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } -int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data) +int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data) { return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } #endif -int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data) { int ret; @@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 *data) { int ret; @@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, +int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor) { int ret; @@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, } static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, - int port, u8 idx, u8 data, u8 mask) + u32 port, u8 idx, u8 data, u8 mask) { int ret; u8 tmp; @@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, return ret; } -int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor) { return sisusb_setidxregandor(sisusb, port, index, 0xff, myor); } -int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand) { return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00); @@ -2785,8 +2785,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig) static int sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y, unsigned long arg) { - int retval, port, length; - u32 address; + int retval, length; + u32 port, address; /* All our commands require the device * to be initialized. 
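With these changes xhci_cleanup_stalled_ring() can be reached with td == NULL (the new hard-reset path in handle_tx_event()), which is presumably why the slot id is now passed explicitly rather than read through td->urb->dev. A tiny stand-alone illustration of why deriving context from an optional pointer is fragile; the structures are invented for the example, and passing the identifier separately keeps the helper usable on both the "have a TD" and "no TD" paths.

#include <stdio.h>
#include <stddef.h>

struct dev { int slot_id; };
struct urb { struct dev *dev; };
struct td  { struct urb *urb; };

/* Old shape: only works when a TD is available. */
static int cleanup_from_td(struct td *td)
{
        return td->urb->dev->slot_id;           /* crashes if td == NULL */
}

/* New shape: caller supplies the slot id, td stays optional. */
static int cleanup_explicit(int slot_id, struct td *td)
{
        (void)td;                               /* may legitimately be NULL */
        return slot_id;
}

int main(void)
{
        struct dev d = { .slot_id = 5 };
        struct urb u = { .dev = &d };
        struct td  t = { .urb = &u };

        printf("%d %d %d\n", cleanup_from_td(&t),
               cleanup_explicit(5, &t), cleanup_explicit(5, NULL));
        return 0;
}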
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h index 1782c759c4ad58..ace09985dae4c5 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_init.h +++ b/drivers/usb/misc/sisusbvga/sisusb_init.h @@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = { int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); -extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data); -extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data); -extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data); +extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data); +extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data); -extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 * data); -extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor); -extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor); -extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand); void sisusb_delete(struct kref *kref); diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index bb2198496f4225..5fcad96e06564b 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo); static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, int status); +/* + * This driver needs its own workqueue, as we need to control memory allocation. + * + * In the course of error handling and power management uas_wait_for_pending_cmnds() + * needs to flush pending work items. In these contexts we cannot allocate memory + * by doing block IO as we would deadlock. For the same reason we cannot wait + * for anything allocating memory not heeding these constraints. + * + * So we have to control all work items that can be on the workqueue we flush. + * Hence we cannot share a queue and need our own. 
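The comment above gives the rationale; the uas_init()/uas_exit() changes later in this hunk create and tear down the queue. For reference, a skeletal module-style sketch of that pattern with invented names: WQ_MEM_RECLAIM is what guarantees a rescuer thread, so the queue can still make progress when it is flushed under memory pressure.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        /* WQ_MEM_RECLAIM: forward progress even in reclaim/error-handling paths */
        example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");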
+ */ +static struct workqueue_struct *workqueue; + static void uas_do_work(struct work_struct *work) { struct uas_dev_info *devinfo = @@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work) if (!err) cmdinfo->state &= ~IS_IN_WORK_LIST; else - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } out: spin_unlock_irqrestore(&devinfo->lock, flags); @@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo) lockdep_assert_held(&devinfo->lock); cmdinfo->state |= IS_IN_WORK_LIST; - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } static void uas_zap_pending(struct uas_dev_info *devinfo, int result) @@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, struct uas_cmd_info *ci = (void *)&cmnd->SCp; struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; + if (status == -ENODEV) /* too late */ + return; + scmd_printk(KERN_INFO, cmnd, "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ", prefix, status, cmdinfo->uas_tag, @@ -1227,7 +1243,31 @@ static struct usb_driver uas_driver = { .id_table = uas_usb_ids, }; -module_usb_driver(uas_driver); +static int __init uas_init(void) +{ + int rv; + + workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0); + if (!workqueue) + return -ENOMEM; + + rv = usb_register(&uas_driver); + if (rv) { + destroy_workqueue(workqueue); + return -ENOMEM; + } + + return 0; +} + +static void __exit uas_exit(void) +{ + usb_deregister(&uas_driver); + destroy_workqueue(workqueue); +} + +module_init(uas_init); +module_exit(uas_exit); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(USB_STORAGE); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1880f3e13f5763..f6c3681fa2e9ef 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2323,6 +2323,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000, USB_SC_DEVICE,USB_PR_DEVICE,NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Cyril Roelandt */ +UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported by Andrey Rahmatullin */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, "iRiver", diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 74cb3c2ecb3476..c950171556d8c8 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -192,7 +192,10 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm); const struct typec_altmode * typec_altmode_get_partner(struct typec_altmode *adev) { - return adev ? &to_altmode(adev)->partner->adev : NULL; + if (!adev || !to_altmode(adev)->partner) + return NULL; + + return &to_altmode(adev)->partner->adev; } EXPORT_SYMBOL_GPL(typec_altmode_get_partner); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index ea68559ce60e57..99fc6223f0dd27 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3799,6 +3799,14 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, */ break; + case PORT_RESET: + case PORT_RESET_WAIT_OFF: + /* + * State set back to default mode once the timer completes. + * Ignore CC changes here. 
+ */ + break; + default: if (tcpm_port_is_disconnected(port)) tcpm_set_state(port, unattached_state(port), 0); @@ -3860,6 +3868,15 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port) case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + + case PORT_RESET: + case PORT_RESET_WAIT_OFF: + /* + * State set back to default mode once the timer completes. + * Ignore vbus changes here. + */ + break; + default: break; } @@ -3913,10 +3930,19 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) case PORT_RESET_WAIT_OFF: tcpm_set_state(port, tcpm_default_state(port), 0); break; + case SRC_TRY_WAIT: case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + + case PORT_RESET: + /* + * State set back to default mode once the timer completes. + * Ignore vbus changes here. + */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index ce04edc69e5f0d..c4147e93aa7d4b 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -282,6 +282,7 @@ static int watchdog_start(struct watchdog_device *wdd) if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; + wd_data->last_hw_keepalive = started_at; watchdog_update_worker(wdd); } diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e17ca815617131..a38292ef79f6d5 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -448,7 +448,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn); int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) { - return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); + int err; + + err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); + /* Some hypervisors are buggy and can return 1. 
*/ + if (err > 0) + err = GNTST_general_error; + + return err; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index b378cd780ed547..fc5eb0f8930499 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -169,7 +169,7 @@ static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server) spin_lock(&server->probe_lock); - if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) { + if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) { server->cm_epoch = call->epoch; server->probe.cm_epoch = call->epoch; goto out; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index d5efb1debebf46..485cc3b2aaa8ad 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -1329,7 +1329,7 @@ extern struct afs_volume *afs_create_volume(struct afs_fs_context *); extern void afs_activate_volume(struct afs_volume *); extern void afs_deactivate_volume(struct afs_volume *); extern void afs_put_volume(struct afs_cell *, struct afs_volume *); -extern int afs_check_volume_status(struct afs_volume *, struct key *); +extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *); /* * write.c diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 172ba569cd602b..2a3305e42b145c 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -192,7 +192,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) write_unlock(&vnode->volume->servers_lock); set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - error = afs_check_volume_status(vnode->volume, fc->key); + error = afs_check_volume_status(vnode->volume, fc); if (error < 0) goto failed_set_error; @@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags); set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - error = afs_check_volume_status(vnode->volume, fc->key); + error = afs_check_volume_status(vnode->volume, fc); if (error < 0) goto failed_set_error; @@ -341,7 +341,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) /* See if we need to do an update of the volume record. Note that the * volume may have moved or even have been deleted. */ - error = afs_check_volume_status(vnode->volume, fc->key); + error = afs_check_volume_status(vnode->volume, fc); if (error < 0) goto failed_set_error; diff --git a/fs/afs/server.c b/fs/afs/server.c index ca8115ba1724b8..d3a9288f755665 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -595,12 +595,9 @@ bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server } ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING, - TASK_INTERRUPTIBLE); + (fc->flags & AFS_FS_CURSOR_INTR) ? + TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret == -ERESTARTSYS) { - if (!(fc->flags & AFS_FS_CURSOR_INTR) && server->addresses) { - _leave(" = t [intr]"); - return true; - } fc->error = ret; _leave(" = f [intr]"); return false; diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 92ca5e27573b7a..4310336b9bb8c1 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -281,7 +281,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) /* * Make sure the volume record is up to date. 
*/ -int afs_check_volume_status(struct afs_volume *volume, struct key *key) +int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc) { time64_t now = ktime_get_real_seconds(); int ret, retries = 0; @@ -299,7 +299,7 @@ int afs_check_volume_status(struct afs_volume *volume, struct key *key) } if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) { - ret = afs_update_volume_status(volume, key); + ret = afs_update_volume_status(volume, fc->key); clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags); clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags); wake_up_bit(&volume->flags, AFS_VOLUME_WAIT); @@ -312,7 +312,9 @@ int afs_check_volume_status(struct afs_volume *volume, struct key *key) return 0; } - ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, TASK_INTERRUPTIBLE); + ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, + (fc->flags & AFS_FS_CURSOR_INTR) ? + TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret == -ERESTARTSYS) { _leave(" = %d", ret); return ret; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 31b236c6b1f76e..39230880f372bf 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -165,15 +165,15 @@ static void xdr_dump_bad(const __be32 *bp) int i; pr_notice("YFS XDR: Bad status record\n"); - for (i = 0; i < 5 * 4 * 4; i += 16) { + for (i = 0; i < 6 * 4 * 4; i += 16) { memcpy(x, bp, 16); bp += 4; pr_notice("%03x: %08x %08x %08x %08x\n", i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3])); } - memcpy(x, bp, 4); - pr_notice("0x50: %08x\n", ntohl(x[0])); + memcpy(x, bp, 8); + pr_notice("0x60: %08x %08x\n", ntohl(x[0]), ntohl(x[1])); } /* diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f5a38910a82bfb..703945cce0e5d9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1976,8 +1976,12 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, } /* want more caps from mds? 
*/ - if (want & ~(cap->mds_wanted | cap->issued)) - goto ack; + if (want & ~cap->mds_wanted) { + if (want & ~(cap->mds_wanted | cap->issued)) + goto ack; + if (!__cap_is_valid(cap)) + goto ack; + } /* things we might delay */ if ((cap->issued & ~retain) == 0) diff --git a/fs/ceph/export.c b/fs/ceph/export.c index b6bfa94332c304..79dc06881e78e2 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -315,6 +315,11 @@ static struct dentry *__get_parent(struct super_block *sb, req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err) { + ceph_mdsc_put_request(req); + return ERR_PTR(err); + } + inode = req->r_target_inode; if (inode) ihold(inode); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 76eacffb24d86e..58915d882285cf 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -664,6 +664,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; + if (!server->ops->new_lease_key) + return -EIO; + + server->ops->new_lease_key(pfid); + memset(rqst, 0, sizeof(rqst)); resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; memset(rsp_iov, 0, sizeof(rsp_iov)); diff --git a/fs/coredump.c b/fs/coredump.c index b1ea7dfbd1494b..d25bad2ed06163 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -211,6 +211,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, return -ENOMEM; (*argv)[(*argc)++] = 0; ++pat_ptr; + if (!(*pat_ptr)) + return -ENOMEM; } /* Repeat as long as we have more pattern to process and more output diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 164dbfd40c52d3..9bd44588eb77cb 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -498,6 +498,30 @@ int ext4_ext_check_inode(struct inode *inode) return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); } +static void ext4_cache_extents(struct inode *inode, + struct ext4_extent_header *eh) +{ + struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); + ext4_lblk_t prev = 0; + int i; + + for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { + unsigned int status = EXTENT_STATUS_WRITTEN; + ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); + int len = ext4_ext_get_actual_len(ex); + + if (prev && (prev != lblk)) + ext4_es_cache_extent(inode, prev, lblk - prev, ~0, + EXTENT_STATUS_HOLE); + + if (ext4_ext_is_unwritten(ex)) + status = EXTENT_STATUS_UNWRITTEN; + ext4_es_cache_extent(inode, lblk, len, + ext4_ext_pblock(ex), status); + prev = lblk + len; + } +} + static struct buffer_head * __read_extent_tree_block(const char *function, unsigned int line, struct inode *inode, ext4_fsblk_t pblk, int depth, @@ -532,26 +556,7 @@ __read_extent_tree_block(const char *function, unsigned int line, */ if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { struct ext4_extent_header *eh = ext_block_hdr(bh); - struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); - ext4_lblk_t prev = 0; - int i; - - for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { - unsigned int status = EXTENT_STATUS_WRITTEN; - ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); - int len = ext4_ext_get_actual_len(ex); - - if (prev && (prev != lblk)) - ext4_es_cache_extent(inode, prev, - lblk - prev, ~0, - EXTENT_STATUS_HOLE); - - if (ext4_ext_is_unwritten(ex)) - status = EXTENT_STATUS_UNWRITTEN; - ext4_es_cache_extent(inode, lblk, len, - ext4_ext_pblock(ex), status); - prev = lblk + len; - } + ext4_cache_extents(inode, eh); } return bh; errout: @@ -899,6 +904,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, path[0].p_bh = 
NULL; i = depth; + if (!(flags & EXT4_EX_NOCACHE) && depth == 0) + ext4_cache_extents(inode, eh); /* walk through the tree */ while (i) { ext_debug("depth %d: num %d, max %d\n", diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index a6288730210e8b..64b6549dd90167 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -660,7 +660,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent, * block has been written back to disk. (Yes, these values are * somewhat arbitrary...) */ -#define RECENTCY_MIN 5 +#define RECENTCY_MIN 60 #define RECENTCY_DIRTY 300 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index b9473fcc110fdc..7e0c77de551bbc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2131,7 +2131,7 @@ static int ext4_writepage(struct page *page, bool keep_towrite = false; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { - ext4_invalidatepage(page, 0, PAGE_SIZE); + inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return -EIO; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c76ffc259d197a..e1782b2e2e2dd5 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1936,7 +1936,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, int free; free = e4b->bd_info->bb_free; - BUG_ON(free <= 0); + if (WARN_ON(free <= 0)) + return; i = e4b->bd_info->bb_first_free; @@ -1959,7 +1960,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, } mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); - BUG_ON(ex.fe_len <= 0); + if (WARN_ON(ex.fe_len <= 0)) + break; if (free < ex.fe_len) { ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, "%d free clusters as per " diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 53d4c67a20df9b..d3500eaf900e2b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3562,7 +3562,8 @@ int ext4_calculate_overhead(struct super_block *sb) */ if (sbi->s_journal && !sbi->journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); - else if (ext4_has_feature_journal(sb) && !sbi->s_journal) { + else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { + /* j_inum for internal journal is non-zero */ j_inode = ext4_get_journal_inode(sb, j_inum); if (j_inode) { j_blocks = j_inode->i_size >> sb->s_blocksize_bits; diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 181900af2576ba..296b3189448a46 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -539,8 +539,9 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name, ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); + nid_t xnid = F2FS_I(inode)->i_xattr_nid; struct f2fs_xattr_entry *entry; - void *base_addr; + void *base_addr, *last_base_addr; int error = 0; size_t rest = buffer_size; @@ -550,6 +551,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) if (error) return error; + last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode); + list_for_each_xattr(entry, base_addr) { const struct xattr_handler *handler = f2fs_xattr_handler(entry->e_name_index); @@ -557,6 +560,15 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) size_t prefix_len; size_t size; + if ((void *)(entry) + sizeof(__u32) > last_base_addr || + (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) { + f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr", + inode->i_ino); + set_sbi_flag(F2FS_I_SB(inode), 
SBI_NEED_FSCK); + error = -EFSCORRUPTED; + goto cleanup; + } + if (!handler || (handler->list && !handler->list(dentry))) continue; diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 524111420b48c2..afca3287184b98 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1241,6 +1241,7 @@ nfsd4_run_cb_work(struct work_struct *work) container_of(work, struct nfsd4_callback, cb_work); struct nfs4_client *clp = cb->cb_clp; struct rpc_clnt *clnt; + int flags; if (cb->cb_need_restart) { cb->cb_need_restart = false; @@ -1269,7 +1270,8 @@ nfsd4_run_cb_work(struct work_struct *work) } cb->cb_msg.rpc_cred = clp->cl_cb_cred; - rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, + flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN; + rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags, cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1c82d7dd54df0d..8650a97e2ba967 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -266,6 +266,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, if (!nbl) { nbl= kmalloc(sizeof(*nbl), GFP_KERNEL); if (nbl) { + INIT_LIST_HEAD(&nbl->nbl_list); + INIT_LIST_HEAD(&nbl->nbl_lru); fh_copy_shallow(&nbl->nbl_fh, fh); locks_init_lock(&nbl->nbl_lock); nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, diff --git a/fs/pnode.c b/fs/pnode.c index 49f6d7ff21394f..1106137c747a3a 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -261,14 +261,13 @@ static int propagate_one(struct mount *m) child = copy_tree(last_source, last_source->mnt.mnt_root, type); if (IS_ERR(child)) return PTR_ERR(child); + read_seqlock_excl(&mount_lock); mnt_set_mountpoint(m, mp, child); + if (m->mnt_master != dest_master) + SET_MNT_MARK(m->mnt_master); + read_sequnlock_excl(&mount_lock); last_dest = m; last_source = child; - if (m->mnt_master != dest_master) { - read_seqlock_excl(&mount_lock); - SET_MNT_MARK(m->mnt_master); - read_sequnlock_excl(&mount_lock); - } hlist_add_head(&child->mnt_hash, list); return count_mounts(m->mnt_ns, child); } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 7b13988796e173..080ca9d5eccbb4 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -266,7 +266,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst, if (start < offset + dump->size) { tsz = min(offset + (u64)dump->size - start, (u64)size); buf = dump->buf + start - offset; - if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) { + if (remap_vmalloc_range_partial(vma, dst, buf, 0, + tsz)) { ret = -EFAULT; goto out_unlock; } @@ -624,7 +625,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; if (remap_vmalloc_range_partial(vma, vma->vm_start + len, - kaddr, tsz)) + kaddr, 0, tsz)) goto fail; size -= tsz; diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index edf43ddd7dce47..7dd740e3692daf 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -688,14 +688,14 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, ino_key_init(c, &key1, inum); err = ubifs_tnc_lookup(c, &key1, ino); - if (err) + if (err && err != -ENOENT) goto out_free; /* * Check whether an inode can really get deleted. * linkat() with O_TMPFILE allows rebirth of an inode. 
*/ - if (ino->nlink == 0) { + if (err == 0 && ino->nlink == 0) { dbg_rcvry("deleting orphaned inode %lu", (unsigned long)inum); diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 944add5ff8e098..d95dc9b0f0bba6 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -907,7 +907,12 @@ xfs_eofblocks_worker( { struct xfs_mount *mp = container_of(to_delayed_work(work), struct xfs_mount, m_eofblocks_work); + + if (!sb_start_write_trylock(mp->m_super)) + return; xfs_icache_free_eofblocks(mp, NULL); + sb_end_write(mp->m_super); + xfs_queue_eofblocks(mp); } @@ -934,7 +939,12 @@ xfs_cowblocks_worker( { struct xfs_mount *mp = container_of(to_delayed_work(work), struct xfs_mount, m_cowblocks_work); + + if (!sb_start_write_trylock(mp->m_super)) + return; xfs_icache_free_cowblocks(mp, NULL); + sb_end_write(mp->m_super); + xfs_queue_cowblocks(mp); } diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 2a1909397cb4df..c93c4b7328ef78 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -2401,7 +2401,10 @@ xfs_file_ioctl( if (error) return error; - return xfs_icache_free_eofblocks(mp, &keofb); + sb_start_write(mp->m_super); + error = xfs_icache_free_eofblocks(mp, &keofb); + sb_end_write(mp->m_super); + return error; } default: diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 0f08153b499419..6a4fd1738b0869 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1053,6 +1053,7 @@ xfs_reflink_remap_extent( uirec.br_startblock = irec->br_startblock + rlen; uirec.br_startoff = irec->br_startoff + rlen; uirec.br_blockcount = unmap_len - rlen; + uirec.br_state = irec->br_state; unmap_len = rlen; /* If this isn't a real mapping, we're done. */ diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 6ccfd75d3c24ce..812108f6cc89cb 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -529,8 +529,9 @@ xfsaild( { struct xfs_ail *ailp = data; long tout = 0; /* milliseconds */ + unsigned int noreclaim_flag; - current->flags |= PF_MEMALLOC; + noreclaim_flag = memalloc_noreclaim_save(); set_freezable(); while (1) { @@ -601,6 +602,7 @@ xfsaild( tout = xfsaild_push(ailp); } + memalloc_noreclaim_restore(noreclaim_flag); return 0; } diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 8e132cf819e4ee..e5341847e3a07b 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -596,7 +596,7 @@ void iio_device_unregister(struct iio_dev *indio_dev); * 0 on success, negative error number on failure. 
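The iio hunk just below drops a stray trailing semicolon from the devm_iio_device_register() wrapper macro. Here is a throwaway, compilable example of why that semicolon is a real bug rather than a style nit; the macro is my own, not the iio one.

#include <stdio.h>

static int do_register(int x) { return x; }

#define REGISTER_BAD(x)  do_register(x);        /* trailing ';' hidden in the macro */
#define REGISTER_GOOD(x) do_register(x)

int main(void)
{
        int cond = 0;

        /* REGISTER_BAD expands to "do_register(1);;": the hidden ';' ends the if
         * statement, so writing "if (cond) REGISTER_BAD(1); else ..." would not
         * compile.  It only survives here because of the braces. */
        if (cond) {
                REGISTER_BAD(1);
        }

        /* The fixed form behaves like a normal function call. */
        if (cond)
                REGISTER_GOOD(1);
        else
                printf("else branch pairs with the if, as expected\n");

        return 0;
}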
*/ #define devm_iio_device_register(dev, indio_dev) \ - __devm_iio_device_register((dev), (indio_dev), THIS_MODULE); + __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index b81f0f1ded5f9b..678b0a5797a023 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1027,7 +1027,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } - if (gfn >= memslots[start].base_gfn && + if (start < slots->used_slots && gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); return &memslots[start]; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 21a572469a4e64..228f6634762081 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2582,6 +2582,8 @@ #define PCI_VENDOR_ID_AMAZON 0x1d0f +#define PCI_VENDOR_ID_ZHAOXIN 0x1d17 + #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_HXT 0x1dbf diff --git a/include/linux/printk.h b/include/linux/printk.h index c09d67edda3a10..3b5cb66d8bc15a 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -202,7 +202,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack(void) __cold; -extern void printk_safe_init(void); extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); #else @@ -269,10 +268,6 @@ static inline void dump_stack(void) { } -static inline void printk_safe_init(void) -{ -} - static inline void printk_safe_flush(void) { } diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 2dd0a9ed5b3614..733fad7dfbed96 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -97,6 +97,11 @@ struct qed_chain_u32 { u32 cons_idx; }; +struct addr_tbl_entry { + void *virt_addr; + dma_addr_t dma_map; +}; + struct qed_chain { /* fastpath portion of the chain - required for commands such * as produce / consume. @@ -107,10 +112,11 @@ struct qed_chain { /* Fastpath portions of the PBL [if exists] */ struct { - /* Table for keeping the virtual addresses of the chain pages, - * respectively to the physical addresses in the pbl table. + /* Table for keeping the virtual and physical addresses of the + * chain pages, respectively to the physical addresses + * in the pbl table. 
*/ - void **pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { struct qed_chain_pbl_u16 u16; @@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain, *(u32 *)page_to_inc = 0; page_index = *(u32 *)page_to_inc; } - *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; + *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; } } @@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->pbl_sp.p_phys_table = 0; p_chain->pbl_sp.p_virt_table = NULL; - p_chain->pbl.pp_virt_addr_tbl = NULL; + p_chain->pbl.pp_addr_tbl = NULL; } /** @@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain, static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, void *p_virt_pbl, dma_addr_t p_phys_pbl, - void **pp_virt_addr_tbl) + struct addr_tbl_entry *pp_addr_tbl) { p_chain->pbl_sp.p_phys_table = p_phys_pbl; p_chain->pbl_sp.p_virt_table = p_virt_pbl; - p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; + p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } /** @@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) break; case QED_CHAIN_MODE_PBL: last_page_idx = p_chain->page_cnt - 1; - p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; break; } /* p_virt_addr points at this stage to the last page of the chain */ @@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) page_cnt = qed_chain_get_page_cnt(p_chain); for (i = 0; i < page_cnt; i++) - memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, QED_CHAIN_PAGE_SIZE); } diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 40f65888dd3887..fddad9f5b39097 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -162,6 +162,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt); extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); +extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index decac0790fc128..01a1334c5fc5e9 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -122,7 +122,7 @@ extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, - unsigned long size); + unsigned long pgoff, unsigned long size); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 523c6a09e1c8c7..d9ba9a77bcf298 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5933,7 +5933,9 @@ enum rate_control_capabilities { struct rate_control_ops { unsigned long capa; const char *name; - void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); + void *(*alloc)(struct ieee80211_hw *hw); + void (*add_debugfs)(struct ieee80211_hw *hw, void *priv, + struct dentry *debugfsdir); void (*free)(void *priv); void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); diff --git a/include/net/tcp.h b/include/net/tcp.h index 830c89db124515..cce285f70c8ed0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -50,7 +50,7 @@ extern struct inet_hashinfo tcp_hashinfo; 
extern struct percpu_counter tcp_orphan_count; void tcp_time_wait(struct sock *sk, int state, int timeo); -#define MAX_TCP_HEADER (128 + MAX_HEADER) +#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 #define TCP_MIN_SND_MSS 48 #define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) diff --git a/include/sound/soc.h b/include/sound/soc.h index f264c6509f0032..6d2662c3126ceb 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1059,6 +1059,7 @@ struct snd_soc_card { const struct snd_soc_dapm_route *of_dapm_routes; int num_of_dapm_routes; bool fully_routed; + bool disable_route_checks; /* lists of probed devices belonging to this card */ struct list_head component_dev_list; diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index 7ecaa65b7106eb..c2f580fd371b11 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -130,7 +130,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset, TRACE_EVENT(iocost_ioc_vrate_adj, - TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2], + TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm, u32 rq_wait_pct, int nr_lagging, int nr_shortages, int nr_surpluses), @@ -155,8 +155,8 @@ TRACE_EVENT(iocost_ioc_vrate_adj, __entry->old_vrate = atomic64_read(&ioc->vtime_rate);; __entry->new_vrate = new_vrate; __entry->busy_level = ioc->busy_level; - __entry->read_missed_ppm = (*missed_ppm)[READ]; - __entry->write_missed_ppm = (*missed_ppm)[WRITE]; + __entry->read_missed_ppm = missed_ppm[READ]; + __entry->write_missed_ppm = missed_ppm[WRITE]; __entry->rq_wait_pct = rq_wait_pct; __entry->nr_lagging = nr_lagging; __entry->nr_shortages = nr_shortages; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 7fd11ec1c9a427..2464311b038966 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1638,17 +1638,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr, - int status + const struct ib_send_wr *wr ), - TP_ARGS(wr, status), + TP_ARGS(wr), TP_STRUCT__entry( __field(const void *, cqe) __field(unsigned int, num_sge) __field(u32, inv_rkey) - __field(int, status) ), TP_fast_assign( @@ -1656,12 +1654,11 @@ TRACE_EVENT(svcrdma_post_send, __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
wr->ex.invalidate_rkey : 0; - __entry->status = status; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d", + TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", __entry->cqe, __entry->num_sge, - __entry->inv_rkey, __entry->status + __entry->inv_rkey ) ); @@ -1726,26 +1723,23 @@ TRACE_EVENT(svcrdma_wc_receive, TRACE_EVENT(svcrdma_post_rw, TP_PROTO( const void *cqe, - int sqecount, - int status + int sqecount ), - TP_ARGS(cqe, sqecount, status), + TP_ARGS(cqe, sqecount), TP_STRUCT__entry( __field(const void *, cqe) __field(int, sqecount) - __field(int, status) ), TP_fast_assign( __entry->cqe = cqe; __entry->sqecount = sqecount; - __entry->status = status; ), - TP_printk("cqe=%p sqecount=%d status=%d", - __entry->cqe, __entry->sqecount, __entry->status + TP_printk("cqe=%p sqecount=%d", + __entry->cqe, __entry->sqecount ) ); @@ -1841,6 +1835,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, DEFINE_SQ_EVENT(full); DEFINE_SQ_EVENT(retry); +TRACE_EVENT(svcrdma_sq_post_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + int status + ), + + TP_ARGS(rdma, status), + + TP_STRUCT__entry( + __field(int, avail) + __field(int, depth) + __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->avail = atomic_read(&rdma->sc_sq_avail); + __entry->depth = rdma->sc_sq_depth; + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", + __get_str(addr), __entry->avail, __entry->depth, + __entry->status + ) +); + #endif /* _TRACE_RPCRDMA_H */ #include diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 5011259b8f67aa..edbbf4bfdd9e50 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -1160,8 +1160,8 @@ enum { * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL] */ -#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0) -#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1) +#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0) +#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1) enum { TCA_TAPRIO_ATTR_UNSPEC, diff --git a/init/main.c b/init/main.c index c0206c507eba97..5cbb9fe937e0bd 100644 --- a/init/main.c +++ b/init/main.c @@ -694,7 +694,6 @@ asmlinkage __visible void __init start_kernel(void) boot_init_stack_canary(); time_init(); - printk_safe_init(); perf_event_init(); profile_init(); call_function_init(); diff --git a/ipc/util.c b/ipc/util.c index d126d156efc64e..594871610d4547 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -764,13 +764,13 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, total++; } + *new_pos = pos + 1; if (total >= ids->in_use) return NULL; for (; pos < ipc_mni; pos++) { ipc = idr_find(&ids->ipcs_idr, pos); if (ipc != NULL) { - *new_pos = pos + 1; rcu_read_lock(); ipc_lock_object(ipc); return ipc; diff --git a/kernel/audit.c b/kernel/audit.c index dfc45063cb5601..fcfbb3476ccd91 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1325,6 +1325,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_FIRST_USER_MSG2 ... 
AUDIT_LAST_USER_MSG2: if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; + /* exit early if there isn't at least one character to print */ + if (data_len < 2) + return -EINVAL; err = audit_filter(msg_type, AUDIT_FILTER_USER); if (err == 1) { /* match or error */ diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index ef49e17ae47cb0..a367fc8503933a 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -486,7 +486,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, return -EOVERFLOW; /* Make sure CPU is a valid possible cpu */ - if (!cpu_possible(key_cpu)) + if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu)) return -ENODEV; if (qsize == 0) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e1a65303cfd7f5..ae27dd77a73cb6 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1866,6 +1866,15 @@ static bool register_is_const(struct bpf_reg_state *reg) return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); } +static bool __is_pointer_value(bool allow_ptr_leaks, + const struct bpf_reg_state *reg) +{ + if (allow_ptr_leaks) + return false; + + return reg->type != SCALAR_VALUE; +} + static void save_register_state(struct bpf_func_state *state, int spi, struct bpf_reg_state *reg) { @@ -2056,6 +2065,16 @@ static int check_stack_read(struct bpf_verifier_env *env, * which resets stack/reg liveness for state transitions */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; + } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { + /* If value_regno==-1, the caller is asking us whether + * it is acceptable to use this value as a SCALAR_VALUE + * (e.g. for XADD). + * We must not allow unprivileged callers to do that + * with spilled pointers. + */ + verbose(env, "leaking pointer from stack off %d\n", + off); + return -EACCES; } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { @@ -2416,15 +2435,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, return -EACCES; } -static bool __is_pointer_value(bool allow_ptr_leaks, - const struct bpf_reg_state *reg) -{ - if (allow_ptr_leaks) - return false; - - return reg->type != SCALAR_VALUE; -} - static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) { return cur_regs(env) + regno; diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 867fd72cb26050..0a093a675b632e 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -45,7 +45,8 @@ static inline dma_addr_t phys_to_dma_direct(struct device *dev, u64 dma_direct_get_required_mask(struct device *dev) { - u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); + phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT; + u64 max_dma = phys_to_dma_direct(dev, phys); return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 15b123bdcaf535..7382fc95d41e54 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6537,9 +6537,12 @@ static u64 perf_virt_to_phys(u64 virt) * Try IRQ-safe __get_user_pages_fast first. * If failed, leave phys_addr as 0. 
*/ - if ((current->mm != NULL) && - (__get_user_pages_fast(virt, 1, 0, &p) == 1)) - phys_addr = page_to_phys(p) + virt % PAGE_SIZE; + if (current->mm != NULL) { + pagefault_disable(); + if (__get_user_pages_fast(virt, 1, 0, &p) == 1) + phys_addr = page_to_phys(p) + virt % PAGE_SIZE; + pagefault_enable(); + } if (p) put_page(p); @@ -7049,10 +7052,17 @@ static void perf_event_task_output(struct perf_event *event, goto out; task_event->event_id.pid = perf_event_pid(event, task); - task_event->event_id.ppid = perf_event_pid(event, current); - task_event->event_id.tid = perf_event_tid(event, task); - task_event->event_id.ptid = perf_event_tid(event, current); + + if (task_event->event_id.header.type == PERF_RECORD_EXIT) { + task_event->event_id.ppid = perf_event_pid(event, + task->real_parent); + task_event->event_id.ptid = perf_event_pid(event, + task->real_parent); + } else { /* PERF_RECORD_FORK */ + task_event->event_id.ppid = perf_event_pid(event, current); + task_event->event_id.ptid = perf_event_tid(event, current); + } task_event->event_id.time = perf_event_clock(event); diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index e5eb5ea7ea5982..cc4ee482d3fba8 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -108,9 +108,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; + (*pos)++; if (gcov_iter_next(iter)) return NULL; - (*pos)++; return iter; } diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index c8e6ab689d42f9..b2b0f526f249e7 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h @@ -23,6 +23,9 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args); void __printk_safe_enter(void); void __printk_safe_exit(void); +void printk_safe_init(void); +bool printk_percpu_data_ready(void); + #define printk_safe_enter_irqsave(flags) \ do { \ local_irq_save(flags); \ @@ -64,4 +67,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } #define printk_safe_enter_irq() local_irq_disable() #define printk_safe_exit_irq() local_irq_enable() +static inline void printk_safe_init(void) { } +static inline bool printk_percpu_data_ready(void) { return false; } #endif /* CONFIG_PRINTK */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index c0a5b56aea4e82..971197f5d8ee5d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -460,6 +460,18 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; +/* + * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before + * per_cpu_areas are initialised. This variable is set to true when + * it's safe to access per-CPU data. + */ +static bool __printk_percpu_data_ready __read_mostly; + +bool printk_percpu_data_ready(void) +{ + return __printk_percpu_data_ready; +} + /* Return log buffer address */ char *log_buf_addr_get(void) { @@ -1146,12 +1158,28 @@ static void __init log_buf_add_cpu(void) static inline void log_buf_add_cpu(void) {} #endif /* CONFIG_SMP */ +static void __init set_percpu_data_ready(void) +{ + printk_safe_init(); + /* Make sure we set this flag only after printk_safe() init is done */ + barrier(); + __printk_percpu_data_ready = true; +} + void __init setup_log_buf(int early) { unsigned long flags; char *new_log_buf; unsigned int free; + /* + * Some archs call setup_log_buf() multiple times - first is very + * early, e.g. from setup_arch(), and second - when percpu_areas + * are initialised. 
+ */ + if (!early) + set_percpu_data_ready(); + if (log_buf != __log_buf) return; @@ -2966,6 +2994,9 @@ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { void wake_up_klogd(void) { + if (!printk_percpu_data_ready()) + return; + preempt_disable(); if (waitqueue_active(&log_wait)) { this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); @@ -2976,6 +3007,9 @@ void wake_up_klogd(void) void defer_console_output(void) { + if (!printk_percpu_data_ready()) + return; + preempt_disable(); __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index b4045e7827431a..d9a659a686f31d 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -27,7 +27,6 @@ * There are situations when we want to make sure that all buffers * were handled or when IRQs are blocked. */ -static int printk_safe_irq_ready __read_mostly; #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \ sizeof(atomic_t) - \ @@ -51,7 +50,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq); /* Get flushed in a more safe context. */ static void queue_flush_work(struct printk_safe_seq_buf *s) { - if (printk_safe_irq_ready) + if (printk_percpu_data_ready()) irq_work_queue(&s->work); } @@ -402,14 +401,6 @@ void __init printk_safe_init(void) #endif } - /* - * In the highly unlikely event that a NMI were to trigger at - * this moment. Make sure IRQ work is set up before this - * variable is set. - */ - barrier(); - printk_safe_irq_ready = 1; - /* Flush pending messages that did not have scheduled IRQ works. */ printk_safe_flush(); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 195d0019e6bb5f..e99d326fa56934 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1233,13 +1233,8 @@ static void uclamp_fork(struct task_struct *p) return; for_each_clamp_id(clamp_id) { - unsigned int clamp_value = uclamp_none(clamp_id); - - /* By default, RT tasks always get 100% boost */ - if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) - clamp_value = uclamp_none(UCLAMP_MAX); - - uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false); + uclamp_se_set(&p->uclamp_req[clamp_id], + uclamp_none(clamp_id), false); } } diff --git a/kernel/signal.c b/kernel/signal.c index 7d3d35eb7a0ba5..595a36ab87d02e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1510,15 +1510,15 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, unsigned long flags; int ret = -EINVAL; + if (!valid_signal(sig)) + return ret; + clear_siginfo(&info); info.si_signo = sig; info.si_errno = errno; info.si_code = SI_ASYNCIO; *((sigval_t *)&info.si_pid) = addr; - if (!valid_signal(sig)) - return ret; - rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (!p) { @@ -1993,8 +1993,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig) if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) sig = 0; } + /* + * Send with __send_signal as si_pid and si_uid are in the + * parent's namespaces. 
+ */ if (valid_signal(sig) && sig) - __group_send_sig_info(sig, &info, tsk->parent); + __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 3ab8720aa2f843..b9e6c3648be1a6 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -35,13 +35,13 @@ endif ifeq ($(IS_X86),yes) OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \ - gcc -c -x assembler - >&/dev/null && \ + gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_SSSE3=1) CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ - gcc -c -x assembler - >&/dev/null && \ + gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_AVX2=1) CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \ - gcc -c -x assembler - >&/dev/null && \ + gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_AVX512=1) else ifeq ($(HAS_NEON),yes) OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e0afd582ca01f6..2af1831596f223 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5016,8 +5016,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, { pgd_t *pgd; p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; + pud_t *pud, pud_entry; + pmd_t *pmd, pmd_entry; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) @@ -5027,17 +5027,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return NULL; pud = pud_offset(p4d, addr); - if (sz != PUD_SIZE && pud_none(*pud)) + pud_entry = READ_ONCE(*pud); + if (sz != PUD_SIZE && pud_none(pud_entry)) return NULL; /* hugepage or swap? */ - if (pud_huge(*pud) || !pud_present(*pud)) + if (pud_huge(pud_entry) || !pud_present(pud_entry)) return (pte_t *)pud; pmd = pmd_offset(pud, addr); - if (sz != PMD_SIZE && pmd_none(*pmd)) + pmd_entry = READ_ONCE(*pmd); + if (sz != PMD_SIZE && pmd_none(pmd_entry)) return NULL; /* hugepage or swap? */ - if (pmd_huge(*pmd) || !pmd_present(*pmd)) + if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry)) return (pte_t *)pmd; return NULL; diff --git a/mm/ksm.c b/mm/ksm.c index 7905934cd3ad4c..e486c54d921b91 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2112,8 +2112,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) down_read(&mm->mmap_sem); vma = find_mergeable_vma(mm, rmap_item->address); - err = try_to_merge_one_page(vma, page, - ZERO_PAGE(rmap_item->address)); + if (vma) { + err = try_to_merge_one_page(vma, page, + ZERO_PAGE(rmap_item->address)); + } else { + /* + * If the vma is out of date, we do not need to + * continue. 
+ */ + err = 0; + } up_read(&mm->mmap_sem); /* * In case of failure, the page was not really empty, so we diff --git a/mm/shmem.c b/mm/shmem.c index 312e311967204a..e71b15da198542 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2403,11 +2403,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, lru_cache_add_anon(page); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->alloced++; inode->i_blocks += BLOCKS_PER_PAGE; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); inc_mm_counter(dst_mm, mm_counter_file(page)); page_add_file_rmap(page, false); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7d05834e594cb1..ad4d00bd791474 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -2976,6 +2977,7 @@ long vwrite(char *buf, char *addr, unsigned long count) * @vma: vma to cover * @uaddr: target user address to start at * @kaddr: virtual address of vmalloc kernel memory + * @pgoff: offset from @kaddr to start at * @size: size of map area * * Returns: 0 for success, -Exxx on failure @@ -2988,9 +2990,15 @@ long vwrite(char *buf, char *addr, unsigned long count) * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, - void *kaddr, unsigned long size) + void *kaddr, unsigned long pgoff, + unsigned long size) { struct vm_struct *area; + unsigned long off; + unsigned long end_index; + + if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) + return -EINVAL; size = PAGE_ALIGN(size); @@ -3004,8 +3012,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) return -EINVAL; - if (kaddr + size > area->addr + get_vm_area_size(area)) + if (check_add_overflow(size, off, &end_index) || + end_index > get_vm_area_size(area)) return -EINVAL; + kaddr += off; do { struct page *page = vmalloc_to_page(kaddr); @@ -3044,7 +3054,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { return remap_vmalloc_range_partial(vma, vma->vm_start, - addr + (pgoff << PAGE_SHIFT), + addr, pgoff, vma->vm_end - vma->vm_start); } EXPORT_SYMBOL(remap_vmalloc_range); diff --git a/net/core/datagram.c b/net/core/datagram.c index da3c24ed129cd6..189ad4c73a3fe3 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -407,6 +408,11 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) } EXPORT_SYMBOL(skb_kill_datagram); +INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr, + size_t bytes, + void *data __always_unused, + struct iov_iter *i)); + static int __skb_datagram_iter(const struct sk_buff *skb, int offset, struct iov_iter *to, int len, bool fault_short, size_t (*cb)(const void *, size_t, void *, @@ -420,7 +426,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset, if (copy > 0) { if (copy > len) copy = len; - n = cb(skb->data + offset, copy, data, to); + n = INDIRECT_CALL_1(cb, simple_copy_to_iter, + skb->data + offset, copy, data, to); offset += n; if (n != copy) goto short_copy; @@ -442,8 +449,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - n = cb(vaddr + skb_frag_off(frag) + offset - start, - copy, data, to); + n = INDIRECT_CALL_1(cb, simple_copy_to_iter, + vaddr + skb_frag_off(frag) + offset - start, + copy, data, to); 
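(Editor's illustrative sketch, not part of the patch: the two __skb_datagram_iter() hunks above route the copy callback through INDIRECT_CALL_1() so the common callback, simple_copy_to_iter, is reached by a direct call rather than a retpolined indirect branch. Assuming the helper behaves as the kernel's indirect_call_wrapper header describes when retpolines are enabled, the pattern reduces to a compare-and-direct-call; MY_INDIRECT_CALL_1 below is a hypothetical stand-in name, and likely() is the kernel's branch-prediction hint.)

/* Illustrative only: call f1 directly when the pointer matches,
 * otherwise fall back to the plain indirect call through f. */
#define MY_INDIRECT_CALL_1(f, f1, ...) \
	(likely((f) == (f1)) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

	/* usage mirroring the first hunk above */
	n = MY_INDIRECT_CALL_1(cb, simple_copy_to_iter,
			       skb->data + offset, copy, data, to);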
kunmap(page); offset += n; if (n != copy) diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f1888c68342631..01588eef0cee2a 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1999,7 +1999,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) hlist_for_each_entry_rcu(fa, fa_head, fa_list) { struct fib_info *next_fi = fa->fa_info; - struct fib_nh *nh; + struct fib_nh_common *nhc; if (fa->fa_slen != slen) continue; @@ -2022,8 +2022,8 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) fa->fa_type != RTN_UNICAST) continue; - nh = fib_info_nh(next_fi, 0); - if (!nh->fib_nh_gw4 || nh->fib_nh_scope != RT_SCOPE_LINK) + nhc = fib_info_nhc(next_fi, 0); + if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK) continue; fib_alias_accessed(fa); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 74e1d964a61528..cd4b84310d9295 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -142,11 +142,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, cand = t; } - if (flags & TUNNEL_NO_KEY) - goto skip_key_lookup; - hlist_for_each_entry_rcu(t, head, hash_node) { - if (t->parms.i_key != key || + if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) || t->parms.iph.saddr != 0 || t->parms.iph.daddr != 0 || !(t->dev->flags & IFF_UP)) @@ -158,7 +155,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, cand = t; } -skip_key_lookup: if (cand) return cand; diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index ecff3fce980736..ab343ae686d453 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -58,9 +58,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); -#ifdef CONFIG_NETFILTER IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; -#endif return xfrm_output(sk, skb); } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index debdaeba5d8c13..18d05403d3b52c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -183,15 +183,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -EBUSY; break; } - } else if (sk->sk_protocol == IPPROTO_TCP) { - if (sk->sk_prot != &tcpv6_prot) { - retv = -EBUSY; - break; - } - break; - } else { + } + if (sk->sk_protocol == IPPROTO_TCP && + sk->sk_prot != &tcpv6_prot) { + retv = -EBUSY; break; } + if (sk->sk_protocol != IPPROTO_TCP) + break; if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index eecac1b7148e5f..cf2a0ce15c1c5a 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -111,9 +111,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) { memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); -#ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; -#endif return xfrm_output(sk, skb); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index bc4bed021066dd..3e8561c3b0e7b4 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1155,8 +1155,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, IEEE80211_TX_STATUS_HEADROOM); - debugfs_hw_add(local); - /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default @@ -1248,6 +1246,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (result < 0) goto fail_wiphy_register; + debugfs_hw_add(local); + 
rate_control_add_debugfs(local); + rtnl_lock(); /* add one default STA interface if supported */ diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d09b3c789314da..36978a0e500017 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1257,15 +1257,15 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) mesh_neighbour_update(sdata, mgmt->sa, &elems, rx_status); + + if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && + !sdata->vif.csa_active) + ieee80211_mesh_process_chnswitch(sdata, &elems, true); } if (ifmsh->sync_ops) ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, &elems, rx_status); - - if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && - !sdata->vif.csa_active) - ieee80211_mesh_process_chnswitch(sdata, &elems, true); } int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) @@ -1373,6 +1373,9 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, ieee802_11_parse_elems(pos, len - baselen, true, &elems, mgmt->bssid, NULL); + if (!mesh_matches_local(sdata, &elems)) + return; + ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; if (!--ifmsh->chsw_ttl) fwd_csa = false; diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index a1e9fc7878aa33..b051f125d3af24 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -214,17 +214,16 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf, ref->ops->name, len); } -static const struct file_operations rcname_ops = { +const struct file_operations rcname_ops = { .read = rcname_read, .open = simple_open, .llseek = default_llseek, }; #endif -static struct rate_control_ref *rate_control_alloc(const char *name, - struct ieee80211_local *local) +static struct rate_control_ref * +rate_control_alloc(const char *name, struct ieee80211_local *local) { - struct dentry *debugfsdir = NULL; struct rate_control_ref *ref; ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); @@ -234,13 +233,7 @@ static struct rate_control_ref *rate_control_alloc(const char *name, if (!ref->ops) goto free; -#ifdef CONFIG_MAC80211_DEBUGFS - debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); - local->debugfs.rcdir = debugfsdir; - debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops); -#endif - - ref->priv = ref->ops->alloc(&local->hw, debugfsdir); + ref->priv = ref->ops->alloc(&local->hw); if (!ref->priv) goto free; return ref; diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 5397c6dad05612..79b44d3db171e9 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -60,6 +60,29 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta) #endif } +extern const struct file_operations rcname_ops; + +static inline void rate_control_add_debugfs(struct ieee80211_local *local) +{ +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfsdir; + + if (!local->rate_ctrl) + return; + + if (!local->rate_ctrl->ops->add_debugfs) + return; + + debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); + local->debugfs.rcdir = debugfsdir; + debugfs_create_file("name", 0400, debugfsdir, + local->rate_ctrl, &rcname_ops); + + local->rate_ctrl->ops->add_debugfs(&local->hw, local->rate_ctrl->priv, + debugfsdir); +#endif +} + void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata); /* Get a reference to the rate control algorithm. 
If `name' is NULL, get the diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 0ef2633349b5d7..f2abe1f77dfcdd 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -1631,7 +1631,7 @@ minstrel_ht_init_cck_rates(struct minstrel_priv *mp) } static void * -minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +minstrel_ht_alloc(struct ieee80211_hw *hw) { struct minstrel_priv *mp; @@ -1668,18 +1668,24 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) mp->hw = hw; mp->update_interval = 100; + minstrel_ht_init_cck_rates(mp); + + return mp; +} + #ifdef CONFIG_MAC80211_DEBUGFS +static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv, + struct dentry *debugfsdir) +{ + struct minstrel_priv *mp = priv; + mp->fixed_rate_idx = (u32) -1; debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx); debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir, &mp->sample_switch); -#endif - - minstrel_ht_init_cck_rates(mp); - - return mp; } +#endif static void minstrel_ht_free(void *priv) @@ -1718,6 +1724,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = { .alloc = minstrel_ht_alloc, .free = minstrel_ht_free, #ifdef CONFIG_MAC80211_DEBUGFS + .add_debugfs = minstrel_ht_add_debugfs, .add_sta_debugfs = minstrel_ht_add_sta_debugfs, #endif .get_expected_throughput = minstrel_ht_get_expected_throughput, diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c index 64eedc17037ad7..3d816a1e5442e6 100644 --- a/net/netfilter/nf_nat_proto.c +++ b/net/netfilter/nf_nat_proto.c @@ -1035,8 +1035,8 @@ int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops) ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops)); if (ret) - nf_nat_ipv6_unregister_fn(net, ops); - + nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, + ARRAY_SIZE(nf_nat_ipv6_ops)); return ret; } EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn); diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index d41335bad1f851..89cd9de215948f 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -208,6 +208,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, /* refcount initialized at 1 */ spin_unlock_bh(&nr_node_list_lock); + nr_neigh_put(nr_neigh); return 0; } nr_node_lock(nr_node); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 283e8f9a5fd278..8b70298857e3af 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1890,7 +1890,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net) struct hlist_head *head = &info->limits[i]; struct ovs_ct_limit *ct_limit; - hlist_for_each_entry_rcu(ct_limit, head, hlist_node) + hlist_for_each_entry_rcu(ct_limit, head, hlist_node, + lockdep_ovsl_is_held()) kfree_rcu(ct_limit, rcu); } kfree(ovs_net->ct_limit_info->limits); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 3eed90bfa2bff7..4f097bd3339e29 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2430,8 +2430,10 @@ static void __net_exit ovs_exit_net(struct net *dnet) struct net *net; LIST_HEAD(head); - ovs_ct_exit(dnet); ovs_lock(); + + ovs_ct_exit(dnet); + list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) __dp_destroy(dp); diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index a6c1349e965d94..01135e54d95d2c 100644 --- 
a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -165,15 +165,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) goto error; } - /* we want to set the don't fragment bit */ - opt = IPV6_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; - } - /* Fall through and set IPv4 options too otherwise we don't get * errors from IPv4 packets sent through the IPv6 socket. */ diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index bad3d242034466..90e263c6aa69e4 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -474,41 +474,21 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, skb->tstamp = ktime_get_real(); switch (conn->params.local->srx.transport.family) { + case AF_INET6: case AF_INET: opt = IP_PMTUDISC_DONT; - ret = kernel_setsockopt(conn->params.local->socket, - SOL_IP, IP_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(conn->params.local->socket, &msg, - iov, 2, len); - conn->params.peer->last_tx_at = ktime_get_seconds(); - - opt = IP_PMTUDISC_DO; - kernel_setsockopt(conn->params.local->socket, SOL_IP, - IP_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - } - break; - -#ifdef CONFIG_AF_RXRPC_IPV6 - case AF_INET6: - opt = IPV6_PMTUDISC_DONT; - ret = kernel_setsockopt(conn->params.local->socket, - SOL_IPV6, IPV6_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(conn->params.local->socket, &msg, - iov, 2, len); - conn->params.peer->last_tx_at = ktime_get_seconds(); - - opt = IPV6_PMTUDISC_DO; - kernel_setsockopt(conn->params.local->socket, - SOL_IPV6, IPV6_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - } + kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); + ret = kernel_sendmsg(conn->params.local->socket, &msg, + iov, 2, len); + conn->params.peer->last_tx_at = ktime_get_seconds(); + + opt = IP_PMTUDISC_DO; + kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); break; -#endif default: BUG(); diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index b1da5589a0c6a2..c48f91075b5c60 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -82,7 +82,7 @@ static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) if (q->skip_sock_check) goto skip; - if (!sk) + if (!sk || !sk_fullsock(sk)) return false; if (!sock_flag(sk, SOCK_TXTIME)) @@ -137,8 +137,9 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) struct sock_exterr_skb *serr; struct sk_buff *clone; ktime_t txtime = skb->tstamp; + struct sock *sk = skb->sk; - if (!skb->sk || !(skb->sk->sk_txtime_report_errors)) + if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors)) return; clone = skb_clone(skb, GFP_ATOMIC); @@ -154,7 +155,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */ serr->ee.ee_info = txtime; /* low part of tstamp */ - if (sock_queue_err_skb(skb->sk, clone)) + if (sock_queue_err_skb(sk, clone)) kfree_skb(clone); } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index de3c077733a7a6..dc74519286be5a 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -897,9 +897,6 @@ int svc_send(struct svc_rqst *rqstp) if (!xprt) goto out; - /* release the receive skb before sending the reply */ - xprt->xpt_ops->xpo_release_rqst(rqstp); - /* 
calculate over-all length */ xb = &rqstp->rq_res; xb->len = xb->head[0].iov_len + @@ -1028,6 +1025,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt) dprintk("svc: svc_delete_xprt(%p)\n", xprt); xprt->xpt_ops->xpo_detach(xprt); + if (xprt->xpt_bc_xprt) + xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt); spin_lock_bh(&serv->sv_lock); list_del_init(&xprt->xpt_list); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2934dd7117153f..4260924ad9dba2 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -605,6 +605,8 @@ svc_udp_sendto(struct svc_rqst *rqstp) { int error; + svc_release_udp_skb(rqstp); + error = svc_sendto(rqstp, &rqstp->rq_res); if (error == -ECONNREFUSED) /* ICMP error on earlier request. */ @@ -1137,6 +1139,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) int sent; __be32 reclen; + svc_release_skb(rqstp); + /* Set up the first element of the reply kvec. * Any other kvecs that may be in use have been taken * care of by the server implementation itself. diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 908e78bb87c64e..cf80394b2db33d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -242,6 +242,8 @@ static void xprt_rdma_bc_close(struct rpc_xprt *xprt) { dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); + + xprt_disconnect_done(xprt); xprt->cwnd = RPC_CWNDSHIFT; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 96bccd39846949..b8ee91ffedda85 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -222,6 +222,26 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, svc_rdma_recv_ctxt_destroy(rdma, ctxt); } +/** + * svc_rdma_release_rqst - Release transport-specific per-rqst resources + * @rqstp: svc_rqst being released + * + * Ensure that the recv_ctxt is released whether or not a Reply + * was sent. For example, the client could close the connection, + * or svc_process could drop an RPC, before the Reply is sent. + */ +void svc_rdma_release_rqst(struct svc_rqst *rqstp) +{ + struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt; + struct svc_xprt *xprt = rqstp->rq_xprt; + struct svcxprt_rdma *rdma = + container_of(xprt, struct svcxprt_rdma, sc_xprt); + + rqstp->rq_xprt_ctxt = NULL; + if (ctxt) + svc_rdma_recv_ctxt_put(rdma, ctxt); +} + static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt) { @@ -756,6 +776,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) __be32 *p; int ret; + rqstp->rq_xprt_ctxt = NULL; + spin_lock(&rdma_xprt->sc_rq_dto_lock); ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q); if (ctxt) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 48fe3b16b0d9c7..a59912e2666d3b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -323,8 +323,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) { ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); - trace_svcrdma_post_rw(&cc->cc_cqe, - cc->cc_sqecount, ret); if (ret) break; return 0; @@ -337,6 +335,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) trace_svcrdma_sq_retry(rdma); } while (1); + trace_svcrdma_sq_post_err(rdma, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); /* If even one was posted, there will be a completion. 
*/ diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 6fdba72f89f478..93ff7967389a29 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -306,15 +306,17 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) } svc_xprt_get(&rdma->sc_xprt); + trace_svcrdma_post_send(wr); ret = ib_post_send(rdma->sc_qp, wr, NULL); - trace_svcrdma_post_send(wr, ret); - if (ret) { - set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); - svc_xprt_put(&rdma->sc_xprt); - wake_up(&rdma->sc_send_wait); - } - break; + if (ret) + break; + return 0; } + + trace_svcrdma_sq_post_err(rdma, ret); + set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + svc_xprt_put(&rdma->sc_xprt); + wake_up(&rdma->sc_send_wait); return ret; } @@ -871,12 +873,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) wr_lst, rp_ch); if (ret < 0) goto err1; - ret = 0; - -out: - rqstp->rq_xprt_ctxt = NULL; - svc_rdma_recv_ctxt_put(rdma, rctxt); - return ret; + return 0; err2: if (ret != -E2BIG && ret != -EINVAL) @@ -885,14 +882,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp); if (ret < 0) goto err1; - ret = 0; - goto out; + return 0; err1: svc_rdma_send_ctxt_put(rdma, sctxt); err0: trace_svcrdma_send_failed(rqstp, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); - ret = -ENOTCONN; - goto out; + return -ENOTCONN; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 145a3615c31936..889220f11a7033 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -71,7 +71,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct sockaddr *sa, int salen, int flags); static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); -static void svc_rdma_release_rqst(struct svc_rqst *); static void svc_rdma_detach(struct svc_xprt *xprt); static void svc_rdma_free(struct svc_xprt *xprt); static int svc_rdma_has_wspace(struct svc_xprt *xprt); @@ -558,10 +557,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) return NULL; } -static void svc_rdma_release_rqst(struct svc_rqst *rqstp) -{ -} - /* * When connected, an svc_xprt has at least two references: * diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 5361b98f31ae4b..934e30e6753757 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2714,6 +2714,7 @@ static int bc_send_request(struct rpc_rqst *req) static void bc_close(struct rpc_xprt *xprt) { + xprt_disconnect_done(xprt); } /* diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 00e782335cb074..25bf72ee6cad07 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -115,8 +115,10 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, goto drop; } - if (!pskb_may_pull(skb, 1)) + if (!pskb_may_pull(skb, 1)) { + x25_neigh_put(nb); return 0; + } switch (skb->data[0]) { diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index cc86bf6566e42a..9894693f3be178 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) return -EINVAL; return remap_vmalloc_range_partial(vma, vma->vm_start, - mdev_state->memblk, + mdev_state->memblk, 0, vma->vm_end - vma->vm_start); } diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index a66fc0acad1e47..342618a2bccb4c 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -297,7 +297,7 @@ 
define rule_dtc endef $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE - $(call if_changed_rule,dtc) + $(call if_changed_rule,dtc,yaml) dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 82773cc35d356a..0f8c77f8471145 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -627,7 +627,7 @@ void ConfigList::updateMenuList(ConfigItem *parent, struct menu* menu) last = item; continue; } - hide: +hide: if (item && item->menu == child) { last = parent->firstChild(); if (last == item) @@ -692,7 +692,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu) last = item; continue; } - hide: +hide: if (item && item->menu == child) { last = (ConfigItem*)parent->topLevelItem(0); if (last == item) @@ -1225,10 +1225,11 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) { QMenu* popup = Parent::createStandardContextMenu(pos); QAction* action = new QAction("Show Debug Info", popup); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); - action->setChecked(showDebug()); + + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); + action->setChecked(showDebug()); popup->addSeparator(); popup->addAction(action); return popup; diff --git a/security/keys/internal.h b/security/keys/internal.h index c039373488bd94..7e991494361670 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -16,6 +16,8 @@ #include #include #include +#include +#include struct iovec; @@ -349,4 +351,14 @@ static inline void key_check(const struct key *key) #endif +/* + * Helper function to clear and free a kvmalloc'ed memory object. + */ +static inline void __kvzfree(const void *addr, size_t len) +{ + if (addr) { + memset((void *)addr, 0, len); + kvfree(addr); + } +} #endif /* _INTERNAL_H */ diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 106e16f9006b4e..5e01192e222a0b 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -339,7 +339,7 @@ long keyctl_update_key(key_serial_t id, payload = NULL; if (plen) { ret = -ENOMEM; - payload = kmalloc(plen, GFP_KERNEL); + payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; @@ -360,7 +360,7 @@ long keyctl_update_key(key_serial_t id, key_ref_put(key_ref); error2: - kzfree(payload); + __kvzfree(payload, plen); error: return ret; } @@ -827,7 +827,8 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) struct key *key; key_ref_t key_ref; long ret; - char *key_data; + char *key_data = NULL; + size_t key_data_len; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); @@ -878,24 +879,51 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) * Allocating a temporary buffer to hold the keys before * transferring them to user buffer to avoid potential * deadlock involving page fault and mmap_sem. + * + * key_data_len = (buflen <= PAGE_SIZE) + * ? buflen : actual length of key data + * + * This prevents allocating arbitrary large buffer which can + * be much larger than the actual key length. In the latter case, + * at least 2 passes of this loop is required. */ - key_data = kmalloc(buflen, GFP_KERNEL); + key_data_len = (buflen <= PAGE_SIZE) ? 
buflen : 0; + for (;;) { + if (key_data_len) { + key_data = kvmalloc(key_data_len, GFP_KERNEL); + if (!key_data) { + ret = -ENOMEM; + goto key_put_out; + } + } - if (!key_data) { - ret = -ENOMEM; - goto key_put_out; - } - ret = __keyctl_read_key(key, key_data, buflen); + ret = __keyctl_read_key(key, key_data, key_data_len); + + /* + * Read methods will just return the required length without + * any copying if the provided length isn't large enough. + */ + if (ret <= 0 || ret > buflen) + break; + + /* + * The key may change (unlikely) in between 2 consecutive + * __keyctl_read_key() calls. In this case, we reallocate + * a larger buffer and redo the key read when + * key_data_len < ret <= buflen. + */ + if (ret > key_data_len) { + if (unlikely(key_data)) + __kvzfree(key_data, key_data_len); + key_data_len = ret; + continue; /* Allocate buffer */ + } - /* - * Read methods will just return the required length without - * any copying if the provided length isn't large enough. - */ - if (ret > 0 && ret <= buflen) { if (copy_to_user(buffer, key_data, ret)) ret = -EFAULT; + break; } - kzfree(key_data); + __kvzfree(key_data, key_data_len); key_put_out: key_put(key); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 72bbfeddea24a9..1673479b4eef32 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1200,10 +1200,8 @@ static void azx_vs_set_state(struct pci_dev *pci, if (!disabled) { dev_info(chip->card->dev, "Start delayed initialization\n"); - if (azx_probe_continue(chip) < 0) { + if (azx_probe_continue(chip) < 0) dev_err(chip->card->dev, "initialization error\n"); - hda->init_failed = true; - } } } else { dev_info(chip->card->dev, "%s via vga_switcheroo\n", @@ -1336,12 +1334,15 @@ static int register_vga_switcheroo(struct azx *chip) /* * destructor */ -static int azx_free(struct azx *chip) +static void azx_free(struct azx *chip) { struct pci_dev *pci = chip->pci; struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); + if (hda->freed) + return; + if (azx_has_pm_runtime(chip) && chip->running) pm_runtime_get_noresume(&pci->dev); chip->running = 0; @@ -1385,9 +1386,8 @@ static int azx_free(struct azx *chip) if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT) snd_hdac_i915_exit(bus); - kfree(hda); - return 0; + hda->freed = 1; } static int azx_dev_disconnect(struct snd_device *device) @@ -1403,7 +1403,8 @@ static int azx_dev_disconnect(struct snd_device *device) static int azx_dev_free(struct snd_device *device) { - return azx_free(device->device_data); + azx_free(device->device_data); + return 0; } #ifdef SUPPORT_VGA_SWITCHEROO @@ -1717,7 +1718,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, if (err < 0) return err; - hda = kzalloc(sizeof(*hda), GFP_KERNEL); + hda = devm_kzalloc(&pci->dev, sizeof(*hda), GFP_KERNEL); if (!hda) { pci_disable_device(pci); return -ENOMEM; @@ -1758,7 +1759,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, err = azx_bus_init(chip, model[dev]); if (err < 0) { - kfree(hda); pci_disable_device(pci); return err; } @@ -1958,7 +1958,7 @@ static int azx_first_init(struct azx *chip) /* codec detection */ if (!azx_bus(chip)->codec_mask) { dev_err(card->dev, "no codecs found!\n"); - return -ENODEV; + /* keep running the rest for the runtime PM */ } if (azx_acquire_irq(chip, 0) < 0) @@ -2024,7 +2024,6 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream, * should be ignored from the beginning. 
*/ static const struct snd_pci_quirk driver_blacklist[] = { - SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0), SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0), SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0), {} @@ -2269,9 +2268,11 @@ static int azx_probe_continue(struct azx *chip) #endif /* create codec instances */ - err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); - if (err < 0) - goto out_free; + if (bus->codec_mask) { + err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); + if (err < 0) + goto out_free; + } #ifdef CONFIG_SND_HDA_PATCH_LOADER if (chip->fw) { @@ -2285,7 +2286,7 @@ static int azx_probe_continue(struct azx *chip) #endif } #endif - if ((probe_only[dev] & 1) == 0) { + if (bus->codec_mask && !(probe_only[dev] & 1)) { err = azx_codec_configure(chip); if (err < 0) goto out_free; @@ -2302,17 +2303,23 @@ static int azx_probe_continue(struct azx *chip) set_default_power_save(chip); - if (azx_has_pm_runtime(chip)) + if (azx_has_pm_runtime(chip)) { + pm_runtime_use_autosuspend(&pci->dev); + pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); + } out_free: - if (err < 0 || !hda->need_i915_power) + if (err < 0) { + azx_free(chip); + return err; + } + + if (!hda->need_i915_power) display_power(chip, false); - if (err < 0) - hda->init_failed = 1; complete_all(&hda->probe_wait); to_hda_bus(bus)->bus_probing = 0; - return err; + return 0; } static void azx_remove(struct pci_dev *pci) diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h index 2acfff3da1a047..3fb119f0904088 100644 --- a/sound/pci/hda/hda_intel.h +++ b/sound/pci/hda/hda_intel.h @@ -27,6 +27,7 @@ struct hda_intel { unsigned int use_vga_switcheroo:1; unsigned int vga_switcheroo_registered:1; unsigned int init_failed:1; /* delayed init failed */ + unsigned int freed:1; /* resources already released */ bool need_i915_power:1; /* the hda controller needs i915 power */ }; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 307ca1f0367624..f1febfc47ba604 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -57,6 +57,10 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info"); #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883) #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec)) +static bool enable_acomp = true; +module_param(enable_acomp, bool, 0444); +MODULE_PARM_DESC(enable_acomp, "Enable audio component binding (default=yes)"); + struct hdmi_spec_per_cvt { hda_nid_t cvt_nid; int assigned; @@ -2550,6 +2554,11 @@ static void generic_acomp_init(struct hda_codec *codec, { struct hdmi_spec *spec = codec->spec; + if (!enable_acomp) { + codec_info(codec, "audio component disabled by module option\n"); + return; + } + spec->port2pin = port2pin; setup_drm_audio_ops(codec, ops); if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 10c8b31f850772..ee1b89f2bcd5c7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -369,6 +369,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0233: case 0x10ec0235: case 0x10ec0236: + case 0x10ec0245: case 0x10ec0255: case 0x10ec0256: case 0x10ec0257: @@ -789,9 +790,11 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports) { if (!alc_subsystem_id(codec, ports)) { struct alc_spec *spec = codec->spec; - codec_dbg(codec, - "realtek: Enable default setup 
for auto mode as fallback\n"); - spec->init_amp = ALC_INIT_DEFAULT; + if (spec->init_amp == ALC_INIT_UNDEFINED) { + codec_dbg(codec, + "realtek: Enable default setup for auto mode as fallback\n"); + spec->init_amp = ALC_INIT_DEFAULT; + } } } @@ -8100,6 +8103,7 @@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; break; case 0x10ec0215: + case 0x10ec0245: case 0x10ec0285: case 0x10ec0289: spec->codec_variant = ALC269_TYPE_ALC215; @@ -9361,6 +9365,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269), HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269), HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269), diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c index 1554631cb397b1..5b7f9fcf6cbf3b 100644 --- a/sound/soc/codecs/tas571x.c +++ b/sound/soc/codecs/tas571x.c @@ -820,8 +820,10 @@ static int tas571x_i2c_probe(struct i2c_client *client, priv->regmap = devm_regmap_init(dev, NULL, client, priv->chip->regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); + if (IS_ERR(priv->regmap)) { + ret = PTR_ERR(priv->regmap); + goto disable_regs; + } priv->pdn_gpio = devm_gpiod_get_optional(dev, "pdn", GPIOD_OUT_LOW); if (IS_ERR(priv->pdn_gpio)) { @@ -845,7 +847,7 @@ static int tas571x_i2c_probe(struct i2c_client *client, ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0); if (ret) - return ret; + goto disable_regs; usleep_range(50000, 60000); @@ -861,12 +863,20 @@ static int tas571x_i2c_probe(struct i2c_client *client, */ ret = regmap_update_bits(priv->regmap, TAS571X_MVOL_REG, 1, 0); if (ret) - return ret; + goto disable_regs; } - return devm_snd_soc_register_component(&client->dev, + ret = devm_snd_soc_register_component(&client->dev, &priv->component_driver, &tas571x_dai, 1); + if (ret) + goto disable_regs; + + return ret; + +disable_regs: + regulator_bulk_disable(priv->chip->num_supply_names, priv->supplies); + return ret; } static int tas571x_i2c_remove(struct i2c_client *client) diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c index f883c9340eeee6..df8f7994d3b7a7 100644 --- a/sound/soc/intel/atom/sst-atom-controls.c +++ b/sound/soc/intel/atom/sst-atom-controls.c @@ -966,7 +966,9 @@ static int sst_set_be_modules(struct snd_soc_dapm_widget *w, dev_dbg(c->dev, "Enter: widget=%s\n", w->name); if (SND_SOC_DAPM_EVENT_ON(event)) { + mutex_lock(&drv->lock); ret = sst_send_slot_map(drv); + mutex_unlock(&drv->lock); if (ret) return ret; ret = sst_send_pipe_module_params(w, k); diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 243f683bc02a73..e62e1d7815aa9e 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -591,6 +591,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { + /* MPMAN MPWIN895CL */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MPMAN"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MSI S100 tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), diff --git 
a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index c1a7624eaf175f..2a5302f1db98aa 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -902,6 +902,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -917,6 +919,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -931,6 +935,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -946,6 +952,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -960,6 +968,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -975,6 +985,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -989,6 +1001,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -1004,6 +1018,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 9d3b546bae7bdb..0215e2c94bf085 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1076,8 +1076,18 @@ static int soc_probe_component(struct snd_soc_card *card, ret = snd_soc_dapm_add_routes(dapm, component->driver->dapm_routes, component->driver->num_dapm_routes); - if (ret < 0) - goto err_probe; + if (ret < 0) { + if (card->disable_route_checks) { + dev_info(card->dev, + "%s: disable_route_checks set, ignoring errors on add_routes\n", + __func__); + } else { + dev_err(card->dev, + "%s: snd_soc_dapm_add_routes failed: %d\n", + __func__, ret); + goto err_probe; + } + } /* see for_each_card_components */ list_add(&component->card_list, &card->component_dev_list); @@ -2067,8 +2077,18 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes, card->num_dapm_routes); - if (ret < 0) - goto probe_end; + if (ret < 0) { + if (card->disable_route_checks) { + dev_info(card->dev, + "%s: disable_route_checks set, ignoring errors on add_routes\n", + __func__); + } else { + dev_err(card->dev, + "%s: snd_soc_dapm_add_routes failed: %d\n", + __func__, ret); + goto probe_end; + } + } ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes, card->num_of_dapm_routes); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 
e0ff40b10d8554..06aa3937974970 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -423,7 +423,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, memset(&template, 0, sizeof(template)); template.reg = e->reg; - template.mask = e->mask << e->shift_l; + template.mask = e->mask; template.shift = e->shift_l; template.off_val = snd_soc_enum_item_to_val(e, 0); template.on_val = template.off_val; @@ -546,8 +546,22 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol, if (data->value == value) return false; - if (data->widget) - data->widget->on_val = value; + if (data->widget) { + switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) { + case snd_soc_dapm_switch: + case snd_soc_dapm_mixer: + case snd_soc_dapm_mixer_named_ctl: + data->widget->on_val = value & data->widget->mask; + break; + case snd_soc_dapm_demux: + case snd_soc_dapm_mux: + data->widget->on_val = value >> data->widget->shift; + break; + default: + data->widget->on_val = value; + break; + } + } data->value = value; diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c index 17453d2da45d42..a775c6b21b55a7 100644 --- a/sound/soc/sof/trace.c +++ b/sound/soc/sof/trace.c @@ -328,7 +328,10 @@ void snd_sof_free_trace(struct snd_sof_dev *sdev) { snd_sof_release_trace(sdev); - snd_dma_free_pages(&sdev->dmatb); - snd_dma_free_pages(&sdev->dmatp); + if (sdev->dma_trace_pages) { + snd_dma_free_pages(&sdev->dmatb); + snd_dma_free_pages(&sdev->dmatp); + sdev->dma_trace_pages = 0; + } } EXPORT_SYMBOL(snd_sof_free_trace); diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index d3259de43712b3..7e965848796c38 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -1543,6 +1543,9 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) return ret; } + if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) + conf = &stm32_sai_pcm_config_spdif; + ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0); if (ret) { dev_err(&pdev->dev, "Could not register pcm dma\n"); @@ -1551,15 +1554,10 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) ret = snd_soc_register_component(&pdev->dev, &stm32_component, &sai->cpu_dai_drv, 1); - if (ret) { + if (ret) snd_dmaengine_pcm_unregister(&pdev->dev); - return ret; - } - - if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) - conf = &stm32_sai_pcm_config_spdif; - return 0; + return ret; } static int stm32_sai_sub_remove(struct platform_device *pdev) diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c index 604ecb175ac581..55a55179d0f1b2 100644 --- a/sound/soc/stm/stm32_spdifrx.c +++ b/sound/soc/stm/stm32_spdifrx.c @@ -997,6 +997,8 @@ static int stm32_spdifrx_probe(struct platform_device *pdev) if (idr == SPDIFRX_IPIDR_NUMBER) { ret = regmap_read(spdifrx->regmap, STM32_SPDIFRX_VERR, &ver); + if (ret) + goto error; dev_dbg(&pdev->dev, "SPDIFRX version: %lu.%lu registered\n", FIELD_GET(SPDIFRX_VERR_MAJ_MASK, ver), diff --git a/sound/usb/format.c b/sound/usb/format.c index f4f0cf3deaf0c7..1f9ea513230a6b 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -226,6 +226,52 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof return 0; } +/* + * Many Focusrite devices supports a limited set of sampling rates per + * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type + * descriptor which has a non-standard bLength = 10. 
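+ *
+ * (Editor's aside, not part of the patch: USB descriptor fields are
+ * little-endian, so with bLength = 10 the maximum rate sits in bytes
+ * 6..9 and combine_quad(&fmt[6]) below evaluates to
+ * fmt[6] | fmt[7] << 8 | fmt[8] << 16 | fmt[9] << 24.)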
+ */ +static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip, + struct audioformat *fp, + unsigned int rate) +{ + struct usb_interface *iface; + struct usb_host_interface *alts; + unsigned char *fmt; + unsigned int max_rate; + + iface = usb_ifnum_to_if(chip->dev, fp->iface); + if (!iface) + return true; + + alts = &iface->altsetting[fp->altset_idx]; + fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, + NULL, UAC_FORMAT_TYPE); + if (!fmt) + return true; + + if (fmt[0] == 10) { /* bLength */ + max_rate = combine_quad(&fmt[6]); + + /* Validate max rate */ + if (max_rate != 48000 && + max_rate != 96000 && + max_rate != 192000 && + max_rate != 384000) { + + usb_audio_info(chip, + "%u:%d : unexpected max rate: %u\n", + fp->iface, fp->altsetting, max_rate); + + return true; + } + + return rate <= max_rate; + } + + return true; +} + /* * Helper function to walk the array of sample rate triplets reported by * the device. The problem is that we need to parse whole array first to @@ -262,6 +308,11 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, } for (rate = min; rate <= max; rate += res) { + /* Filter out invalid rates on Focusrite devices */ + if (USB_ID_VENDOR(chip->usb_id) == 0x1235 && + !focusrite_valid_sample_rate(chip, fp, rate)) + goto skip_rate; + if (fp->rate_table) fp->rate_table[nr_rates] = rate; if (!fp->rate_min || rate < fp->rate_min) @@ -276,6 +327,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, break; } +skip_rate: /* avoid endless loop */ if (res == 0) break; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 5661994e13e7d8..583edacc9fe8e9 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1755,8 +1755,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer, { struct snd_kcontrol *kctl; struct usb_mixer_elem_info *cval; + const struct usbmix_name_map *map; - if (check_ignored_ctl(find_map(imap, term->id, 0))) + map = find_map(imap, term->id, 0); + if (check_ignored_ctl(map)) return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); @@ -1788,8 +1790,12 @@ static void build_connector_control(struct usb_mixer_interface *mixer, usb_mixer_elem_info_free(cval); return; } - get_connector_control_name(mixer, term, is_input, kctl->id.name, - sizeof(kctl->id.name)); + + if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) + strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name)); + else + get_connector_control_name(mixer, term, is_input, kctl->id.name, + sizeof(kctl->id.name)); kctl->private_free = snd_usb_mixer_elem_free; snd_usb_mixer_add_control(&cval->head, kctl); } @@ -3090,6 +3096,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; + mixer->connector_map = map->connector_map; mixer->ignore_ctl_error |= map->ignore_ctl_error; break; } @@ -3171,10 +3178,32 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) return 0; } +static int delegate_notify(struct usb_mixer_interface *mixer, int unitid, + u8 *control, u8 *channel) +{ + const struct usbmix_connector_map *map = mixer->connector_map; + + if (!map) + return unitid; + + for (; map->id; map++) { + if (map->id == unitid) { + if (control && map->control) + *control = map->control; + if (channel && map->channel) + *channel = map->channel; + return map->delegated_id; + } + } + return unitid; +} + void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct 
usb_mixer_elem_list *list; + unitid = delegate_notify(mixer, unitid, NULL, NULL); + for_each_mixer_elem(list, mixer, unitid) { struct usb_mixer_elem_info *info = mixer_elem_list_to_info(list); @@ -3244,6 +3273,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, return; } + unitid = delegate_notify(mixer, unitid, &control, &channel); + for_each_mixer_elem(list, mixer, unitid) count++; diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 37e1b234c802f5..8e0fb7fdf1a008 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -6,6 +6,13 @@ struct media_mixer_ctl; +struct usbmix_connector_map { + u8 id; + u8 delegated_id; + u8 control; + u8 channel; +}; + struct usb_mixer_interface { struct snd_usb_audio *chip; struct usb_host_interface *hostif; @@ -18,6 +25,9 @@ struct usb_mixer_interface { /* the usb audio specification version this interface complies to */ int protocol; + /* optional connector delegation map */ + const struct usbmix_connector_map *connector_map; + /* Sound Blaster remote control stuff */ const struct rc_config *rc_cfg; u32 rc_code; diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 3a54ca04ec4c05..39d6c6fa5e3370 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -27,6 +27,7 @@ struct usbmix_ctl_map { u32 id; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; + const struct usbmix_connector_map *connector_map; int ignore_ctl_error; }; @@ -359,6 +360,33 @@ static const struct usbmix_name_map asus_rog_map[] = { {} }; +/* TRX40 mobos with Realtek ALC1220-VB */ +static const struct usbmix_name_map trx40_mobo_map[] = { + { 18, NULL }, /* OT, IEC958 - broken response, disabled */ + { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ + { 16, "Speaker" }, /* OT */ + { 22, "Speaker Playback" }, /* FU */ + { 7, "Line" }, /* IT */ + { 19, "Line Capture" }, /* FU */ + { 17, "Front Headphone" }, /* OT */ + { 23, "Front Headphone Playback" }, /* FU */ + { 8, "Mic" }, /* IT */ + { 20, "Mic Capture" }, /* FU */ + { 9, "Front Mic" }, /* IT */ + { 21, "Front Mic Capture" }, /* FU */ + { 24, "IEC958 Playback" }, /* FU */ + {} +}; + +static const struct usbmix_connector_map trx40_mobo_connector_map[] = { + { 10, 16 }, /* (Back) Speaker */ + { 11, 17 }, /* Front Headphone */ + { 13, 7 }, /* Line */ + { 14, 8 }, /* Mic */ + { 15, 9 }, /* Front Mic */ + {} +}; + /* * Control map entries */ @@ -480,7 +508,8 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { }, { /* Gigabyte TRX40 Aorus Pro WiFi */ .id = USB_ID(0x0414, 0xa002), - .map = asus_rog_map, + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, }, { /* ASUS ROG Zenith II */ .id = USB_ID(0x0b05, 0x1916), @@ -492,11 +521,13 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { }, { /* MSI TRX40 Creator */ .id = USB_ID(0x0db0, 0x0d64), - .map = asus_rog_map, + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, }, { /* MSI TRX40 */ .id = USB_ID(0x0db0, 0x543d), - .map = asus_rog_map, + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, }, { 0 } /* terminator */ }; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 39e27ae6c597e8..dc181066c799e3 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -1508,11 +1508,15 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol, /* use known values for that card: interface#1 altsetting#1 */ iface = usb_ifnum_to_if(chip->dev, 1); - if (!iface || iface->num_altsetting < 2) - return 
-EINVAL; + if (!iface || iface->num_altsetting < 2) { + err = -EINVAL; + goto end; + } alts = &iface->altsetting[1]; - if (get_iface_desc(alts)->bNumEndpoints < 1) - return -EINVAL; + if (get_iface_desc(alts)->bNumEndpoints < 1) { + err = -EINVAL; + goto end; + } ep = get_endpoint(alts, 0)->bEndpointAddress; err = snd_usb_ctl_msg(chip->dev, diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index d187aa6d50db0c..8c2f5c23e1b434 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3592,5 +3592,61 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, +{ + /* + * Pioneer DJ DJM-250MK2 + * PCM is 8 channels out @ 48 fixed (endpoints 0x01). + * The output from computer to the mixer is usable. + * + * The input (phono or line to computer) is not working. + * It should be at endpoint 0x82 and probably also 8 channels, + * but it seems that it works only with Pioneer proprietary software. + * Even on officially supported OS, the Audacity was unable to record + * and Mixxx to recognize the control vinyls. + */ + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 8, // outputs + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x01, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_48000, + .rate_min = 48000, + .rate_max = 48000, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 48000 } + } + }, + { + .ifnum = -1 + } + } + } +}, + +#define ALC1220_VB_DESKTOP(vend, prod) { \ + USB_DEVICE(vend, prod), \ + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \ + .vendor_name = "Realtek", \ + .product_name = "ALC1220-VB-DT", \ + .profile_name = "Realtek-ALC1220-VB-Desktop", \ + .ifnum = QUIRK_NO_INTERFACE \ + } \ +} +ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */ +ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */ +ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */ +#undef ALC1220_VB_DESKTOP #undef USB_DEVICE_VENDOR_SPEC diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index 89fa287678fc36..e0bace4d1c40c0 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -681,6 +681,8 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate) us->submitted = 2*NOOF_SETRATE_URBS; for (i = 0; i < NOOF_SETRATE_URBS; ++i) { struct urb *urb = us->urb[i]; + if (!urb) + continue; if (urb->status) { if (!err) err = -ENODEV; diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 33e2638ef7f0d5..122321d549227f 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -145,7 +145,7 @@ PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE)) GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ sort -u | wc -l) VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) @@ -217,7 +217,7 @@ check_abi: $(OUTPUT)libbpf.so "versioned in $(VERSION_SCRIPT)." 
>&2; \ readelf -s --wide $(BPF_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ readelf -s --wide $(OUTPUT)libbpf.so | \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \ diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index ce3ec81b71c013..88416be2bf9943 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -137,7 +137,7 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) struct ifinfomsg ifinfo; char attrbuf[64]; } req; - __u32 nl_pid; + __u32 nl_pid = 0; sock = libbpf_netlink_open(&nl_pid); if (sock < 0) @@ -254,7 +254,7 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) { struct xdp_id_md xdp_id = {}; int sock, ret; - __u32 nl_pid; + __u32 nl_pid = 0; __u32 mask; if (flags & ~XDP_FLAGS_MASK) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9fa4e1a46ca954..d6a971326f8790 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2306,14 +2306,27 @@ static bool ignore_unreachable_insn(struct instruction *insn) !strcmp(insn->sec->name, ".altinstr_aux")) return true; + if (!insn->func) + return false; + + /* + * CONFIG_UBSAN_TRAP inserts a UD2 when it sees + * __builtin_unreachable(). The BUG() macro has an unreachable() after + * the UD2, which causes GCC's undefined trap logic to emit another UD2 + * (or occasionally a JMP to UD2). + */ + if (list_prev_entry(insn, list)->dead_end && + (insn->type == INSN_BUG || + (insn->type == INSN_JUMP_UNCONDITIONAL && + insn->jump_dest && insn->jump_dest->type == INSN_BUG))) + return true; + /* * Check if this (or a subsequent) instruction is related to * CONFIG_UBSAN or CONFIG_KASAN. * * End the search at 5 instructions to avoid going into the weeds. 
*/ - if (!insn->func) - return false; for (i = 0; i < 5; i++) { if (is_kasan_insn(insn) || is_ubsan_insn(insn)) diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 13ccf775a83a46..ba4cbb1cdd6324 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -66,7 +66,7 @@ int orc_dump(const char *_objname) char *name; size_t nr_sections; Elf64_Addr orc_ip_addr = 0; - size_t shstrtab_idx; + size_t shstrtab_idx, strtab_idx = 0; Elf *elf; Elf_Scn *scn; GElf_Shdr sh; @@ -127,6 +127,8 @@ int orc_dump(const char *_objname) if (!strcmp(name, ".symtab")) { symtab = data; + } else if (!strcmp(name, ".strtab")) { + strtab_idx = i; } else if (!strcmp(name, ".orc_unwind")) { orc = data->d_buf; orc_size = sh.sh_size; @@ -138,7 +140,7 @@ int orc_dump(const char *_objname) } } - if (!symtab || !orc || !orc_ip) + if (!symtab || !strtab_idx || !orc || !orc_ip) return 0; if (orc_size % sizeof(*orc) != 0) { @@ -159,21 +161,29 @@ int orc_dump(const char *_objname) return -1; } - scn = elf_getscn(elf, sym.st_shndx); - if (!scn) { - WARN_ELF("elf_getscn"); - return -1; - } - - if (!gelf_getshdr(scn, &sh)) { - WARN_ELF("gelf_getshdr"); - return -1; - } - - name = elf_strptr(elf, shstrtab_idx, sh.sh_name); - if (!name || !*name) { - WARN_ELF("elf_strptr"); - return -1; + if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) { + scn = elf_getscn(elf, sym.st_shndx); + if (!scn) { + WARN_ELF("elf_getscn"); + return -1; + } + + if (!gelf_getshdr(scn, &sh)) { + WARN_ELF("gelf_getshdr"); + return -1; + } + + name = elf_strptr(elf, shstrtab_idx, sh.sh_name); + if (!name) { + WARN_ELF("elf_strptr"); + return -1; + } + } else { + name = elf_strptr(elf, strtab_idx, sym.st_name); + if (!name) { + WARN_ELF("elf_strptr"); + return -1; + } } printf("%s+%llx:", name, (unsigned long long)rela.r_addend); diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild index c4a9196d794c9d..cb80d3b811792c 100644 --- a/tools/testing/nvdimm/Kbuild +++ b/tools/testing/nvdimm/Kbuild @@ -21,8 +21,8 @@ DRIVERS := ../../../drivers NVDIMM_SRC := $(DRIVERS)/nvdimm ACPI_SRC := $(DRIVERS)/acpi/nfit DAX_SRC := $(DRIVERS)/dax -ccflags-y := -I$(src)/$(NVDIMM_SRC)/ -ccflags-y += -I$(src)/$(ACPI_SRC)/ +ccflags-y := -I$(srctree)/drivers/nvdimm/ +ccflags-y += -I$(srctree)/drivers/acpi/nfit/ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild index fb3c3d7cdb9bd1..75baebf8f4ba17 100644 --- a/tools/testing/nvdimm/test/Kbuild +++ b/tools/testing/nvdimm/test/Kbuild @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I$(src)/../../../../drivers/nvdimm/ -ccflags-y += -I$(src)/../../../../drivers/acpi/nfit/ +ccflags-y := -I$(srctree)/drivers/nvdimm/ +ccflags-y += -I$(srctree)/drivers/acpi/nfit/ obj-m += nfit_test.o obj-m += nfit_test_iomap.o diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index bf6422a6af7ffb..a8ee5c4d41ebb1 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -3164,7 +3164,9 @@ static __init int nfit_test_init(void) mcsafe_test(); dax_pmem_test(); dax_pmem_core_test(); +#ifdef CONFIG_DEV_DAX_PMEM_COMPAT dax_pmem_compat_test(); +#endif nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm); diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c index 7f6c232cd8423e..ed1c2cea1dea66 100644 --- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c 
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c @@ -88,6 +88,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "leaking pointer from stack off -8", .errstr = "R0 invalid mem access 'inv'", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, diff --git a/tools/testing/selftests/ftrace/settings b/tools/testing/selftests/ftrace/settings new file mode 100644 index 00000000000000..e7b9417537fbc4 --- /dev/null +++ b/tools/testing/selftests/ftrace/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh index 8b944cf042f6c8..315a43111e0467 100755 --- a/tools/testing/selftests/kmod/kmod.sh +++ b/tools/testing/selftests/kmod/kmod.sh @@ -505,18 +505,23 @@ function test_num() fi } -function get_test_count() +function get_test_data() { test_num $1 - TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + local field_num=$(echo $1 | sed 's/^0*//') + echo $ALL_TESTS | awk '{print $'$field_num'}' +} + +function get_test_count() +{ + TEST_DATA=$(get_test_data $1) LAST_TWO=${TEST_DATA#*:*} echo ${LAST_TWO%:*} } function get_test_enabled() { - test_num $1 - TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + TEST_DATA=$(get_test_data $1) echo ${TEST_DATA#*:*:} } diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 796670ebc65b37..6560ed796ac48e 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -749,6 +749,29 @@ ipv4_fcnal_runtime() run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" log_test $? 0 "Ping - multipath" + run_cmd "$IP ro delete 172.16.101.1/32 nhid 122" + + # + # multiple default routes + # - tests fib_select_default + run_cmd "$IP nexthop add id 501 via 172.16.1.2 dev veth1" + run_cmd "$IP ro add default nhid 501" + run_cmd "$IP ro add default via 172.16.1.3 dev veth1 metric 20" + run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" + log_test $? 0 "Ping - multiple default routes, nh first" + + # flip the order + run_cmd "$IP ro del default nhid 501" + run_cmd "$IP ro del default via 172.16.1.3 dev veth1 metric 20" + run_cmd "$IP ro add default via 172.16.1.2 dev veth1 metric 20" + run_cmd "$IP nexthop replace id 501 via 172.16.1.3 dev veth1" + run_cmd "$IP ro add default nhid 501 metric 20" + run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" + log_test $? 0 "Ping - multiple default routes, nh second" + + run_cmd "$IP nexthop delete nhid 501" + run_cmd "$IP ro del default" + # # IPv4 with blackhole nexthops # diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 09854f8a0b5713..4811067d9b0535 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -618,16 +618,22 @@ fib_nexthop_test() fib_suppress_test() { + echo + echo "FIB rule with suppress_prefixlength" + setup + $IP link add dummy1 type dummy $IP link set dummy1 up $IP -6 route add default dev dummy1 $IP -6 rule add table main suppress_prefixlength 0 - ping -f -c 1000 -W 1 1234::1 || true + ping -f -c 1000 -W 1 1234::1 >/dev/null 2>&1 $IP -6 rule del table main suppress_prefixlength 0 $IP link del dummy1 # If we got here without crashing, we're good. 
- return 0 + log_test 0 0 "FIB rule suppress test" + + cleanup } ################################################################################ diff --git a/tools/vm/Makefile b/tools/vm/Makefile index 20f6cf04377f0c..9860622cbb1511 100644 --- a/tools/vm/Makefile +++ b/tools/vm/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for vm tools # +include ../scripts/Makefile.include + TARGETS=page-types slabinfo page_owner_sort LIB_DIR = ../lib/api
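
The soc-core hunks earlier in this section only relax DAPM route checking when the machine driver explicitly opts in. As a rough usage sketch (not taken from this patch: the card name and the .owner line are placeholders, and disable_route_checks is assumed to be the snd_soc_card member those hunks reference):

#include <linux/module.h>
#include <sound/soc.h>

/* Hypothetical machine-driver card: with the flag set, failures from
 * snd_soc_dapm_add_routes() are logged and ignored rather than aborting
 * the card probe. */
static struct snd_soc_card example_card = {
	.name = "example-audio-card",
	.owner = THIS_MODULE,
	.disable_route_checks = true,
};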
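
The focusrite_valid_sample_rate() helper added to sound/usb/format.c above keys off a vendor-specific Format Type descriptor with bLength = 10 and reads the maximum rate from its last four bytes via combine_quad(&fmt[6]). A minimal standalone sketch of that byte layout, with the offsets taken from the comment in the hunk rather than from the USB audio spec:

#include <stdint.h>
#include <stdbool.h>

/* Sketch only: mirrors what combine_quad(&fmt[6]) reads from the
 * non-standard 10-byte Focusrite Format Type descriptor. */
static bool read_focusrite_max_rate(const uint8_t *fmt, unsigned int *max_rate)
{
	if (fmt[0] != 10)	/* bLength: only the quirky 10-byte variant */
		return false;

	/* bytes 6..9: maximum sample rate, little-endian */
	*max_rate = fmt[6] | (fmt[7] << 8) | (fmt[8] << 16) |
		    ((unsigned int)fmt[9] << 24);
	return true;
}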
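
The connector_map plumbing added to the USB mixer above redirects jack-detect notifications from the terminal that raised them to the unit whose kcontrol was actually created (see the TRX40 table and delegate_notify()). In outline, the lookup behaves like this self-contained sketch; the struct and the two ids are simplified copies for illustration, not the driver's own types:

/* Sketch only: simplified delegate_notify()-style lookup using two of
 * the TRX40 mappings added above. */
struct example_connector_map {
	unsigned char id;		/* unit that raised the interrupt */
	unsigned char delegated_id;	/* unit whose control gets updated */
};

static const struct example_connector_map example_map[] = {
	{ 10, 16 },	/* speaker jack   -> "Speaker" controls        */
	{ 11, 17 },	/* headphone jack -> "Front Headphone" controls */
	{ }		/* terminator */
};

static int example_delegate(const struct example_connector_map *map, int unitid)
{
	for (; map && map->id; map++)
		if (map->id == unitid)
			return map->delegated_id;
	return unitid;	/* no mapping: notify the original unit */
}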
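
The objtool change above special-cases the instruction pattern produced when CONFIG_UBSAN_TRAP meets BUG(): the compiler lowers __builtin_unreachable() to a trap, so two ud2 instructions (or a ud2 plus a jump to a ud2) can appear back to back, and the second must not be reported as unreachable. A hand-written approximation of that source pattern, illustrative only and not the real BUG() expansion:

/* Sketch: with CONFIG_UBSAN_TRAP, the __builtin_unreachable() below is
 * itself lowered to a ud2, leaving a second, never-reached ud2 after the
 * one emitted by hand. */
static inline void example_bug(void)
{
	asm volatile("ud2");		/* dead-end, as emitted by BUG() */
	__builtin_unreachable();	/* UBSAN_TRAP emits another ud2 here */
}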
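
The orc_dump.c hunk above resolves names differently for section symbols and named symbols, which is why it now also records the index of .strtab. Roughly, the lookup reduces to the sketch below (libelf calls, error handling trimmed; the helper name is made up):

#include <gelf.h>

/* Sketch: section symbols take their name from the section header string
 * table; anything else is looked up by st_name in .strtab. */
static const char *example_sym_name(Elf *elf, const GElf_Sym *sym,
				    size_t shstrtab_idx, size_t strtab_idx)
{
	GElf_Shdr sh;
	Elf_Scn *scn;

	if (GELF_ST_TYPE(sym->st_info) == STT_SECTION) {
		scn = elf_getscn(elf, sym->st_shndx);
		if (!scn || !gelf_getshdr(scn, &sh))
			return NULL;
		return elf_strptr(elf, shstrtab_idx, sh.sh_name);
	}

	return elf_strptr(elf, strtab_idx, sym->st_name);
}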