diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 2c31208528d565..7eb9366422f54b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -250,6 +250,14 @@ tcp_base_mss - INTEGER Path MTU discovery (MTU probing). If MTU probing is enabled, this is the initial MSS used by the connection. +tcp_min_snd_mss - INTEGER + TCP SYN and SYNACK messages usually advertise an ADVMSS option, + as described in RFC 1122 and RFC 6691. + If this ADVMSS option is smaller than tcp_min_snd_mss, + it is silently capped to tcp_min_snd_mss. + + Default : 48 (at least 8 bytes of payload per segment) + tcp_congestion_control - STRING Set the congestion control algorithm to be used for new connections. The algorithm "reno" is always available, but diff --git a/Makefile b/Makefile index f7e7e365e2fff9..a76c61f77bcd38 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 50 +SUBLEVEL = 56 EXTRAVERSION = NAME = "People's Front" @@ -652,6 +652,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) @@ -696,7 +697,6 @@ ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index ef149f59929ae3..d131c54acd3ec0 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -175,6 +175,7 @@ interrupt-names = "macirq"; phy-mode = "rgmii"; snps,pbl = <32>; + snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; clock-names = "stmmaceth"; phy-handle = <&phy0>; @@ -183,6 +184,9 @@ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ dma-coherent; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + mdio { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index d819de1c5d10ed..3ea4112c8302a8 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) #endif /* CONFIG_ARC_HAS_LLSC */ -#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \ - (unsigned long)(o), (unsigned long)(n))) +#define cmpxchg(ptr, o, n) ({ \ + (typeof(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n)); \ +}) /* * atomic_cmpxchg is same as cmpxchg @@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, return __xchg_bad_pointer(); } -#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \ - sizeof(*(ptr)))) +#define xchg(ptr, with) ({ \ + (typeof(*(ptr)))__xchg((unsigned long)(with), \ + (ptr), \ + sizeof(*(ptr))); \ +}) #endif /* 
CONFIG_ARC_PLAT_EZNPS */ diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 4097764fea2349..fa18c00b0cfd7d 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, struct pt_regs *regs) { struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; - unsigned int pd0[mmu->ways]; unsigned long flags; - int set; + int set, n_ways = mmu->ways; + + n_ways = min(n_ways, 4); + BUG_ON(mmu->ways > 4); local_irq_save(flags); @@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, for (set = 0; set < mmu->sets; set++) { int is_valid, way; + unsigned int pd0[4]; /* read out all the ways of current set */ - for (way = 0, is_valid = 0; way < mmu->ways; way++) { + for (way = 0, is_valid = 0; way < n_ways; way++) { write_aux_reg(ARC_REG_TLBINDEX, SET_WAY_TO_IDX(mmu, set, way)); write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); @@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, continue; /* Scan the set for duplicate ways: needs a nested loop */ - for (way = 0; way < mmu->ways - 1; way++) { + for (way = 0; way < n_ways - 1; way++) { int n; if (!pd0[way]) continue; - for (n = way + 1; n < mmu->ways; n++) { + for (n = way + 1; n < n_ways; n++) { if (pd0[way] != pd0[n]) continue; diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index c9063ffca524c8..3fd9a1676d881a 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -410,6 +410,7 @@ vqmmc-supply = <&ldo1_reg>; bus-width = <4>; cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ + no-1-8-v; }; &mmc2 { diff --git a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi index baba7b00eca797..fdca4818691609 100644 --- a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi +++ b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi @@ -22,7 +22,7 @@ * * Datamanual Revisions: * - * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017 + * DRA76x Silicon Revision 1.0: SPRS993E, Revised December 2018 * */ @@ -169,25 +169,25 @@ /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf { pinctrl-pin-array = < - 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ - 0x194 A_DELAY_PS(0) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */ - 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ - 0x1ac A_DELAY_PS(85) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ - 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ - 0x1b8 A_DELAY_PS(139) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ - 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ - 0x1c4 A_DELAY_PS(69) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ - 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */ - 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ - 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ - 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ - 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ - 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ - 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ - 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ - 0x200 A_DELAY_PS(36) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ - 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ - 0x368 A_DELAY_PS(72) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(350) G_DELAY_PS(174) /* 
CFG_GPMC_A19_OUT */ + 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(335) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(339) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(219) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */ + 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(236) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + 0x368 A_DELAY_PS(372) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts index cdda614e417e74..a370857beac0d3 100644 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts @@ -106,6 +106,7 @@ regulator-name = "PVDD_APIO_1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo3_reg: LDO3 { @@ -144,6 +145,7 @@ regulator-name = "PVDD_ABB_1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo9_reg: LDO9 { diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi index 7fae2ffb76fe2e..ab522c2da6df66 100644 --- a/arch/arm/boot/dts/imx50.dtsi +++ b/arch/arm/boot/dts/imx50.dtsi @@ -420,7 +420,7 @@ reg = <0x63fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin"; diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 5c4ba91e43ba4b..ef2abc0978439d 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -481,7 +481,7 @@ reg = <0x83fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 6386185ae23403..b6b0818343c4e5 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -701,7 +701,7 @@ reg = <0x63fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin"; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 61d2d26afbf4d9..00d44a60972f7a 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -905,7 +905,7 @@ compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6QDL_CLK_SDMA>, + clocks = <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 
7a4f5dace9026b..2fa88c6f188201 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -739,7 +739,7 @@ reg = <0x020ec000 0x4000>; interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6SL_CLK_SDMA>, - <&clks IMX6SL_CLK_SDMA>; + <&clks IMX6SL_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; /* imx6sl reuses imx6q sdma firmware */ diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 3e6ffaf5f104f0..7c7d5c47578e20 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -591,7 +591,7 @@ compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6SLL_CLK_SDMA>, + clocks = <&clks IMX6SLL_CLK_IPG>, <&clks IMX6SLL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 50083cecc6c970..7b62e6fb47ebe7 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -803,7 +803,7 @@ compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6SX_CLK_SDMA>, + clocks = <&clks IMX6SX_CLK_IPG>, <&clks IMX6SX_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index 6dc0b569acdf4c..2366f093cc76d8 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -707,7 +707,7 @@ "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6UL_CLK_SDMA>, + clocks = <&clks IMX6UL_CLK_IPG>, <&clks IMX6UL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index a052198f6e9631..a7f697b0290fff 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -1050,8 +1050,8 @@ compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma"; reg = <0x30bd0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_SDMA_CORE_CLK>, - <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>; + clocks = <&clks IMX7D_IPG_ROOT_CLK>, + <&clks IMX7D_SDMA_CORE_CLK>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index cba23eaa607215..7a88f160b1fbe9 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -6,6 +6,7 @@ #include #include +/* number of IPIS _not_ including IPI_CPU_BACKTRACE */ #define NR_IPI 7 typedef struct { diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index a3ce7c5365fa8e..bada66ef441934 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -76,6 +76,10 @@ enum ipi_msg_type { IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, + /* + * CPU_BACKTRACE is special and not included in NR_IPI + * or tracable with trace_ipi_* + */ IPI_CPU_BACKTRACE, /* * SGI8-15 can be reserved by secure firmware, and thus may @@ -803,7 +807,7 @@ core_initcall(register_cpufreq_notifier); static void raise_nmi(cpumask_t *mask) { - smp_cross_call(mask, IPI_CPU_BACKTRACE); + __smp_cross_call(mask, IPI_CPU_BACKTRACE); } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index d2b5ec9c4b9293..ba88b1eca93c4c 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o 
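The ARM SMP hunk above moves IPI_CPU_BACKTRACE outside of NR_IPI and switches raise_nmi() to call __smp_cross_call() directly. A minimal sketch of why, assuming a traced wrapper shaped like the one in mainline at the time (the tracepoint name and the ipi_types[] table are assumptions of this sketch, not taken from this patch):

/*
 * Sketch only: the traced wrapper indexes a per-IPI name table with exactly
 * NR_IPI entries, so it must not be used for IPI_CPU_BACKTRACE, which is
 * numbered NR_IPI and therefore has no table entry.
 */
static const char *ipi_types[NR_IPI];                     /* names for IPIs 0..NR_IPI-1 */

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]); /* valid only for ipinr < NR_IPI */
	__smp_cross_call(target, ipinr);                   /* raw arch hook, no tracing */
}

static void raise_nmi(cpumask_t *mask)
{
	/* bypass the traced wrapper: there is no ipi_types[] entry for this IPI */
	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
}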
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index b1fe53e8b46095..088c34e99b02ff 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -434,8 +434,27 @@ static void exynos3250_pm_resume(void) static void exynos5420_prepare_pm_resume(void) { + unsigned int mpidr, cluster; + + mpidr = read_cpuid_mpidr(); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) WARN_ON(mcpm_cpu_powered_up()); + + if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) { + /* + * When system is resumed on the LITTLE/KFC core (cluster 1), + * the DSCR is not properly updated until the power is turned + * on also for the cluster 0. Enable it for a while to + * propagate the SPNIDEN and SPIDEN signals from Secure JTAG + * block and avoid undefined instruction issue on CP14 reset. + */ + pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN, + EXYNOS_COMMON_CONFIGURATION(0)); + pmu_raw_writel(0, + EXYNOS_COMMON_CONFIGURATION(0)); + } } static void exynos5420_pm_resume(void) diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c index fd0053e47a1511..3708a71f30e62b 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sx.c +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c @@ -15,6 +15,7 @@ #include "common.h" #include "cpuidle.h" +#include "hardware.h" static int imx6sx_idle_finish(unsigned long val) { @@ -110,7 +111,7 @@ int __init imx6sx_cpuidle_init(void) * except for power up sw2iso which need to be * larger than LDO ramp up time. */ - imx_gpc_set_arm_power_up_timing(0xf, 1); + imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 
0xf : 0x2, 1); imx_gpc_set_arm_power_down_timing(1, 1); return cpuidle_register(&imx6sx_cpuidle_driver, NULL); diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index f4971e4a86b268..ca7026958d425a 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -51,10 +51,12 @@ static int amx3_common_init(void) /* CEFUSE domain can be turned off post bootup */ cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm"); - if (cefuse_pwrdm) - omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); - else + if (!cefuse_pwrdm) pr_err("PM: Failed to get cefuse_pwrdm\n"); + else if (omap_type() != OMAP2_DEVICE_TYPE_GP) + pr_info("PM: Leaving EFUSE power domain active\n"); + else + omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); return 0; } diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 35649ee8ad56b6..c12ff63265a946 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -51,6 +51,7 @@ endif KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +KBUILD_CFLAGS += -Wno-psabi KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ad8be16a39c9d1..58102652bf9e38 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -20,7 +20,7 @@ #include #include -typedef long (*syscall_fn_t)(struct pt_regs *regs); +typedef long (*syscall_fn_t)(const struct pt_regs *regs); extern const syscall_fn_t sys_call_table[]; diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index a4477e515b798d..507d0ee6bc6900 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -30,10 +30,10 @@ } \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#define COMPAT_SYSCALL_DEFINE0(sname) \ - asmlinkage long __arm64_compat_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ - asmlinkage long __arm64_compat_sys_##sname(void) +#define COMPAT_SYSCALL_DEFINE0(sname) \ + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL_COMPAT(name) \ cond_syscall(__arm64_compat_sys_##name); @@ -62,11 +62,11 @@ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #ifndef SYSCALL_DEFINE0 -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __arm64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ - asmlinkage long __arm64_sys_##sname(void) +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused) #endif #ifndef COND_SYSCALL diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index ad64d2c92ef53d..5dff8eccd17d4c 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -64,8 +64,6 @@ #ifndef __ASSEMBLY__ -#include - /* * User structures for general purpose, floating point and debug registers. 
*/ @@ -112,10 +110,10 @@ struct user_sve_header { /* * Common SVE_PT_* flags: - * These must be kept in sync with prctl interface in + * These must be kept in sync with prctl interface in */ -#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16) -#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16) +#define SVE_PT_VL_INHERIT ((1 << 17) /* PR_SVE_VL_INHERIT */ >> 16) +#define SVE_PT_VL_ONEXEC ((1 << 18) /* PR_SVE_SET_VL_ONEXEC */ >> 16) /* diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c index 3432e5ef9f4188..388f8fc130800f 100644 --- a/arch/arm64/kernel/ssbd.c +++ b/arch/arm64/kernel/ssbd.c @@ -4,6 +4,7 @@ */ #include +#include #include #include diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index 162a95ed088138..fe20c461582a1e 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c @@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality) return ksys_personality(personality); } +asmlinkage long sys_ni_syscall(void); + +asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused) +{ + return sys_ni_syscall(); +} + /* * Wrappers to pass the pt_regs argument. */ #define __arm64_sys_personality __arm64_sys_arm64_personality -asmlinkage long sys_ni_syscall(const struct pt_regs *); -#define __arm64_sys_ni_syscall sys_ni_syscall - #undef __SYSCALL #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, const syscall_fn_t sys_call_table[__NR_syscalls] = { - [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, + [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall, #include }; diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index 0f8bcb7de70086..3c80a40c1c9d6d 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c @@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode, return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len)); } -asmlinkage long sys_ni_syscall(const struct pt_regs *); -#define __arm64_sys_ni_syscall sys_ni_syscall - #undef __SYSCALL #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = { - [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, + [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall, #include }; diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 2fabc2dc1966f3..feef06fc7c5a8d 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,6 +10,7 @@ KVM=../../../../virt/kvm obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 8080c9f489c3e4..0fa558176fb109 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -921,13 +921,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) int __init arch_ioremap_pud_supported(void) { - /* only 4k granule supports level 1 block mappings */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES); + /* + * Only 4k granule supports level 1 block mappings. 
+ * SW table walks can't handle removal of intermediate entries. + */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && + !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); } int __init arch_ioremap_pmd_supported(void) { - return 1; + /* See arch_ioremap_pud_supported() */ + return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); } int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index aa19b7ac8222a2..476c7b4be3787e 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr) return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0); } +EXPORT_SYMBOL(paddr_to_nid); #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) /* diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 89950b7bf536b7..bdaf3536241a2d 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -41,7 +41,19 @@ char *mips_get_machine_name(void) #ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - return add_memory_region(base, size, BOOT_MEM_RAM); + if (base >= PHYS_ADDR_MAX) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return; + } + + /* Truncate the passed memory region instead of type casting */ + if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) { + pr_warn("Truncate memory region %llx @ %llx to size %llx\n", + size, base, PHYS_ADDR_MAX - base); + size = PHYS_ADDR_MAX - base; + } + + add_memory_region(base, size, BOOT_MEM_RAM); } int __init early_init_dt_reserve_memory_arch(phys_addr_t base, diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c index 4aaff3b3175c4c..6dbe4eab0a0e83 100644 --- a/arch/mips/kernel/uprobes.c +++ b/arch/mips/kernel/uprobes.c @@ -112,9 +112,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) */ aup->resume_epc = regs->cp0_epc + 4; if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { - unsigned long epc; - - epc = regs->cp0_epc; __compute_return_epc_for_insn(regs, (union mips_instruction) aup->insn[0]); aup->resume_epc = regs->cp0_epc; diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 933423fa5144ac..b0db61188a6128 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h @@ -60,19 +60,19 @@ ((exponent < (SGL_P - 1)) ? 
\ (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE) -#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH) +#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0) #define Sgl_roundnearest_from_int(int_value,sgl_value) \ if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \ - if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \ + if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \ Sall(sgl_value)++ #define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \ - ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB)) + (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB)) #define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \ if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \ - if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \ + if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \ Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++ #define Dint_isinexact_to_dbl(dint_value) \ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index bccc5051249e1a..2b6049e839706c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -299,6 +299,7 @@ struct kvm_arch { #ifdef CONFIG_PPC_BOOK3S_64 struct list_head spapr_tce_tables; struct list_head rtas_tokens; + struct mutex rtas_token_lock; DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); #endif #ifdef CONFIG_KVM_MPIC diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 2b713539123137..d9d5391b2af6f9 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -336,6 +336,7 @@ #define PPC_INST_MULLI 0x1c000000 #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 +#define PPC_INST_DIVDU 0x7c000392 #define PPC_INST_RLWINM 0x54000000 #define PPC_INST_RLWIMI 0x50000000 #define PPC_INST_RLDICL 0x78000000 diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 87348e498c89e9..281f074581a3b0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) #ifdef CONFIG_PPC64 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); INIT_LIST_HEAD(&kvm->arch.rtas_tokens); + mutex_init(&kvm->arch.rtas_token_lock); #endif return kvm->arch.kvm_ops->init_vm(kvm); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3e3a71594e6319..083dcedba11ce1 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { - struct kvm_vcpu *ret; - - mutex_lock(&kvm->lock); - ret = kvm_get_vcpu_by_id(kvm, id); - mutex_unlock(&kvm->lock); - return ret; + return kvm_get_vcpu_by_id(kvm, id); } static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) @@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; - mutex_lock(&kvm->lock); spin_lock(&vc->lock); /* * If ILE (interrupt little-endian) has changed, update the @@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, mask &= 0xFFFFFFFF; vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); - mutex_unlock(&kvm->lock); } static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, diff --git 
a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 2d3b2b1cc272b0..8f2355138f80bc 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name) { struct rtas_token_definition *d, *tmp; - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { if (rtas_name_matches(d->handler->name, name)) { @@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token) bool found; int i; - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry(d, &kvm->arch.rtas_tokens, list) { if (d->token == token) @@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.rtas_token_lock); if (args.token) rc = rtas_token_define(kvm, args.name, args.token); else rc = rtas_token_undefine(kvm, args.name); - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.rtas_token_lock); return rc; } @@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) orig_rets = args.rets; args.rets = &args.args[be32_to_cpu(args.nargs)]; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.rtas_token_lock); rc = -ENOENT; list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { @@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) } } - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); if (rc == 0) { args.rets = orig_rets; @@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm) { struct rtas_token_definition *d, *tmp; - lockdep_assert_held(&kvm->lock); - list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { list_del(&d->list); kfree(d); diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index dbd8f762140b69..68984d85ad6b8f 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -53,14 +53,48 @@ int hash__alloc_context_id(void) } EXPORT_SYMBOL_GPL(hash__alloc_context_id); +static int realloc_context_ids(mm_context_t *ctx) +{ + int i, id; + + /* + * id 0 (aka. ctx->id) is special, we always allocate a new one, even if + * there wasn't one allocated previously (which happens in the exec + * case where ctx is newly allocated). + * + * We have to be a bit careful here. We must keep the existing ids in + * the array, so that we can test if they're non-zero to decide if we + * need to allocate a new one. However in case of error we must free the + * ids we've allocated but *not* any of the existing ones (or risk a + * UAF). That's why we decrement i at the start of the error handling + * loop, to skip the id that we just tested but couldn't reallocate. 
+ */ + for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) { + if (i == 0 || ctx->extended_id[i]) { + id = hash__alloc_context_id(); + if (id < 0) + goto error; + + ctx->extended_id[i] = id; + } + } + + /* The caller expects us to return id */ + return ctx->id; + +error: + for (i--; i >= 0; i--) { + if (ctx->extended_id[i]) + ida_free(&mmu_context_ida, ctx->extended_id[i]); + } + + return id; +} + static int hash__init_new_context(struct mm_struct *mm) { int index; - index = hash__alloc_context_id(); - if (index < 0) - return index; - /* * The old code would re-promote on fork, we don't do that when using * slices as it could cause problem promoting slices that have been @@ -78,6 +112,10 @@ static int hash__init_new_context(struct mm_struct *mm) if (mm->context.id == 0) slice_init_new_context_exec(mm); + index = realloc_context_ids(&mm->context); + if (index < 0) + return index; + subpage_prot_init_new_context(mm); pkey_mm_init(mm); diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 68dece206048f5..e5c1d30ee968b4 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -116,7 +116,7 @@ ___PPC_RA(a) | IMM_L(i)) #define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \ +#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(b)) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 226eec62d1255a..279a51bf94d052 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -372,12 +372,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg); + PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg); PPC_MULD(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]); PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, src_reg); + PPC_DIVDU(dst_reg, dst_reg, src_reg); break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ @@ -405,7 +405,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; case BPF_ALU64: if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_2], dst_reg, + PPC_DIVDU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]); PPC_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], @@ -413,7 +413,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, + PPC_DIVDU(dst_reg, dst_reg, b2p[TMP_REG_1]); break; } diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 3d27f02695e41f..828f6656f8f745 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) struct imc_pmu *pmu_ptr; u32 offset; + /* Return for unknown domain */ + if (domain < 0) + return -EINVAL; + /* memory for pmu */ pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); if (!pmu_ptr) diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 88401d5125bcc0..523dbfbac03dde 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -29,6 +29,7 @@ #include 
#include +#include /* * This routine handles page faults. It determines the address and the @@ -281,6 +282,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs) pte_k = pte_offset_kernel(pmd_k, addr); if (!pte_present(*pte_k)) goto no_context; + + /* + * The kernel assumes that TLBs don't cache invalid + * entries, but in RISC-V, SFENCE.VMA specifies an + * ordering constraint, not a cache flush; it is + * necessary even after writing invalid entries. + * Relying on flush_tlb_fix_spurious_fault would + * suffice, but the extra traps reduce + * performance. So, eagerly SFENCE.VMA. + */ + local_flush_tlb_page(addr); + return; } } diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 8c00fd509c4555..1a6a7092d94209 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -221,16 +221,22 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind) { register unsigned long reg0 asm ("0") = qid | (3UL << 24); - register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl; - register struct ap_queue_status reg1_out asm ("1"); + register union { + unsigned long value; + struct ap_qirq_ctrl qirqctrl; + struct ap_queue_status status; + } reg1 asm ("1"); register void *reg2 asm ("2") = ind; + reg1.qirqctrl = qirqctrl; + asm volatile( ".long 0xb2af0000" /* PQAP(AQIC) */ - : "=d" (reg1_out) - : "d" (reg0), "d" (reg1_in), "d" (reg2) + : "+d" (reg1) + : "d" (reg0), "d" (reg2) : "cc"); - return reg1_out; + + return reg1.status; } /* @@ -264,17 +270,21 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, { register unsigned long reg0 asm ("0") = qid | (5UL << 24) | ((ifbit & 0x01) << 22); - register unsigned long reg1_in asm ("1") = apinfo->val; - register struct ap_queue_status reg1_out asm ("1"); + register union { + unsigned long value; + struct ap_queue_status status; + } reg1 asm ("1"); register unsigned long reg2 asm ("2"); + reg1.value = apinfo->val; + asm volatile( ".long 0xb2af0000" /* PQAP(QACT) */ - : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2) + : "+d" (reg1), "=d" (reg2) : "d" (reg0) : "cc"); apinfo->val = reg2; - return reg1_out; + return reg1.status; } /** diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 40f651292aa789..9c7dc970e96653 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -10,6 +10,12 @@ #define JUMP_LABEL_NOP_SIZE 6 #define JUMP_LABEL_NOP_OFFSET 2 +#if __GNUC__ < 9 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X" +#else +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd" +#endif + /* * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. 
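The s390 ap_aqic()/ap_qact() rework above (arch/s390/include/asm/ap.h) replaces two register variables that were both bound to general register 1 (one input view, one output view) with a single union bound once and passed as a "+d" operand. A small standalone sketch of that one-object-two-views pattern; the status layout here is invented purely for illustration and is not the real struct ap_queue_status:

#include <stdio.h>

/* Invented, illustration-only layout; the real struct ap_queue_status differs. */
struct queue_status {
	unsigned int queue_empty     : 1;
	unsigned int replies_waiting : 1;
	unsigned int queue_full      : 1;
	unsigned int pad             : 21;
	unsigned int response_code   : 8;
};

/*
 * One storage location, two views: a raw word for the asm operand list
 * ("+d" (reg1)) and a structured view for the surrounding C code.
 */
union reg_view {
	unsigned long value;
	struct queue_status status;
};

int main(void)
{
	union reg_view reg1;

	reg1.value = 0;                 /* what the instruction would read */
	/* ... instruction executes, leaving its result in the same register ... */
	reg1.status.response_code = 3;  /* pretend result */

	printf("raw=%#lx response_code=%u\n",
	       reg1.value, (unsigned int)reg1.status.response_code);
	return 0;
}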
@@ -19,9 +25,9 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" - ".quad 0b, %l[label], %0\n" + ".quad 0b, %l[label], %0+%1\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: @@ -33,9 +39,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("0: brcl 15, %l[label]\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" - ".quad 0b, %l[label], %0\n" + ".quad 0b, %l[label], %0+%1\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index ad6b91013a0525..5332f628c1edc8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -56,8 +56,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n); unsigned long __must_check raw_copy_to_user(void __user *to, const void *from, unsigned long n); +#ifndef CONFIG_KASAN #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER +#endif #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f538e3fac7ade9..fc7de27960e73d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4156,21 +4156,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - int rc; - - /* If the basics of the memslot do not change, we do not want - * to update the gmap. Every update causes several unnecessary - * segment translation exceptions. This is usually handled just - * fine by the normal fault handler + gmap, but it will also - * cause faults on the prefix page of running guest CPUs. 
- */ - if (old->userspace_addr == mem->userspace_addr && - old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && - old->npages * PAGE_SIZE == mem->memory_size) - return; + int rc = 0; - rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, - mem->guest_phys_addr, mem->memory_size); + switch (change) { + case KVM_MR_DELETE: + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, + old->npages * PAGE_SIZE); + break; + case KVM_MR_MOVE: + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, + old->npages * PAGE_SIZE); + if (rc) + break; + /* FALLTHROUGH */ + case KVM_MR_CREATE: + rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, + mem->guest_phys_addr, mem->memory_size); + break; + case KVM_MR_FLAGS_ONLY: + break; + default: + WARN(1, "Unknown KVM MR CHANGE: %d\n", change); + } if (rc) pr_warn("failed to commit memory region\n"); return; diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 39a2503fa3e18e..51028abe5e9038 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -357,6 +357,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node, node_info->vdev_port.id = *idp; node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL); + if (!node_info->vdev_port.name) + return -1; node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp; return 0; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 67b3e6b3ce5d7c..1ad5911f62b416 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event, s64 period = hwc->sample_period; int ret = 0; + /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ + if (unlikely(period != hwc->last_period)) + left = period - (hwc->last_period - left); + if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 052de4c8acb2ec..0c572a48158e8d 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt) static struct clock_event_device timer_clockevent = { .name = "posix-timer", .rating = 250, - .cpumask = cpu_all_mask, + .cpumask = cpu_possible_mask, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_state_shutdown = itimer_shutdown, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 09c53bcbd497d6..c8b0bf2b0d5e40 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3072,7 +3072,7 @@ static int intel_pmu_hw_config(struct perf_event *event) return ret; if (event->attr.precise_ip) { - if (!(event->attr.freq || event->attr.wakeup_events)) { + if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event))) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b7b01d762d32a3..e91814d1a27f34 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). 
*/ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), EVENT_CONSTRAINT_END }; @@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END @@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { struct event_constraint intel_slm_pebs_event_constraints[] = { /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END @@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). 
*/ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ @@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ @@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = { struct event_constraint intel_skl_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 6a25278e009290..da1f5e78363e91 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -819,8 +819,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); - /* Fix erratum 1076: CPB feature bit not being set in CPUID. */ - if (!cpu_has(c, X86_FEATURE_CPB)) + /* + * Fix erratum 1076: CPB feature bit not being set in CPUID. + * Always set it, except when running under a hypervisor. 
+ */ + if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); } diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c index b0f3aed76b7507..3d4ec80a6bb96e 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c @@ -371,6 +371,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct list_head *head; struct rdtgroup *entry; + if (!is_mbm_local_enabled()) + return; + r_mba = &rdt_resources_all[RDT_RESOURCE_MBA]; closid = rgrp->closid; rmid = rgrp->mon.rmid; diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 643670fb894348..274d220d0a83da 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -2379,7 +2379,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) if (closid_allocated(i) && i != closid) { mode = rdtgroup_mode_by_closid(i); if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - break; + continue; used_b |= *ctrl; if (mode == RDT_MODE_SHAREABLE) d->new_ctrl |= *ctrl; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b43ddefd77f45a..b7027e667604e9 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -873,7 +873,7 @@ int __init microcode_init(void) goto out_ucode_group; register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 58ead7db71a312..952aebd0a8a34f 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -282,20 +282,16 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) { bool fast_mode = idx & (1u << 31); struct kvm_pmc *pmc; - u64 ctr_val; + u64 mask = fast_mode ? 
~0u : ~0ull; if (is_vmware_backdoor_pmc(idx)) return kvm_pmu_rdpmc_vmware(vcpu, idx, data); - pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx); + pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask); if (!pmc) return 1; - ctr_val = pmc_read_counter(pmc); - if (fast_mode) - ctr_val = (u32)ctr_val; - - *data = ctr_val; + *data = pmc_read_counter(pmc) & mask; return 0; } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index ba8898e1a8542c..22dff661145a1b 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -25,7 +25,8 @@ struct kvm_pmu_ops { unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); - struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx); + struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx, + u64 *mask); int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx); bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data); diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index 1495a735b38e75..41dff881e0f03d 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } /* idx is the ECX register of RDPMC instruction */ -static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx) +static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); struct kvm_pmc *counters; diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 5ab4a364348e3c..c3f103e2b08e15 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, - unsigned idx) + unsigned idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); bool fixed = idx & (1u << 30); @@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, if (fixed && idx >= pmu->nr_arch_fixed_counters) return NULL; counters = fixed ? pmu->fixed_counters : pmu->gp_counters; + *mask &= pmu->counter_bitmask[fixed ? 
KVM_PMC_FIXED : KVM_PMC_GP]; return &counters[idx]; } @@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) *data = pmu->global_ovf_ctrl; return 0; default: - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || - (pmc = get_fixed_pmc(pmu, msr))) { - *data = pmc_read_counter(pmc); + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { + u64 val = pmc_read_counter(pmc); + *data = val & pmu->counter_bitmask[KVM_PMC_GP]; + return 0; + } else if ((pmc = get_fixed_pmc(pmu, msr))) { + u64 val = pmc_read_counter(pmc); + *data = val & pmu->counter_bitmask[KVM_PMC_FIXED]; return 0; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { *data = pmc->eventsel; @@ -235,11 +240,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } break; default: - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || - (pmc = get_fixed_pmc(pmu, msr))) { - if (!msr_info->host_initiated) - data = (s64)(s32)data; - pmc->counter += data - pmc_read_counter(pmc); + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { + if (msr_info->host_initiated) + pmc->counter = data; + else + pmc->counter = (s32)data; + return 0; + } else if ((pmc = get_fixed_pmc(pmu, msr))) { + pmc->counter = data; return 0; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e3e77527f8dff8..4bfd14d5da8e54 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -198,7 +198,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) if (!pgtable_l5_enabled()) return (p4d_t *)pgd; - p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; + p4d = pgd_val(*pgd) & PTE_PFN_MASK; p4d += __START_KERNEL_map - phys_base; return (p4d_t *)p4d + p4d_index(addr); } diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 0988971069c930..bfe769209eaef7 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -51,7 +51,7 @@ static __initdata struct kaslr_memory_region { } kaslr_regions[] = { { &page_offset_base, 0 }, { &vmalloc_base, 0 }, - { &vmemmap_base, 1 }, + { &vmemmap_base, 0 }, }; /* Get size in bytes used by the memory region */ @@ -77,6 +77,7 @@ void __init kernel_randomize_memory(void) unsigned long rand, memory_tb; struct rnd_state rand_state; unsigned long remain_entropy; + unsigned long vmemmap_size; vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; vaddr = vaddr_start; @@ -108,6 +109,14 @@ void __init kernel_randomize_memory(void) if (memory_tb < kaslr_regions[0].size_tb) kaslr_regions[0].size_tb = memory_tb; + /* + * Calculate the vmemmap region size in TBs, aligned to a TB + * boundary. 
+ */ + vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) * + sizeof(struct page); + kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT); + /* Calculate entropy available between regions */ remain_entropy = vaddr_end - vaddr_start; for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 52e55108404ebf..d3a73f9335e11c 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = { void __init pcibios_irq_init(void) { + struct irq_routing_table *rtable = NULL; + DBG(KERN_DEBUG "PCI: IRQ init\n"); if (raw_pci_ops == NULL) @@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void) pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS - if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) + if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) { pirq_table = pcibios_get_irq_routing_table(); + rtable = pirq_table; + } #endif if (pirq_table) { pirq_peer_trick(); @@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void) * If we're using the I/O APIC, avoid using the PCI IRQ * routing table */ - if (io_apic_assign_pci_irqs) + if (io_apic_assign_pci_irqs) { + kfree(rtable); pirq_table = NULL; + } } x86_init.pci.fixup_irqs(); diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 351283b60df617..a285fbd0fd9be9 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start; extern char _SecondaryResetVector_text_end; #endif -static inline int mem_reserve(unsigned long start, unsigned long end) +static inline int __init_memblock mem_reserve(unsigned long start, + unsigned long end) { return memblock_reserve(start, end - start); } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 15e8c9955b793a..6bb397995610e9 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2509,6 +2509,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && bfq_symmetric_scenario(bfqd)) sl = min_t(u64, sl, BFQ_MIN_TT); + else if (bfqq->wr_coeff > 1) + sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC); bfqd->last_idling_start = ktime_get(); hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), diff --git a/block/blk-core.c b/block/blk-core.c index 33488b1426b7be..6eed5d84c2efd4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -411,7 +411,6 @@ void blk_sync_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } else { diff --git a/block/blk-mq.c b/block/blk-mq.c index 4e563ee462cb65..70d839b9c3b09c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2465,6 +2465,8 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx; unsigned int i; + cancel_delayed_work_sync(&q->requeue_work); + /* hctx kobj stays in hctx */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a7c2673ffd36e8..1806260938e89a 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -948,8 +948,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev) u32 sys_target = acpi_target_system_state(); int ret, state; - if (!pm_runtime_suspended(dev) || !adev || - device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + if (!pm_runtime_suspended(dev) || !adev || 
(adev->wakeup.flags.valid && + device_may_wakeup(dev) != !!adev->wakeup.prepare_count)) return true; if (sys_target == ACPI_STATE_S0) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index adf28788cab52c..133fed8e4a8bb9 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4476,9 +4476,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ - { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, - { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + /* drives which fail FPDMA_AA activation (some may freeze afterwards) + the ST disks also have LPM issues */ + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA | + ATA_HORKAGE_NOLPM, }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA | + ATA_HORKAGE_NOLPM, }, { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 64191694ff6e96..9cfdbea493bb37 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -835,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = { RK3288_CLKSEL_CON(10), RK3288_CLKSEL_CON(33), RK3288_CLKSEL_CON(37), + + /* We turn aclk_dmac1 on for suspend; this will restore it */ + RK3288_CLKGATE_CON(10), }; static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)]; @@ -850,6 +853,14 @@ static int rk3288_clk_suspend(void) readl_relaxed(rk3288_cru_base + reg_id); } + /* + * Going into deep sleep (specifically setting PMU_CLR_DMA in + * RK3288_PMU_PWRMODE_CON1) appears to fail unless + * "aclk_dmac1" is on. + */ + writel_relaxed(1 << (12 + 16), + rk3288_cru_base + RK3288_CLKGATE_CON(10)); + /* * Switch PLLs other than DPLL (for SDRAM) to slow mode to * avoid crashes on resume. 
The Mask ROM on the system will diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 421b0539222058..ca3218337fd74f 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -137,9 +137,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw) int ret; union omap4_timeout timeout = { 0 }; - if (!clk->enable_bit) - return 0; - if (clk->clkdm) { ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); if (ret) { @@ -151,6 +148,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw) } } + if (!clk->enable_bit) + return 0; + val = ti_clk_ll_ops->clk_readl(&clk->enable_reg); val &= ~OMAP4_MODULEMODE_MASK; @@ -179,7 +179,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw) union omap4_timeout timeout = { 0 }; if (!clk->enable_bit) - return; + goto exit; val = ti_clk_ll_ops->clk_readl(&clk->enable_reg); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index c4eb55e3011c9f..c05ef7f1d7b66c 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, return vchan_tx_prep(&chan->vc, &first->vd, flags); err_desc_get: - axi_desc_put(first); + if (first) + axi_desc_put(first); return NULL; } diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1fbf9cb9b74297..89c5e5b4606870 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -597,7 +597,7 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - idma64->dma.dev = chip->dev; + idma64->dma.dev = chip->sysdev; dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); @@ -637,6 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev) { struct idma64_chip *chip; struct device *dev = &pdev->dev; + struct device *sysdev = dev->parent; struct resource *mem; int ret; @@ -653,11 +654,12 @@ static int idma64_platform_probe(struct platform_device *pdev) if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); if (ret) return ret; chip->dev = dev; + chip->sysdev = sysdev; ret = idma64_probe(chip); if (ret) diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index 6b816878e5e7a7..baa32e1425de31 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h @@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) /** * struct idma64_chip - representation of iDMA 64-bit controller hardware * @dev: struct device of the DMA controller + * @sysdev: struct device of the physical device that does DMA * @irq: irq line * @regs: memory mapped I/O space * @idma64: struct idma64 that is filed by idma64_probe() */ struct idma64_chip { struct device *dev; + struct device *sysdev; int irq; void __iomem *regs; struct idma64 *idma64; diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 55df0d41355b06..1ed1c7efa28853 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -663,7 +663,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; hw->frg_len = temp; - hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK; + hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK; hw->trsc_len = len & 
SPRD_DMA_TRSC_LEN_MASK; temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 57304b2e989f2c..b00cc03ad6b670 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -250,8 +250,8 @@ config EDAC_PND2 micro-server but may appear on others in the future. config EDAC_MPC85XX - tristate "Freescale MPC83xx / MPC85xx" - depends on FSL_SOC + bool "Freescale MPC83xx / MPC85xx" + depends on FSL_SOC && EDAC=y help Support for error detection and correction on the Freescale MPC8349, MPC8560, MPC8540, MPC8548, T4240 diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index 0e81d33af856a6..c9a613dc9eb7e4 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata, region->pages[0], 0, region->length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&pdata->dev->dev, region->iova)) { + if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) { dev_err(&pdata->dev->dev, "failed to map for dma\n"); ret = -EFAULT; goto unpin_pages; diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index a9b521bccb06db..ab361ec78df4c3 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -40,6 +40,13 @@ enum dfl_fpga_devt_type { DFL_FPGA_DEVT_MAX, }; +static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX]; + +static const char *dfl_pdata_key_strings[DFL_ID_MAX] = { + "dfl-fme-pdata", + "dfl-port-pdata", +}; + /** * dfl_dev_info - dfl feature device information. * @name: name string of the feature platform device. @@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) struct platform_device *fdev = binfo->feature_dev; struct dfl_feature_platform_data *pdata; struct dfl_feature_info *finfo, *p; + enum dfl_id_type type; int ret, index = 0; if (!fdev) return 0; + type = feature_dev_id_type(fdev); + if (WARN_ON_ONCE(type >= DFL_ID_MAX)) + return -EINVAL; + /* * we do not need to care for the memory which is associated with * the platform device. After calling platform_device_unregister(), @@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) pdata->num = binfo->feature_num; pdata->dfl_cdev = binfo->cdev; mutex_init(&pdata->lock); + lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], + dfl_pdata_key_strings[type]); /* * the count should be initialized to 0 to make sure @@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) ret = platform_device_add(binfo->feature_dev); if (!ret) { - if (feature_dev_id_type(binfo->feature_dev) == PORT_ID) + if (type == PORT_ID) dfl_fpga_cdev_add_port_dev(binfo->cdev, binfo->feature_dev); else diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 4f52c3a8ec99bf..ed51221621a5cb 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -784,6 +784,7 @@ config GPIO_ADP5588 config GPIO_ADP5588_IRQ bool "Interrupt controller support for ADP5588" depends on GPIO_ADP5588=y + select GPIOLIB_IRQCHIP help Say yes here to enable the adp5588 to be used as an interrupt controller. It requires the driver to be built in the kernel. 
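
The dfl.c hunk above introduces the dfl_pdata_keys[] / dfl_pdata_key_strings[] arrays and calls lockdep_set_class_and_name() from build_info_commit_dev() so that FME and port feature-device pdata mutexes land in separate lockdep classes. A minimal kernel-side sketch of the same idiom, using hypothetical demo_* names rather than the driver's own structures:

#include <linux/mutex.h>
#include <linux/lockdep.h>

enum demo_dev_type { DEMO_FME, DEMO_PORT, DEMO_TYPE_MAX };

static struct lock_class_key demo_pdata_keys[DEMO_TYPE_MAX];

static const char *demo_pdata_key_strings[DEMO_TYPE_MAX] = {
	"demo-fme-pdata",
	"demo-port-pdata",
};

struct demo_pdata {
	struct mutex lock;
};

static void demo_pdata_init(struct demo_pdata *pdata, enum demo_dev_type type)
{
	mutex_init(&pdata->lock);
	/* One lock class per device type, named so lockdep reports stay readable. */
	lockdep_set_class_and_name(&pdata->lock, &demo_pdata_keys[type],
				   demo_pdata_key_strings[type]);
}

With distinct classes, taking a DEMO_PORT lock while a DEMO_FME lock is held is tracked as an ordering between two classes rather than looking like recursive locking of a single class.
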
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 6c1acf642c8e92..6fa430d98517cc 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -343,6 +343,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset) } } +/* + * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain. + * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs + * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none + * are capable waking up the system from off mode. + */ +static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask) +{ + u32 no_wake = bank->non_wakeup_gpios; + + if (no_wake) + return !!(~no_wake & gpio_mask); + + return false; +} + static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, unsigned trigger) { @@ -374,13 +390,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, } /* This part needs to be executed always for OMAP{34xx, 44xx} */ - if (!bank->regs->irqctrl) { - /* On omap24xx proceed only when valid GPIO bit is set */ - if (bank->non_wakeup_gpios) { - if (!(bank->non_wakeup_gpios & gpio_bit)) - goto exit; - } - + if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) { /* * Log the edge gpio and manually trigger the IRQ * after resume if the input level changes @@ -393,7 +403,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, bank->enabled_non_wakeup_gpios &= ~gpio_bit; } -exit: bank->level_mask = readl_relaxed(bank->base + bank->regs->leveldetect0) | readl_relaxed(bank->base + bank->regs->leveldetect1); diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 7e09ce75ffb2a5..a9cb5571de5459 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -37,6 +37,7 @@ struct fsl_gpio_soc_data { struct vf610_gpio_port { struct gpio_chip gc; + struct irq_chip ic; void __iomem *base; void __iomem *gpio_base; const struct fsl_gpio_soc_data *sdata; @@ -66,8 +67,6 @@ struct vf610_gpio_port { #define PORT_INT_EITHER_EDGE 0xb #define PORT_INT_LOGIC_ONE 0xc -static struct irq_chip vf610_gpio_irq_chip; - static const struct fsl_gpio_soc_data imx_data = { .have_paddr = true, }; @@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable) return 0; } -static struct irq_chip vf610_gpio_irq_chip = { - .name = "gpio-vf610", - .irq_ack = vf610_gpio_irq_ack, - .irq_mask = vf610_gpio_irq_mask, - .irq_unmask = vf610_gpio_irq_unmask, - .irq_set_type = vf610_gpio_irq_set_type, - .irq_set_wake = vf610_gpio_irq_set_wake, -}; - static int vf610_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -259,6 +249,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) struct vf610_gpio_port *port; struct resource *iores; struct gpio_chip *gc; + struct irq_chip *ic; int i; int ret; @@ -295,6 +286,14 @@ static int vf610_gpio_probe(struct platform_device *pdev) gc->direction_output = vf610_gpio_direction_output; gc->set = vf610_gpio_set; + ic = &port->ic; + ic->name = "gpio-vf610"; + ic->irq_ack = vf610_gpio_irq_ack; + ic->irq_mask = vf610_gpio_irq_mask; + ic->irq_unmask = vf610_gpio_irq_unmask; + ic->irq_set_type = vf610_gpio_irq_set_type; + ic->irq_set_wake = vf610_gpio_irq_set_wake; + ret = gpiochip_add_data(gc, port); if (ret < 0) return ret; @@ -306,14 +305,13 @@ static int vf610_gpio_probe(struct platform_device *pdev) /* Clear the interrupt status register for all GPIO's */ vf610_gpio_writel(~0, 
port->base + PORT_ISFR); - ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0, - handle_edge_irq, IRQ_TYPE_NONE); + ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add irqchip\n"); gpiochip_remove(gc); return ret; } - gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq, + gpiochip_set_chained_irqchip(gc, ic, port->irq, vf610_gpio_irq_handler); return 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index bf8b68f8db4f7f..bce5741f2952ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -388,6 +388,10 @@ void dpp1_cnv_setup ( default: break; } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, CNVC_SURFACE_PIXEL_FORMAT, pixel_format); REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); @@ -399,7 +403,7 @@ void dpp1_cnv_setup ( for (i = 0; i < 12; i++) tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - tbl_entry.color_space = input_color_space; + tbl_entry.color_space = color_space; if (color_space >= COLOR_SPACE_YCBCR601) select = INPUT_CSC_SELECT_ICSC; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a0355709abd15c..7736ef123e9be0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1890,7 +1890,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, - COLOR_SPACE_YCBCR601_LIMITED); + plane_state->color_space); //set scale and bias registers build_prescale_params(&bns_params, plane_state); diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index e4d67b70244d57..e69d996eabadce 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, clk_disable_unprepare(hdlcd->clk); } -static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) +static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - struct drm_display_mode *mode = &state->adjusted_mode; long rate, clk_rate = mode->clock * 1000; rate = clk_round_rate(hdlcd->clk, clk_rate); - if (rate != clk_rate) { + /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */ + if (abs(rate - clk_rate) * 1000 > clk_rate) { /* clock required by mode not supported by hardware */ - return -EINVAL; + return MODE_NOCLOCK; } - return 0; + return MODE_OK; } static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, @@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .atomic_check = hdlcd_crtc_atomic_check, + .mode_valid = hdlcd_crtc_mode_valid, .atomic_begin = hdlcd_crtc_atomic_begin, .atomic_enable = hdlcd_crtc_atomic_enable, .atomic_disable = hdlcd_crtc_atomic_disable, diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 94d6dabec2dc80..1ab511e3324321 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ 
b/drivers/gpu/drm/arm/malidp_drv.c @@ -190,6 +190,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; struct malidp_drm *malidp = drm->dev_private; + int loop = 5; malidp->event = malidp->crtc.state->event; malidp->crtc.state->event = NULL; @@ -204,8 +205,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) drm_crtc_vblank_get(&malidp->crtc); /* only set config_valid if the CRTC is enabled */ - if (malidp_set_and_wait_config_valid(drm) < 0) + if (malidp_set_and_wait_config_valid(drm) < 0) { + /* + * make a loop around the second CVAL setting and + * try 5 times before giving up. + */ + while (loop--) { + if (!malidp_set_and_wait_config_valid(drm)) + break; + } DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + } + } else if (malidp->event) { /* CRTC inactive means vblank IRQ is disabled, send event directly */ spin_lock_irq(&drm->event_lock); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 85c2d407a52e1a..e7ddd3e3db9209 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511, vsync_polarity = 1; } - if (mode->vrefresh <= 24000) + if (drm_mode_vrefresh(mode) <= 24) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ; - else if (mode->vrefresh <= 25000) + else if (drm_mode_vrefresh(mode) <= 25) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ; - else if (mode->vrefresh <= 30000) + else if (drm_mode_vrefresh(mode) <= 30) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ; else low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7c581f4c2b94bd..5965f6383ada34 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1580,6 +1580,50 @@ static void connector_bad_edid(struct drm_connector *connector, } } +/* Get override or firmware EDID */ +static struct edid *drm_get_override_edid(struct drm_connector *connector) +{ + struct edid *override = NULL; + + if (connector->override_edid) + override = drm_edid_duplicate(connector->edid_blob_ptr->data); + + if (!override) + override = drm_load_edid_firmware(connector); + + return IS_ERR(override) ? NULL : override; +} + +/** + * drm_add_override_edid_modes - add modes from override/firmware EDID + * @connector: connector we're probing + * + * Add modes from the override/firmware EDID, if available. Only to be used from + * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe + * failed during drm_get_edid() and caused the override/firmware EDID to be + * skipped. + * + * Return: The number of modes added or 0 if we couldn't find any. 
+ */ +int drm_add_override_edid_modes(struct drm_connector *connector) +{ + struct edid *override; + int num_modes = 0; + + override = drm_get_override_edid(connector); + if (override) { + drm_connector_update_edid_property(connector, override); + num_modes = drm_add_edid_modes(connector, override); + kfree(override); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n", + connector->base.id, connector->name, num_modes); + } + + return num_modes; +} +EXPORT_SYMBOL(drm_add_override_edid_modes); + /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -1607,15 +1651,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; - struct edid *override = NULL; - - if (connector->override_edid) - override = drm_edid_duplicate(connector->edid_blob_ptr->data); - - if (!override) - override = drm_load_edid_firmware(connector); + struct edid *override; - if (!IS_ERR_OR_NULL(override)) + override = drm_get_override_edid(connector); + if (override) return override; if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a1bb157bfdfaeb..d18b7e27ef64c9 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -479,6 +479,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, count = (*connector_funcs->get_modes)(connector); + /* + * Fallback for when DDC probe failed in drm_get_edid() and thus skipped + * override/firmware EDID. + */ + if (count == 0 && connector->status == connector_status_connected) + count = drm_add_override_edid_modes(connector); + if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); count += drm_helper_probe_add_cmdline_mode(connector); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 9146e30e24a6de..468dff2f79040e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) return; etnaviv_dump_core = false; + mutex_lock(&gpu->mmu->lock); + mmu_size = etnaviv_iommu_dump_size(gpu->mmu); /* We always dump registers, mmu, ring and end marker */ @@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL); if (!iter.start) { + mutex_unlock(&gpu->mmu->lock); dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); return; } @@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) obj->base.size); } + mutex_unlock(&gpu->mmu->lock); + etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 812fe7b06f8738..1817a5c0c80fd1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -925,6 +925,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } +static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, + u8 audio_state) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT, + &audio_state, 1); +} + #if 0 static void 
intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -1371,11 +1378,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, else sdvox |= SDVO_PIPE_SEL(crtc->pipe); - if (crtc_state->has_audio) { - WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4); - sdvox |= SDVO_AUDIO_ENABLE; - } - if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || @@ -1515,8 +1517,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - if (sdvox & SDVO_AUDIO_ENABLE) - pipe_config->has_audio = true; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, + &val, 1)) { + u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT; + + if ((val & mask) == mask) + pipe_config->has_audio = true; + } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &val, 1)) { @@ -1529,6 +1536,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, pipe_config->pixel_multiplier, encoder_pixel_multiplier); } +static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) +{ + intel_sdvo_set_audio_state(intel_sdvo, 0); +} + +static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct drm_connector *connector = conn_state->connector; + u8 *eld = connector->eld; + + eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; + + intel_sdvo_set_audio_state(intel_sdvo, 0); + + intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, + SDVO_HBUF_TX_DISABLED, + eld, drm_eld_size(eld)); + + intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID | + SDVO_AUDIO_PRESENCE_DETECT); +} + static void intel_disable_sdvo(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *conn_state) @@ -1538,6 +1571,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); u32 temp; + if (old_crtc_state->has_audio) + intel_sdvo_disable_audio(intel_sdvo); + intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, @@ -1623,6 +1659,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); + + if (pipe_config->has_audio) + intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status @@ -2514,7 +2553,6 @@ static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; - struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; @@ -2551,9 +2589,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - /* gen3 doesn't do the hdmi bits in the SDVO register */ - if (INTEL_GEN(dev_priv) >= 4 && - intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 
intel_sdvo->is_hdmi = true; } diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index db0ed499268ae2..e9ba3b047f9321 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 + #define SDVO_AUDIO_ELD_VALID (1 << 0) + #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1) + #define SDVO_AUDIO_CP_READY (1 << 2) #define SDVO_CMD_SET_HBUF_INDEX 0x93 #define SDVO_HBUF_INDEX_ELD 0 #define SDVO_HBUF_INDEX_AVI_IF 1 diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index e48c5eb35b4988..66c125a6b0b3c3 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -41,6 +41,7 @@ struct nv50_disp_interlock { NV50_DISP_INTERLOCK__SIZE } type; u32 data; + u32 wimm; }; void corec37d_ntfy_init(struct nouveau_bo *, u32); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 4f57e53797968e..d81a99bb2ac319 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) asyh->set.or = head->func->or != NULL; } - if (asyh->state.mode_changed) + if (asyh->state.mode_changed || asyh->state.connectors_changed) nv50_head_atomic_check_mode(head, asyh); if (asyh->state.color_mgmt_changed || diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c index 9103b8494279c6..f7dbd965e4e729 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c @@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, return ret; } + wndw->interlock.wimm = wndw->interlock.data; wndw->immd = func; return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 2187922e8dc28d..b3db4553098d57 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -151,7 +151,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock, if (asyw->set.point) { if (asyw->set.point = false, asyw->set.mask) interlock[wndw->interlock.type] |= wndw->interlock.data; - interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data; + interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm; wndw->immd->point(wndw, asyw); wndw->immd->update(wndw, interlock); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 8edb9f2a426945..e4b977cc845289 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -169,7 +169,11 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) return drm_legacy_mmap(filp, vma); +#else + return -EINVAL; +#endif return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 5f301e632599b4..818d21bd28d312 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, 
u32 dataKBps) * and it's better to have a failed modeset than that. */ for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) - failsafe = cfg; + if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { + /* Try to respect sink limits too when selecting + * lowest link configuration. + */ + if (!failsafe || + (cfg->nr <= sink_nr && cfg->bw <= sink_bw)) + failsafe = cfg; + } + if (failsafe && cfg[1].rate < dataKBps) break; } diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 754f6b25f2652e..6d9f78612deeb8 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm) dev_err(drm->dev, "CLCD: unable to get clcdclk.\n"); return PTR_ERR(parent); } + + spin_lock_init(&priv->tim2_lock); + /* If the clock divider is broken, use the parent directly */ if (priv->variant->broken_clockdivider) { priv->clk = parent; return 0; } parent_name = __clk_get_name(parent); - - spin_lock_init(&priv->tim2_lock); div->init = &init; ret = devm_clk_hw_register(drm->dev, div); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index ab39315c9078bb..39e608271263db 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -818,6 +818,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, drm_atomic_set_fb_for_plane(plane->state, state->fb); } + swap(plane->state->fb, state->fb); /* Set the cursor's position on the screen. This is the * expected change from the drm_mode_cursor_universal() * helper. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c3e2022bda5d62..3834aa71c9c4c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2493,7 +2493,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX || + cmd->body.type < SVGA3D_SHADERTYPE_MIN) { DRM_ERROR("Illegal shader type %u.\n", (unsigned) cmd->body.type); return -EINVAL; @@ -2732,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, if (view_type == vmw_view_max) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); + if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { + DRM_ERROR("Invalid surface id.\n"); + return -EINVAL; + } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->sid, &srf_node); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 8b9270f314091a..e4e09d47c5c0e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel) return 0; } +/** + * vmw_port_hb_out - Send the message payload either through the + * high-bandwidth port if available, or through the backdoor otherwise. + * @channel: The rpc channel. + * @msg: NULL-terminated message. + * @hb: Whether the high-bandwidth port is available. + * + * Return: The port status. 
+ */ +static unsigned long vmw_port_hb_out(struct rpc_channel *channel, + const char *msg, bool hb) +{ + unsigned long si, di, eax, ebx, ecx, edx; + unsigned long msg_len = strlen(msg); + + if (hb) { + unsigned long bp = channel->cookie_high; + + si = (uintptr_t) msg; + di = channel->cookie_low; + + VMW_PORT_HB_OUT( + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, + msg_len, si, di, + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, bp, + eax, ebx, ecx, edx, si, di); + + return ebx; + } + + /* HB port not available. Send the message 4 bytes at a time. */ + ecx = MESSAGE_STATUS_SUCCESS << 16; + while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) { + unsigned int bytes = min_t(size_t, msg_len, 4); + unsigned long word = 0; + + memcpy(&word, msg, bytes); + msg_len -= bytes; + msg += bytes; + si = channel->cookie_high; + di = channel->cookie_low; + + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16), + word, si, di, + VMW_HYPERVISOR_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); + } + + return ecx; +} + +/** + * vmw_port_hb_in - Receive the message payload either through the + * high-bandwidth port if available, or through the backdoor otherwise. + * @channel: The rpc channel. + * @reply: Pointer to buffer holding reply. + * @reply_len: Length of the reply. + * @hb: Whether the high-bandwidth port is available. + * + * Return: The port status. + */ +static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, + unsigned long reply_len, bool hb) +{ + unsigned long si, di, eax, ebx, ecx, edx; + + if (hb) { + unsigned long bp = channel->cookie_low; + + si = channel->cookie_high; + di = (uintptr_t) reply; + + VMW_PORT_HB_IN( + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, + reply_len, si, di, + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, bp, + eax, ebx, ecx, edx, si, di); + + return ebx; + } + + /* HB port not available. Retrieve the message 4 bytes at a time. */ + ecx = MESSAGE_STATUS_SUCCESS << 16; + while (reply_len) { + unsigned int bytes = min_t(unsigned long, reply_len, 4); + + si = channel->cookie_high; + di = channel->cookie_low; + + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16), + MESSAGE_STATUS_SUCCESS, si, di, + VMW_HYPERVISOR_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); + + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) + break; + + memcpy(reply, &ebx, bytes); + reply_len -= bytes; + reply += bytes; + } + + return ecx; +} /** @@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel) */ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) { - unsigned long eax, ebx, ecx, edx, si, di, bp; + unsigned long eax, ebx, ecx, edx, si, di; size_t msg_len = strlen(msg); int retries = 0; - while (retries < RETRIES) { retries++; @@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { - /* Expected success + high-bandwidth. Give up. */ + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { + /* Expected success. Give up. 
*/ return -EINVAL; } /* Send msg */ - si = (uintptr_t) msg; - di = channel->cookie_low; - bp = channel->cookie_high; - - VMW_PORT_HB_OUT( - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - msg_len, si, di, - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); + ebx = vmw_port_hb_out(channel, msg, + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) { return 0; @@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg); static int vmw_recv_msg(struct rpc_channel *channel, void **msg, size_t *msg_len) { - unsigned long eax, ebx, ecx, edx, si, di, bp; + unsigned long eax, ebx, ecx, edx, si, di; char *reply; size_t reply_len; int retries = 0; @@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { DRM_ERROR("Failed to get reply size for host message.\n"); return -EINVAL; } @@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, /* Receive buffer */ - si = channel->cookie_high; - di = (uintptr_t) reply; - bp = channel->cookie_low; - - VMW_PORT_HB_IN( - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - reply_len, si, di, - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); - + ebx = vmw_port_hb_in(channel, reply, reply_len, + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 2faf5421fdd0c6..184e49036e1dce 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -641,6 +641,13 @@ static void mt_store_field(struct hid_device *hdev, if (*target != DEFAULT_TRUE && *target != DEFAULT_FALSE && *target != DEFAULT_ZERO) { + if (usage->contactid == DEFAULT_ZERO || + usage->x == DEFAULT_ZERO || + usage->y == DEFAULT_ZERO) { + hid_dbg(hdev, + "ignoring duplicate usage on incomplete"); + return; + } usage = mt_allocate_usage(hdev, application); if (!usage) return; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 5dd3a8245f0fd5..d7c3f4ac2c045d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1234,13 +1234,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) /* Add back in missing bits of ID for non-USI pens */ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF; } - wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0])); for (i = 0; i < pen_frames; i++) { unsigned char *frame = &data[i*pen_frame_len + 1]; bool valid = frame[0] & 0x80; bool prox = frame[0] & 0x40; bool range = frame[0] & 0x20; + bool invert = frame[0] & 0x10; if (!valid) continue; @@ -1249,9 +1249,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) wacom->shared->stylus_in_proximity = false; wacom_exit_report(wacom); input_sync(pen_input); + + wacom->tool[0] = 0; + wacom->id[0] = 0; + wacom->serial[0] = 0; return; } + if (range) { + if (!wacom->tool[0]) { /* first in range */ + /* Going into range select tool */ + if (invert) + wacom->tool[0] = BTN_TOOL_RUBBER; + else if (wacom->id[0]) + wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]); + else + wacom->tool[0] = BTN_TOOL_PEN; + } + input_report_abs(pen_input, ABS_X, 
get_unaligned_le16(&frame[1])); input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); @@ -1273,23 +1288,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) get_unaligned_le16(&frame[11])); } } - input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); - if (wacom->features.type == INTUOSP2_BT) { - input_report_abs(pen_input, ABS_DISTANCE, - range ? frame[13] : wacom->features.distance_max); - } else { - input_report_abs(pen_input, ABS_DISTANCE, - range ? frame[7] : wacom->features.distance_max); - } - input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01); - input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); - input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); + if (wacom->tool[0]) { + input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); + if (wacom->features.type == INTUOSP2_BT) { + input_report_abs(pen_input, ABS_DISTANCE, + range ? frame[13] : wacom->features.distance_max); + } else { + input_report_abs(pen_input, ABS_DISTANCE, + range ? frame[7] : wacom->features.distance_max); + } - input_report_key(pen_input, wacom->tool[0], prox); - input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); - input_report_abs(pen_input, ABS_MISC, - wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ + input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09); + input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); + input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); + + input_report_key(pen_input, wacom->tool[0], prox); + input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); + input_report_abs(pen_input, ABS_MISC, + wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ + } wacom->shared->stylus_in_proximity = prox; @@ -1351,11 +1369,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom) if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); + input_sync(touch_input); } } - input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); - input_sync(touch_input); + if (wacom->num_contacts_left == 0) { + // Be careful that we don't accidentally call input_sync with + // only a partial set of fingers of processed + input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); + input_sync(touch_input); + } + } static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) @@ -1363,7 +1387,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; - int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); + int buttons = data[282] | ((data[281] & 0x40) << 2); int ring = data[285] & 0x7F; bool ringstatus = data[285] & 0x80; bool prox = buttons || ringstatus; @@ -3832,7 +3856,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group) static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, int mask, int group) { - int button_per_group; + int group_button; /* * 21UX2 has LED group 1 to the left and LED group 0 @@ -3842,9 +3866,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, if (wacom->wacom_wac.features.type == WACOM_21UX2) group = 1 - group; - button_per_group = button_count/wacom->led.count; + group_button = group * (button_count/wacom->led.count); + + if (wacom->wacom_wac.features.type == INTUOSP2_BT) + group_button = 8; - return mask & (1 << (group * button_per_group)); + return mask & (1 << group_button); } static void 
wacom_update_led(struct wacom *wacom, int button_count, int mask, diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index fcdbac4a56e390..6b3559f58b67d8 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -619,7 +619,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (err) goto free_hwmon; - if (dev && chip && chip->ops->read && + if (dev && dev->of_node && chip && chip->ops->read && chip->info[0]->type == hwmon_chip && (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { const struct hwmon_channel_info **info = chip->info; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 2e2b5851139c2f..cd24b375df1eea 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -1230,7 +1230,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, const struct pmbus_driver_info *info, const char *name, int index, int page, - const struct pmbus_sensor_attr *attr) + const struct pmbus_sensor_attr *attr, + bool paged) { struct pmbus_sensor *base; bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */ @@ -1238,7 +1239,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, if (attr->label) { ret = pmbus_add_label(data, name, index, attr->label, - attr->paged ? page + 1 : 0); + paged ? page + 1 : 0); if (ret) return ret; } @@ -1271,6 +1272,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, return 0; } +static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info, + const struct pmbus_sensor_attr *attr) +{ + int p; + + if (attr->paged) + return true; + + /* + * Some attributes may be present on more than one page despite + * not being marked with the paged attribute. If that is the case, + * then treat the sensor as being paged and add the page suffix to the + * attribute name. + * We don't just add the paged attribute to all such attributes, in + * order to maintain the un-suffixed labels in the case where the + * attribute is only on page 0. + */ + for (p = 1; p < info->pages; p++) { + if (info->func[p] & attr->func) + return true; + } + return false; +} + static int pmbus_add_sensor_attrs(struct i2c_client *client, struct pmbus_data *data, const char *name, @@ -1284,14 +1309,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client, index = 1; for (i = 0; i < nattrs; i++) { int page, pages; + bool paged = pmbus_sensor_is_paged(info, attrs); - pages = attrs->paged ? info->pages : 1; + pages = paged ? 
info->pages : 1; for (page = 0; page < pages; page++) { if (!(info->func[page] & attrs->func)) continue; ret = pmbus_add_sensor_attrs_one(client, data, info, name, index, page, - attrs); + attrs, paged); if (ret) return ret; index++; diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c index f4a5ae69bf6a48..fa3763e4b3ee26 100644 --- a/drivers/i2c/busses/i2c-acorn.c +++ b/drivers/i2c/busses/i2c-acorn.c @@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = { static struct i2c_adapter ioc_ops = { .nr = 0, + .name = "ioc", .algo_data = &ioc_data, }; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index ccd76c71af098d..cb07651f4b467f 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -283,6 +283,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { + i++; res = -EINVAL; break; } diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c index 9851311aa3fdc9..2d54d9cac61dc0 100644 --- a/drivers/iio/temperature/mlx90632.c +++ b/drivers/iio/temperature/mlx90632.c @@ -81,6 +81,8 @@ /* Magic constants */ #define MLX90632_ID_MEDICAL 0x0105 /* EEPROM DSPv5 Medical device id */ #define MLX90632_ID_CONSUMER 0x0205 /* EEPROM DSPv5 Consumer device id */ +#define MLX90632_DSP_VERSION 5 /* DSP version */ +#define MLX90632_DSP_MASK GENMASK(7, 0) /* DSP version in EE_VERSION */ #define MLX90632_RESET_CMD 0x0006 /* Reset sensor (address or global) */ #define MLX90632_REF_12 12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */ #define MLX90632_REF_3 12LL /**< ResCtrlRef value of Channel 3 */ @@ -666,10 +668,13 @@ static int mlx90632_probe(struct i2c_client *client, } else if (read == MLX90632_ID_CONSUMER) { dev_dbg(&client->dev, "Detected Consumer EEPROM calibration %x\n", read); + } else if ((read & MLX90632_DSP_MASK) == MLX90632_DSP_VERSION) { + dev_dbg(&client->dev, + "Detected Unknown EEPROM calibration %x\n", read); } else { dev_err(&client->dev, - "EEPROM version mismatch %x (expected %x or %x)\n", - read, MLX90632_ID_CONSUMER, MLX90632_ID_MEDICAL); + "Wrong DSP version %x (expected %x)\n", + read, MLX90632_DSP_VERSION); return -EPROTONOSUPPORT; } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b12c8ff8ed6669..d8eb4dc04d6903 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9849,6 +9849,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) /* disable the port */ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); + cancel_work_sync(&ppd->freeze_work); } static inline int init_cpu_counters(struct hfi1_devdata *dd) diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index e2290f32c8d90c..7eaff4dcbfd77d 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, char *dash; unsigned long range_start, range_end, i; bool remove = false; + unsigned long bound = 1U << BITS_PER_BYTE; end = strchr(ptr, ','); if (end) @@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, BITS_PER_BYTE); break; } + /* Check the inputs */ + if (range_start >= bound || range_end >= bound) + break; + for (i = range_start; i <= range_end; i++) { if (remove) clear_bit(i, fault->opcodes); diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c 
b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index dbe7d14a5c76d1..4e986ca4dd3544 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, u32 *tidlist = NULL; struct tid_user_buf *tidbuf; + if (!PAGE_ALIGNED(tinfo->vaddr)) + return -EINVAL; + tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) return -ENOMEM; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 48692adbe811ee..27d9c4cefdc7a5 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1418,8 +1418,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd) rdi->dparms.props.max_cq = hfi1_max_cqs; rdi->dparms.props.max_ah = hfi1_max_ahs; rdi->dparms.props.max_cqe = hfi1_max_cqes; - rdi->dparms.props.max_mr = rdi->lkey_table.max; - rdi->dparms.props.max_fmr = rdi->lkey_table.max; rdi->dparms.props.max_map_per_fmr = 32767; rdi->dparms.props.max_pd = hfi1_max_pds; rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index c4ab2d5b4502ee..8f766dd3f61c89 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct hfi1_qp_priv *priv; - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (tx) goto out; priv = qp->priv; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1c19bbc764b2d6..b1a78985b4ecad 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -72,6 +72,7 @@ struct hfi1_ibdev; struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp); +#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN) static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->slock) @@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct verbs_txreq *tx; struct hfi1_qp_priv *priv = qp->priv; - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (unlikely(!tx)) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 41babbc0db583c..803c3544c75b5f 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1495,8 +1495,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd) rdi->dparms.props.max_cq = ib_qib_max_cqs; rdi->dparms.props.max_cqe = ib_qib_max_cqes; rdi->dparms.props.max_ah = ib_qib_max_ahs; - rdi->dparms.props.max_mr = rdi->lkey_table.max; - rdi->dparms.props.max_fmr = rdi->lkey_table.max; rdi->dparms.props.max_map_per_fmr = 32767; rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; rdi->dparms.props.max_qp_init_rd_atom = 255; diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 5819c9d6ffdc8a..39d101df229d15 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi) for (i = 0; i < rdi->lkey_table.max; i++) 
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL); + rdi->dparms.props.max_mr = rdi->lkey_table.max; + rdi->dparms.props.max_fmr = rdi->lkey_table.max; return 0; } diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 5ce403c6cddba4..7d03680afd9145 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -412,7 +412,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, offset = qpt->incr | ((offset & 1) ^ 1); } /* there can be no set bits in low-order QoS bits */ - WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); + WARN_ON(rdi->dparms.qos_shift > 1 && + offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1)); qpn = mk_qpn(qpt, map, offset); } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 26ec603fe22085..83d1499fe02152 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -1051,13 +1051,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT -#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) +/* + * These IOCTLs change their size and thus their numbers between + * 32 and 64 bits. + */ +#define UI_SET_PHYS_COMPAT \ + _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) +#define UI_BEGIN_FF_UPLOAD_COMPAT \ + _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat) +#define UI_END_FF_UPLOAD_COMPAT \ + _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat) static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - if (cmd == UI_SET_PHYS_COMPAT) + switch (cmd) { + case UI_SET_PHYS_COMPAT: cmd = UI_SET_PHYS; + break; + case UI_BEGIN_FF_UPLOAD_COMPAT: + cmd = UI_BEGIN_FF_UPLOAD; + break; + case UI_END_FF_UPLOAD_COMPAT: + cmd = UI_END_FF_UPLOAD; + break; + } return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index b6da0c1267e36e..8e6077d8e434a3 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -179,6 +179,8 @@ static const char * const smbus_pnp_ids[] = { "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ "LEN200f", /* T450s */ + "LEN2054", /* E480 */ + "LEN2055", /* E580 */ "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ NULL diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index d196ac3d8b8cda..e5c3b066bd2a19 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -604,6 +604,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = { { "MSSL1680", 0 }, { "MSSL0001", 0 }, { "MSSL0002", 0 }, + { "MSSL0017", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 9ae3678844ebf0..40fbf20d69e5a5 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2414,13 +2414,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Clear CR0 and sync (disables SMMU and queue processing) */ reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); if (reg & CR0_SMMUEN) { - if (is_kdump_kernel()) { - arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); - arm_smmu_device_disable(smmu); - return -EBUSY; - } - dev_warn(smmu->dev, "SMMU currently enabled! 
Resetting...\n"); + WARN_ON(is_kdump_kernel() && !disable_bypass); + arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); } ret = arm_smmu_device_disable(smmu); @@ -2513,6 +2509,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) return ret; } + if (is_kdump_kernel()) + enables &= ~(CR0_EVTQEN | CR0_PRIQEN); /* Enable the SMMU interface, or ensure bypass */ if (!bypass || disable_bypass) { diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ce119cb279c3f3..0c3b8f1c7225e8 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -56,6 +56,15 @@ #include "io-pgtable.h" #include "arm-smmu-regs.h" +/* + * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU + * global register space are still, in fact, using a hypervisor to mediate it + * by trapping and emulating register accesses. Sadly, some deployed versions + * of said trapping code have bugs wherein they go horribly wrong for stores + * using r31 (i.e. XZR/WZR) as the source register. + */ +#define QCOM_DUMMY_VAL -1 + #define ARM_MMU500_ACTLR_CPRE (1 << 1) #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) @@ -398,7 +407,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, { unsigned int spin_cnt, delay; - writel_relaxed(0, sync); + writel_relaxed(QCOM_DUMMY_VAL, sync); for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) @@ -1637,8 +1646,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) } /* Invalidate the TLB, just in case */ - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH); + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 603bf5233a998b..c1439019dd127e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4033,9 +4033,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. 
Either bypass it or set the gfx_mapped flag, as appropriate */ - if (dmar_map_gfx) { - intel_iommu_gfx_mapped = 1; - } else { + if (!dmar_map_gfx) { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) @@ -4831,6 +4829,9 @@ int __init intel_iommu_init(void) goto out_free_reserved_range; } + if (dmar_map_gfx) + intel_iommu_gfx_mapped = 1; + init_no_remapping_devices(); ret = init_dmars(); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index b2abc44fa5cb8b..a73337b74f410c 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; - strcpy(di.name, dev_name(&dev->dev)); + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else @@ -677,7 +677,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; - strcpy(di.name, dev_name(&dev->dev)); + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else @@ -691,6 +691,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) err = -EFAULT; break; } + dn.name[sizeof(dn.name) - 1] = '\0'; dev = get_mdevice(dn.id); if (dev) err = device_rename(&dev->dev, dn.name); diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c index 533b0da5235d43..ca1f993c0de306 100644 --- a/drivers/mailbox/stm32-ipcc.c +++ b/drivers/mailbox/stm32-ipcc.c @@ -8,9 +8,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -240,9 +240,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev) /* irq */ for (i = 0; i < IPCC_IRQ_NUM; i++) { - ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]); + ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]); if (ipcc->irqs[i] < 0) { - dev_err(dev, "no IRQ specified %s\n", irq_name[i]); + if (ipcc->irqs[i] != -EPROBE_DEFER) + dev_err(dev, "no IRQ specified %s\n", + irq_name[i]); ret = ipcc->irqs[i]; goto err_clk; } @@ -263,9 +265,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev) /* wakeup */ if (of_property_read_bool(np, "wakeup-source")) { - ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup"); + ipcc->wkp = platform_get_irq_byname(pdev, "wakeup"); if (ipcc->wkp < 0) { - dev_err(dev, "could not get wakeup IRQ\n"); + if (ipcc->wkp != -EPROBE_DEFER) + dev_err(dev, "could not get wakeup IRQ\n"); ret = ipcc->wkp; goto err_clk; } diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8f07fa6e17394b..268f1b6850840a 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, struct bset *i = bset_tree_last(b)->data; struct bkey *m, *prev = NULL; struct btree_iter iter; + struct bkey preceding_key_on_stack = ZERO_KEY; + struct bkey *preceding_key_p = &preceding_key_on_stack; BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); - m = bch_btree_iter_init(b, &iter, b->ops->is_extents - ? PRECEDING_KEY(&START_KEY(k)) - : PRECEDING_KEY(k)); + /* + * If k has preceding key, preceding_key_p will be set to address + * of k's preceding key; otherwise preceding_key_p will be set + * to NULL inside preceding_key(). 
+ */ + if (b->ops->is_extents) + preceding_key(&START_KEY(k), &preceding_key_p); + else + preceding_key(k, &preceding_key_p); + + m = bch_btree_iter_init(b, &iter, preceding_key_p); if (b->ops->insert_fixup(b, k, &iter, replace_key)) return status; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index bac76aabca6d79..c71365e7c1fac7 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k) return __bch_cut_back(where, k); } -#define PRECEDING_KEY(_k) \ -({ \ - struct bkey *_ret = NULL; \ - \ - if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \ - _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \ - \ - if (!_ret->low) \ - _ret->high--; \ - _ret->low--; \ - } \ - \ - _ret; \ -}) +/* + * Pointer '*preceding_key_p' points to a memory object to store preceding + * key of k. If the preceding key does not exist, set '*preceding_key_p' to + * NULL. So the caller of preceding_key() needs to take care of memory + * which '*preceding_key_p' pointed to before calling preceding_key(). + * Currently the only caller of preceding_key() is bch_btree_insert_key(), + * and it points to an on-stack variable, so the memory release is handled + * by stackframe itself. + */ +static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p) +{ + if (KEY_INODE(k) || KEY_OFFSET(k)) { + (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0); + if (!(*preceding_key_p)->low) + (*preceding_key_p)->high--; + (*preceding_key_p)->low--; + } else { + (*preceding_key_p) = NULL; + } +} static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k) { diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index d9481640b3e116..541454b4f479ca 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -393,8 +393,13 @@ STORE(bch_cached_dev) if (attr == &sysfs_writeback_running) bch_writeback_queue(dc); + /* + * Only set BCACHE_DEV_WB_RUNNING when cached device attached to + * a cache set, otherwise it doesn't make sense. 
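For reference, a minimal sketch of the calling convention described in the preceding_key() comment above. This is a hypothetical caller mirroring bch_btree_insert_key(): k is the key being inserted, and the backing storage is an on-stack bkey owned by the caller.
	struct bkey on_stack = ZERO_KEY;	/* caller-owned storage */
	struct bkey *prec = &on_stack;

	preceding_key(k, &prec);
	/* prec now points at on_stack, or is NULL if k has no preceding key */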
+ */ if (attr == &sysfs_writeback_percent) - if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + if ((dc->disk.c != NULL) && + (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))) schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 50bffc3382d77f..ff3fba16e7359c 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) { u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN; + /* Set the device in reset state */ + writel(0, lpss->priv + LPSS_PRIV_RESETS); + intel_lpss_deassert_reset(lpss); intel_lpss_set_remap_addr(lpss); diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index 3bd75061f77768..f78be039e46376 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = { { .compatible = "ti,tps65912", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table); static int tps65912_spi_probe(struct spi_device *spi) { diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index dd19f17a1b6375..2b8c479dbfa6e1 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on) } } + /* + * Register access can produce errors after power-up unless we + * wait at least 8ms based on measurements on duovero. + */ + usleep_range(10000, 12000); + /* Sync with the HW */ - regcache_sync(twl6040->regmap); + ret = regcache_sync(twl6040->regmap); + if (ret) { + dev_err(twl6040->dev, "Failed to sync with the HW: %i\n", + ret); + goto out; + } /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 6193270e7b3dca..eb4d90b7d99e16 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -1139,7 +1139,7 @@ static void kgdbts_put_char(u8 chr) static int param_set_kgdbts_var(const char *kmessage, const struct kernel_param *kp) { - int len = strlen(kmessage); + size_t len = strlen(kmessage); if (len >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); @@ -1159,7 +1159,7 @@ static int param_set_kgdbts_var(const char *kmessage, strcpy(config, kmessage); /* Chop out \n char as a result of echo */ - if (config[len - 1] == '\n') + if (len && config[len - 1] == '\n') config[len - 1] = '\0'; /* Go and configure with the new params. 
*/ diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 896e2df9400fd2..fd33a3b9c66f62 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -662,6 +662,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, data = (struct pci_endpoint_test_data *)ent->driver_data; if (data) { test_reg_bar = data->test_reg_bar; + test->test_reg_bar = test_reg_bar; test->alignment = data->alignment; irq_type = data->irq_type; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 6600b3466dfbc6..0a74785e575ba9 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -144,8 +144,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && - cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + if (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 && + !host->retune_crc_disable && (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || (mrq->stop && mrq->stop->error == -EILSEQ))) diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index d8e17ea6126de8..0aa99694b93798 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -934,6 +934,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host) */ static int mmc_sdio_suspend(struct mmc_host *host) { + /* Prevent processing of SDIO IRQs in suspended state. */ + mmc_card_set_suspended(host->card); + cancel_delayed_work_sync(&host->sdio_irq_work); + mmc_claim_host(host); if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) @@ -982,13 +986,20 @@ static int mmc_sdio_resume(struct mmc_host *host) err = sdio_enable_4bit_bus(host->card); } - if (!err && host->sdio_irqs) { + if (err) + goto out; + + /* Allow SDIO IRQs to be processed again. */ + mmc_card_clr_suspended(host->card); + + if (host->sdio_irqs) { if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) wake_up_process(host->sdio_irq_thread); else if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 1); } +out: mmc_release_host(host); host->pm_flags &= ~MMC_PM_KEEP_POWER; diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index d40744bbafa9a9..ed2d8c48ea1780 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -18,6 +18,7 @@ #include "sdio_ops.h" #include "core.h" #include "card.h" +#include "host.h" /** * sdio_claim_host - exclusively claim a bus for a certain SDIO function @@ -725,3 +726,79 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags) return 0; } EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags); + +/** + * sdio_retune_crc_disable - temporarily disable retuning on CRC errors + * @func: SDIO function attached to host + * + * If the SDIO card is known to be in a state where it might produce + * CRC errors on the bus in response to commands (like if we know it is + * transitioning between power states), an SDIO function driver can + * call this function to temporarily disable the SD/MMC core behavior of + * triggering an automatic retuning. + * + * This function should be called while the host is claimed and the host + * should remain claimed until sdio_retune_crc_enable() is called. 
+ * Specifically, the expected sequence of calls is: + * - sdio_claim_host() + * - sdio_retune_crc_disable() + * - some number of calls like sdio_writeb() and sdio_readb() + * - sdio_retune_crc_enable() + * - sdio_release_host() + */ +void sdio_retune_crc_disable(struct sdio_func *func) +{ + func->card->host->retune_crc_disable = true; +} +EXPORT_SYMBOL_GPL(sdio_retune_crc_disable); + +/** + * sdio_retune_crc_enable - re-enable retuning on CRC errors + * @func: SDIO function attached to host + * + * This is the complement to sdio_retune_crc_disable(). + */ +void sdio_retune_crc_enable(struct sdio_func *func) +{ + func->card->host->retune_crc_disable = false; +} +EXPORT_SYMBOL_GPL(sdio_retune_crc_enable); + +/** + * sdio_retune_hold_now - start deferring retuning requests till release + * @func: SDIO function attached to host + * + * This function can be called if it's currently a bad time to do + * a retune of the SDIO card. Retune requests made during this time + * will be held and we'll actually do the retune sometime after the + * release. + * + * This function could be useful if an SDIO card is in a power state + * where it can respond to a small subset of commands that doesn't + * include the retuning command. Care should be taken when using + * this function since (presumably) the retuning request we might be + * deferring was made for a good reason. + * + * This function should be called while the host is claimed. + */ +void sdio_retune_hold_now(struct sdio_func *func) +{ + mmc_retune_hold_now(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_hold_now); + +/** + * sdio_retune_release - signal that it's OK to retune now + * @func: SDIO function attached to host + * + * This is the complement to sdio_retune_hold_now(). Calling this + * function won't make a retune happen right away but will allow + * them to be scheduled normally. + * + * This function should be called while the host is claimed. + */ +void sdio_retune_release(struct sdio_func *func) +{ + mmc_retune_release(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_release); diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 7ca7b99413f0d9..b299a24d33f965 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -38,6 +38,10 @@ static int process_sdio_pending_irqs(struct mmc_host *host) unsigned char pending; struct sdio_func *func; + /* Don't process SDIO IRQs if the card is suspended. */ + if (mmc_card_suspended(card)) + return 0; + /* * Optimization, if there is only 1 function interrupt registered * and we know an IRQ was signaled then call irq handler directly. diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 1841d250e9e2c6..eb1a65cb878f03 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1295,9 +1295,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) } /* - * Don't poll for busy completion in irq context. + * Busy detection has been handled by mmci_cmd_irq() above. + * Clear the status bit to prevent polling in IRQ context.
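A minimal usage sketch for the sdio_retune_* helpers introduced above, following the call sequence listed in their kernel-doc. This is hypothetical driver code, not part of the series: the function name and the EXAMPLE_WAKE_CSR register are invented for illustration, and it assumes the declarations added to <linux/mmc/sdio_func.h> by this patch. The brcmfmac hunk later in this diff is the real in-tree user of the same pattern.
#include <linux/mmc/sdio_func.h>

static int example_wake_device(struct sdio_func *func)
{
	int err;

	sdio_claim_host(func);
	sdio_retune_crc_disable(func);
	/* Card is asleep and cannot service a retune yet; defer it. */
	sdio_retune_hold_now(func);

	/* This write may see CRC errors while the card wakes up. */
	sdio_writeb(func, 1, EXAMPLE_WAKE_CSR, &err);	/* EXAMPLE_WAKE_CSR is made up */

	sdio_retune_release(func);
	sdio_retune_crc_enable(func);
	sdio_release_host(func);

	return err;
}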
*/ - if (host->variant->busy_detect && host->busy_status) + if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; ret = 1; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index cc3ffeffd7a2e6..fa8d9da2ab7f6b 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -117,6 +117,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) */ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { current_bus_width = mmc->ios.bus_width; + mmc->ios.bus_width = MMC_BUS_WIDTH_4; sdhci_set_bus_width(host, MMC_BUS_WIDTH_4); } @@ -128,8 +129,10 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_end_tuning(host); - if (current_bus_width == MMC_BUS_WIDTH_8) + if (current_bus_width == MMC_BUS_WIDTH_8) { + mmc->ios.bus_width = MMC_BUS_WIDTH_8; sdhci_set_bus_width(host, current_bus_width); + } host->flags &= ~SDHCI_HS400_TUNING; return 0; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 2646faffd36e9a..6f265d2e647ba8 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -165,7 +165,7 @@ #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) -#define FLEXCAN_TIMEOUT_US (50) +#define FLEXCAN_TIMEOUT_US (250) /* FLEXCAN hardware feature flags * diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 045f0845e665e4..3df23487487f78 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1424,7 +1424,7 @@ static const struct xcan_devtype_data xcan_canfd_data = { XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_RX_FIFO_MULTI, - .bittiming_const = &xcan_bittiming_const, + .bittiming_const = &xcan_bittiming_const_canfd, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index dfaad1c2c2b8bb..411cfb806459a6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1484,7 +1484,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, int err; if (!vid) - return -EINVAL; + return -EOPNOTSUPP; entry->vid = vid - 1; entry->valid = false; diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 6dedd43442cc57..35b767baf21f7c 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) struct rtl8366_vlan_4k vlan4k; int ret; - if (!smi->ops->is_vlan_valid(smi, port)) + /* Use VLAN nr port + 1 since VLAN0 is not valid */ + if (!smi->ops->is_vlan_valid(smi, port + 1)) return -EINVAL; dev_info(smi->dev, "%s filtering on port %d\n", @@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) * The hardware support filter ID (FID) 0..7, I have no clue how to * support this in the driver when the callback only says on/off. 
*/ - ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); if (ret) return ret; /* Just set the filter to FID 1 for now then */ - ret = rtl8366_set_vlan(smi, port, + ret = rtl8366_set_vlan(smi, port + 1, vlan4k.member, vlan4k.untag, 1); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 6f3312350cac4c..b3c7994d73eb15 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -139,10 +139,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) bool aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); - unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; + unsigned int budget; - for (; self->sw_head != self->hw_head && budget--; - self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + for (budget = AQ_CFG_TX_CLEAN_BUDGET; + budget && self->sw_head != self->hw_head; budget--) { struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; if (likely(buff->is_mapped)) { @@ -167,6 +167,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) buff->pa = 0U; buff->eop_index = 0xffffU; + self->sw_head = aq_ring_next_dx(self, self->sw_head); } return !!budget; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 56363ff5c89199..51cd1f98bcf07e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -695,38 +695,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { /* MAC error or DMA error */ buff->is_error = 1U; - } else { - if (self->aq_nic_cfg->is_rss) { - /* last 4 byte */ - u16 rss_type = rxd_wb->type & 0xFU; - - if (rss_type && rss_type < 0x8U) { - buff->is_hash_l4 = (rss_type == 0x4 || - rss_type == 0x5); - buff->rss_hash = rxd_wb->rss_hash; - } + } + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; } + } - if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { - buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; - buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; - buff->next = 0U; - buff->is_eop = 1U; + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + buff->len = + rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? 
+ AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; + + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; } else { - if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & - rxd_wb->status) { - /* LRO */ - buff->next = rxd_wb->next_desc_ptr; - ++ring->stats.rx.lro_packets; - } else { - /* jumbo */ - buff->next = - aq_ring_next_dx(ring, - ring->hw_head); - ++ring->stats.rx.jumbo_packets; - } + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; } } } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 66535d1653f6d8..f16853c3c851a7 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2107,7 +2107,6 @@ static struct eisa_driver de4x5_eisa_driver = { .remove = de4x5_eisa_remove, } }; -MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); #endif #ifdef CONFIG_PCI diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3f6749fc889f97..bfb16a47449017 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1105,7 +1105,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); break; case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_qs - 1; + cmd->data = adapter->num_rx_qs; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index e2710ff48fb0fb..1fa0cd527ead3d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -339,6 +339,7 @@ static int __lb_setup(struct net_device *ndev, static int __lb_up(struct net_device *ndev, enum hnae_loop loop_mode) { +#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300 struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; int speed, duplex; @@ -365,6 +366,9 @@ static int __lb_up(struct net_device *ndev, h->dev->ops->adjust_link(h, speed, duplex); + /* wait adjust link done and phy ready */ + msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 340baf6a470cc1..4648c6a9d9e865 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4300,8 +4300,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, return hclge_add_mac_vlan_tbl(vport, &req, NULL); /* check if we just hit the duplicate */ - if (!ret) - ret = -EINVAL; + if (!ret) { + dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", + vport->vport_id, addr); + return 0; + } dev_err(&hdev->pdev->dev, "PF failed to add unicast entry(%pM) in the MAC table\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index df8808cd7e1144..4e04985fb4307a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6758,10 +6758,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0, num_tc, hw; bool need_reset = false; + int old_queue_pairs; int ret = -EINVAL; u16 mode; int i; + old_queue_pairs = vsi->num_queue_pairs; num_tc = mqprio_qopt->qopt.num_tc; hw = 
mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; @@ -6862,6 +6864,7 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) } ret = i40e_configure_queue_channels(vsi); if (ret) { + vsi->num_queue_pairs = old_queue_pairs; netdev_info(netdev, "Failed configuring queue channels\n"); need_reset = true; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index db1543bca7013d..875f97aba6e0dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -652,6 +652,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) case ICE_FC_RX_PAUSE: fc = "RX"; break; + case ICE_FC_NONE: + fc = "None"; + break; default: fc = "Unknown"; break; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 392fd895f27826..ae2240074d8ef8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) } /* Find tcam entry with matched pair */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) +static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) { unsigned char byte[2], enable[2]; struct mvpp2_prs_entry pe; @@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, int tid; /* Go through the all entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (!port->priv->prs_shadow[tid].valid || + port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + tid = mvpp2_prs_vid_range_find(port, vid, mask); reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); if (reg_val & MVPP2_DSA_EXTENDED) @@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) int tid; /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); /* No such entry */ if (tid < 0) @@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } } } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6e6abdc399deb3..1d55f014725efd 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1784,6 +1784,7 @@ static void mtk_poll_controller(struct net_device *dev) static int mtk_start_dma(struct mtk_eth *eth) { + u32 rx_2b_offset = 
(NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; int err; err = mtk_dma_init(eth); @@ -1800,7 +1801,7 @@ static int mtk_start_dma(struct mtk_eth *eth) MTK_QDMA_GLO_CFG); mtk_w32(eth, - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_DMA_EN | rx_2b_offset | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, MTK_PDMA_GLO_CFG); @@ -2304,13 +2305,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - if (dev->features & NETIF_F_LRO) { + if (dev->hw_features & NETIF_F_LRO) { cmd->data = MTK_MAX_RX_RING_NUM; ret = 0; } break; case ETHTOOL_GRXCLSRLCNT: - if (dev->features & NETIF_F_LRO) { + if (dev->hw_features & NETIF_F_LRO) { struct mtk_mac *mac = netdev_priv(dev); cmd->rule_cnt = mac->hwlro_ip_cnt; @@ -2318,11 +2319,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } break; case ETHTOOL_GRXCLSRULE: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_get_fdir_entry(dev, cmd); break; case ETHTOOL_GRXCLSRLALL: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_get_fdir_all(dev, cmd, rule_locs); break; @@ -2339,11 +2340,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_add_ipaddr(dev, cmd); break; case ETHTOOL_SRXCLSRLDEL: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_del_ipaddr(dev, cmd); break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 37ba7c78859db1..1c225be9c7db99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -342,11 +342,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +/* Must be called with intf_mutex held */ +static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_interface *intf; + bool found = false; + + list_for_each_entry(intf, &intf_list, list) { + if (intf->protocol == protocol) { + dev_ctx = mlx5_get_device(intf, &mdev->priv); + if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) + found = true; + break; + } + } + + return found; +} + void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) { mutex_lock(&mlx5_intf_mutex); - mlx5_remove_dev_by_protocol(mdev, protocol); - mlx5_add_dev_by_protocol(mdev, protocol); + if (mlx5_has_added_dev_by_protocol(mdev, protocol)) { + mlx5_remove_dev_by_protocol(mdev, protocol); + mlx5_add_dev_by_protocol(mdev, protocol); + } mutex_unlock(&mlx5_intf_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c5b82e283d1349..ff2f6b8e2fabe5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2488,6 +2488,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); autoneg = cmd->base.autoneg == AUTONEG_ENABLE; + if (!autoneg && cmd->base.speed == SPEED_56000) { + netdev_err(dev, "56G not supported with autoneg off\n"); + return -EINVAL; + } eth_proto_new = autoneg ? 
mlxsw_sp_to_ptys_advert_link(cmd) : mlxsw_sp_to_ptys_speed(cmd->base.speed); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f27a0dc8c56331..5e3e6e262ba374 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1588,6 +1588,10 @@ static void sh_eth_dev_exit(struct net_device *ndev) sh_eth_get_stats(ndev); mdp->cd->soft_reset(ndev); + /* Set the RMII mode again if required */ + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* Set MAC address again */ update_mac_address(ndev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 50c00822b2d8b3..45e64d71a93f78 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3319,6 +3319,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } rx_q->dirty_rx = entry; + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); } /** diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9d699bd5f7151a..cf6b9b1771f12c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2405,7 +2405,7 @@ static struct hv_driver netvsc_drv = { .probe = netvsc_probe, .remove = netvsc_remove, .driver = { - .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 68b8007da82bd2..0115a2868933c6 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -178,7 +178,7 @@ static void ipvlan_port_destroy(struct net_device *dev) } #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index b3935778b19fee..e4bf9e7d758384 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -260,10 +260,8 @@ static int dp83867_config_init(struct phy_device *phydev) ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); if (ret) return ret; - } - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { + /* Set up RGMII delays */ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index f6e70f2dfd12b9..e029c7977a562d 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -54,6 +54,10 @@ struct phylink { /* The link configuration settings */ struct phylink_link_state link_config; + + /* The current settings */ + phy_interface_t cur_interface; + struct gpio_desc *link_gpio; struct timer_list link_poll; void (*get_fixed_state)(struct net_device *dev, @@ -477,12 +481,12 @@ static void phylink_resolve(struct work_struct *w) if (!link_state.link) { netif_carrier_off(ndev); pl->ops->mac_link_down(ndev, pl->link_an_mode, - pl->phy_state.interface); + pl->cur_interface); netdev_info(ndev, "Link is Down\n"); } else { + pl->cur_interface = link_state.interface; pl->ops->mac_link_up(ndev, 
pl->link_an_mode, - pl->phy_state.interface, - pl->phydev); + pl->cur_interface, pl->phydev); netif_carrier_on(ndev); diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index e0d6760f321951..4b5af24139703a 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1285,6 +1285,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) tbnet_tear_down(net, true); } + tb_unregister_protocol_handler(&net->handler); return 0; } @@ -1293,6 +1294,8 @@ static int __maybe_unused tbnet_resume(struct device *dev) struct tb_service *svc = tb_to_service(dev); struct tbnet *net = tb_service_get_drvdata(svc); + tb_register_protocol_handler(&net->handler); + netif_carrier_off(net->dev); if (netif_running(net->dev)) { netif_device_attach(net->dev); diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 3d8a70d3ea9bd6..3d71f17163902a 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) dev); dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + netif_stop_queue(net); retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", __func__, retval); dev->net->stats.tx_errors++; dev_kfree_skb_any(skb); + netif_wake_queue(net); } else { dev->net->stats.tx_packets++; dev->net->stats.tx_bytes += skb->len; dev_consume_skb_any(skb); - netif_stop_queue(net); } return NETDEV_TX_OK; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index a907d7b065fa8e..53e4962ceb8ae8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -667,6 +667,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) brcmf_dbg(TRACE, "Enter: on=%d\n", on); + sdio_retune_crc_disable(bus->sdiodev->func1); + + /* Cannot re-tune if device is asleep; defer till we're awake */ + if (on) + sdio_retune_hold_now(bus->sdiodev->func1); + wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); /* 1st KSO write goes to AOS wake up core if device is asleep */ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); @@ -719,6 +725,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) if (try_cnt > MAX_KSO_ATTEMPTS) brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err); + if (on) + sdio_retune_release(bus->sdiodev->func1); + + sdio_retune_crc_enable(bus->sdiodev->func1); + return err; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 9148015ed8036f..a3132a9eb91c21 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -612,7 +612,7 @@ static struct attribute *nd_device_attributes[] = { NULL, }; -/** +/* * nd_device_attribute_group - generic attributes for all devices on an nd bus */ struct attribute_group nd_device_attribute_group = { @@ -641,7 +641,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a, return a->mode; } -/** +/* * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus */ struct attribute_group nd_numa_attribute_group = { diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 452ad379ed70b0..9f1b7e3153f991 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid; static guid_t nvdimm_pfn_guid; static guid_t nvdimm_dax_guid; +static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; + static u32 best_seq(u32 a, u32 
b) { a &= NSINDEX_SEQ_MASK; diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 18bbe183b3a9bd..52f9fcada00a1b 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -38,8 +38,6 @@ enum { ND_NSINDEX_INIT = 0x1, }; -static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; - /** * struct nd_namespace_index - label set superblock * @sig: NAMESPACE_INDEX\0 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index abfb46378cc132..d8869d978c3414 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1277,9 +1277,14 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, { #ifdef CONFIG_NVME_MULTIPATH if (disk->fops == &nvme_ns_head_ops) { + struct nvme_ns *ns; + *head = disk->private_data; *srcu_idx = srcu_read_lock(&(*head)->srcu); - return nvme_find_path(*head); + ns = nvme_find_path(*head); + if (!ns) + srcu_read_unlock(&(*head)->srcu, *srcu_idx); + return ns; } #endif *head = NULL; @@ -1293,42 +1298,56 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) srcu_read_unlock(&head->srcu, idx); } -static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg) +static int nvme_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) { + struct nvme_ns_head *head = NULL; + void __user *argp = (void __user *)arg; + struct nvme_ns *ns; + int srcu_idx, ret; + + ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); + if (unlikely(!ns)) + return -EWOULDBLOCK; + + /* + * Handle ioctls that apply to the controller instead of the namespace + * separately and drop the ns SRCU reference early. This avoids a + * deadlock when deleting namespaces using the passthrough interface. + */ + if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { + struct nvme_ctrl *ctrl = ns->ctrl; + + nvme_get_ctrl(ns->ctrl); + nvme_put_ns_from_disk(head, srcu_idx); + + if (cmd == NVME_IOCTL_ADMIN_CMD) + ret = nvme_user_cmd(ctrl, NULL, argp); + else + ret = sed_ioctl(ctrl->opal_dev, cmd, argp); + + nvme_put_ctrl(ctrl); + return ret; + } + switch (cmd) { case NVME_IOCTL_ID: force_successful_syscall_return(); - return ns->head->ns_id; - case NVME_IOCTL_ADMIN_CMD: - return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg); + ret = ns->head->ns_id; + break; case NVME_IOCTL_IO_CMD: - return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg); + ret = nvme_user_cmd(ns->ctrl, ns, argp); + break; case NVME_IOCTL_SUBMIT_IO: - return nvme_submit_io(ns, (void __user *)arg); + ret = nvme_submit_io(ns, argp); + break; default: -#ifdef CONFIG_NVM if (ns->ndev) - return nvme_nvm_ioctl(ns, cmd, arg); -#endif - if (is_sed_ioctl(cmd)) - return sed_ioctl(ns->ctrl->opal_dev, cmd, - (void __user *) arg); - return -ENOTTY; + ret = nvme_nvm_ioctl(ns, cmd, arg); + else + ret = -ENOTTY; } -} - -static int nvme_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - int srcu_idx, ret; - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - ret = -EWOULDBLOCK; - else - ret = nvme_ns_ioctl(ns, cmd, arg); nvme_put_ns_from_disk(head, srcu_idx); return ret; } @@ -3209,7 +3228,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) { struct nvme_ns *ns; __le32 *ns_list; - unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); + unsigned i, j, nsid, prev = 0; + unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024); int ret = 0; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE,
GFP_KERNEL); @@ -3506,6 +3526,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); } EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 7b9ef8e734e775..c8eeecc5811542 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1132,6 +1132,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd; + bool shutdown = false; u32 csts = readl(dev->bar + NVME_REG_CSTS); /* If PCI error recovery process is happening, we cannot reset or @@ -1168,12 +1169,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) * shutdown, so we return BLK_EH_DONE. */ switch (dev->ctrl.state) { + case NVME_CTRL_DELETING: + shutdown = true; case NVME_CTRL_CONNECTING: case NVME_CTRL_RESETTING: dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); - nvme_dev_disable(dev, false); + nvme_dev_disable(dev, shutdown); nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_DONE; default: @@ -2187,8 +2190,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * must flush all entered requests to their failed completion to avoid * deadlocking blk-mq hot-cpu notifier. */ - if (shutdown) + if (shutdown) { nvme_start_queues(&dev->ctrl); + if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) + blk_mq_unquiesce_queue(dev->ctrl.admin_q); + } mutex_unlock(&dev->shutdown_lock); } diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 7bc9f624043296..1096dd01ca2288 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -239,6 +239,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) return 0; case nvme_cmd_write_zeroes: req->execute = nvmet_bdev_execute_write_zeroes; + req->data_len = 0; return 0; default: pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 7c530c88b3fbb9..99de51e87f7f84 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1028,7 +1028,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put); static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) { u8 *p, *b; - int i, bit_offset = cell->bit_offset; + int i, extra, bit_offset = cell->bit_offset; p = b = buf; if (bit_offset) { @@ -1043,11 +1043,16 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) p = b; *b++ >>= bit_offset; } - - /* result fits in less bytes */ - if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) - *p-- = 0; + } else { + /* point to the msb */ + p += cell->bytes - 1; } + + /* result fits in less bytes */ + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); + while (--extra >= 0) + *p-- = 0; + /* clear msb bits if any leftover in the last byte */ *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); } diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index d020f89248fd76..69f8e972e29c69 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -235,8 +235,10 @@ static const struct sunxi_sid_cfg sun50i_a64_cfg = { static const struct of_device_id sunxi_sid_of_match[] = { { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun7i-a20-sid", .data = 
&sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 5dc53d420ca8ca..7b4ee33c193544 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name, par_dev->devmodel = true; ret = device_register(&par_dev->dev); if (ret) { + kfree(par_dev->state); put_device(&par_dev->dev); goto err_put_port; } @@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name, spin_unlock(&port->physport->pardevice_lock); pr_debug("%s: cannot grant exclusive access for device %s\n", port->name, name); + kfree(par_dev->state); device_unregister(&par_dev->dev); goto err_put_port; } diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index e88bd221fffeab..5e199e7d2d4fd1 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -237,6 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) ks_dw_pcie_enable_error_irq(ks_pcie); } +#ifdef CONFIG_ARM /* * When a PCI device does not exist during config cycles, keystone host gets a * bus error instead of returning 0xffffffff. This handler always returns 0 @@ -256,6 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, return 0; } +#endif static int __init ks_pcie_host_init(struct pcie_port *pp) { @@ -279,12 +281,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) val |= BIT(12); writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); +#ifdef CONFIG_ARM /* * PCIe access errors that result into OCP errors are caught by ARM as * "External aborts" */ hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); +#endif return 0; } diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index de8635af4cde29..739d97080d3bdb 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -385,6 +385,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; + unsigned int aligned_offset; u16 msg_ctrl, msg_data; u32 msg_addr_lower, msg_addr_upper, reg; u64 msg_addr; @@ -410,13 +411,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_readw_dbi(pci, reg); } - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); + msg_addr = ((u64)msg_addr_upper) << 32 | + (msg_addr_lower & ~aligned_offset); ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, epc->mem->page_size); if (ret) return ret; - writel(msg_data | (interrupt_num - 1), ep->msi_mem); + writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index b56e22262a7741..acd50920c2ffde 100644 --- 
a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -303,20 +303,24 @@ void dw_pcie_free_msi(struct pcie_port *pp) irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); + + if (pp->msi_page) + __free_page(pp->msi_page); } void dw_pcie_msi_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct page *page; u64 msi_target; - page = alloc_page(GFP_KERNEL); - pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + pp->msi_page = alloc_page(GFP_KERNEL); + pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); if (dma_mapping_error(dev, pp->msi_data)) { dev_err(dev, "Failed to map MSI data\n"); - __free_page(page); + __free_page(pp->msi_page); + pp->msi_page = NULL; return; } msi_target = (u64)pp->msi_data; @@ -439,7 +443,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) pci->num_viewport = 2; - if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) { + if (pci_msi_enabled()) { /* * If a specific SoC driver needs to change the * default number of vectors, it needs to implement @@ -477,7 +481,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (pp->ops->host_init) { ret = pp->ops->host_init(pp); if (ret) - goto error; + goto err_free_msi; } pp->root_bus_nr = pp->busn->start; @@ -491,7 +495,7 @@ int dw_pcie_host_init(struct pcie_port *pp) ret = pci_scan_root_bus_bridge(bridge); if (ret) - goto error; + goto err_free_msi; bus = bridge->bus; @@ -507,6 +511,9 @@ int dw_pcie_host_init(struct pcie_port *pp) pci_bus_add_devices(bus); return 0; +err_free_msi: + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); error: pci_free_host_bridge(bridge); return ret; diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 9f1a5e399b7033..14dcf66466996c 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -164,6 +164,7 @@ struct pcie_port { struct irq_domain *irq_domain; struct irq_domain *msi_domain; dma_addr_t msi_data; + struct page *msi_page; u32 num_vectors; u32 irq_status[MAX_MSI_CTRLS]; raw_spinlock_t lock; diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index 6a4e435bd35f30..9b9c677ad3a0b6 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -892,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; struct rcar_msi *msi = &pcie->msi; - unsigned long base; + phys_addr_t base; int err, i; mutex_init(&msi->lock); @@ -931,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) /* setup MSI data target */ msi->pages = __get_free_pages(GFP_KERNEL, 0); + if (!msi->pages) { + err = -ENOMEM; + goto err; + } base = virt_to_phys((void *)msi->pages); - rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); - rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); + rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); + rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); /* enable all MSI interrupts */ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 7b1389d8e2a571..ea48cba5480b80 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = { * xilinx_pcie_enable_msi 
- Enable MSI support * @port: PCIe port information */ -static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) +static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) { phys_addr_t msg_addr; port->msi_pages = __get_free_pages(GFP_KERNEL, 0); + if (!port->msi_pages) + return -ENOMEM; + msg_addr = virt_to_phys((void *)port->msi_pages); pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); + + return 0; } /* INTx Functions */ @@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) struct device *dev = port->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; + int ret; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); @@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - xilinx_pcie_enable_msi(port); + ret = xilinx_pcie_enable_msi(port); + if (ret) + return ret; } return 0; diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index e2356a9c7088a4..182f9e3443eef8 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) if (rc == 0) break; } + of_node_put(parent); return dn; } @@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, return np; } +/* Returns a device_node with its reference count incremented */ static struct device_node *find_dlpar_node(char *drc_name, int *node_type) { struct device_node *dn; @@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name) rc = dlpar_add_phb(drc_name, dn); break; } + of_node_put(dn); printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); exit: @@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name) rc = dlpar_remove_pci_slot(drc_name, dn); break; } + of_node_put(dn); vm_unmap_aliases(); printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index f8436d1c4d45fe..f7218c1673ceba 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -625,7 +625,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) if (!adev || !acpi_device_power_manageable(adev)) return false; - if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) + if (adev->wakeup.flags.valid && + device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) return true; if (acpi_target_system_state() == ACPI_STATE_S0) diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 37d0c15c9eeb0b..72db2e0ebced56 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1116,7 +1116,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx) if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) return 0; - if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE) + if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE || + eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP) return 0; dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index e5d5b1adb5a95b..ac784ac66ac341 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev, else xfer_fxn = ec_dev->cmd_xfer; + if (!xfer_fxn) { + /* + * This error can happen if a communication error happened and + 
* the EC is trying to use protocol v2, on an underlying + * communication mechanism that does not support v2. + */ + dev_err_once(ec_dev->dev, + "missing EC transfer API, cannot send command\n"); + return -EIO; + } + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index e7edc8c6393674..4ad9d127f2f58b 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -776,13 +776,17 @@ static int ipc_create_pmc_devices(void) if (ret) { dev_err(ipcdev.dev, "Failed to add punit platform device\n"); platform_device_unregister(ipcdev.tco_dev); + return ret; } if (!ipcdev.telem_res_inval) { ret = ipc_create_telemetry_device(); - if (ret) + if (ret) { dev_warn(ipcdev.dev, "Failed to add telemetry platform device\n"); + platform_device_unregister(ipcdev.punit_dev); + platform_device_unregister(ipcdev.tco_dev); + } } return ret; diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index c7039f52ad5180..b1d8043762371c 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -398,12 +398,45 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) */ static const struct dmi_system_id critclk_systems[] = { { + /* pmc_plt_clk0 is used for an external HSIC USB HUB */ .ident = "MPL CEC1x", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"), DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"), }, }, + { + /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */ + .ident = "Lex 3I380D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"), + DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB3163", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB3163"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB6263", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB6263"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB6363", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB6363"), + }, + }, { /*sentinel*/ } }; diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c index b91b1d2999dc6d..d19307f791c689 100644 --- a/drivers/power/supply/max14656_charger_detector.c +++ b/drivers/power/supply/max14656_charger_detector.c @@ -280,6 +280,13 @@ static int max14656_probe(struct i2c_client *client, INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker); + chip->detect_psy = devm_power_supply_register(dev, + &chip->psy_desc, &psy_cfg); + if (IS_ERR(chip->detect_psy)) { + dev_err(dev, "power_supply_register failed\n"); + return -EINVAL; + } + ret = devm_request_irq(dev, chip->irq, max14656_irq, IRQF_TRIGGER_FALLING, MAX14656_NAME, chip); @@ -289,13 +296,6 @@ static int max14656_probe(struct i2c_client *client, } enable_irq_wake(chip->irq); - chip->detect_psy = devm_power_supply_register(dev, - &chip->psy_desc, &psy_cfg); - if (IS_ERR(chip->detect_psy)) { - dev_err(dev, "power_supply_register failed\n"); - return -EINVAL; - } - schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000)); return 0; diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 1581f6ab1b1f42..c45e5719ba172c 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -311,10 
+311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, if (IS_ENABLED(CONFIG_OF)) of_pwmchip_add(chip); - pwmchip_sysfs_export(chip); - out: mutex_unlock(&pwm_lock); + + if (!ret) + pwmchip_sysfs_export(chip); + return ret; } EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); @@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip) unsigned int i; int ret = 0; - pwmchip_sysfs_unexport_children(chip); + pwmchip_sysfs_unexport(chip); mutex_lock(&pwm_lock); @@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip) free_pwms(chip); - pwmchip_sysfs_unexport(chip); - out: mutex_unlock(&pwm_lock); return ret; diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index c1ed641b3e2662..f6e738ad7bd929 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -111,6 +111,10 @@ struct meson_pwm { const struct meson_pwm_data *data; void __iomem *base; u8 inverter_mask; + /* + * Protects register (write) access to the REG_MISC_AB register + * that is shared between the two PWMs. + */ spinlock_t lock; }; @@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson, { u32 value, clk_shift, clk_enable, enable; unsigned int offset; + unsigned long flags; switch (id) { case 0: @@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson, return; } + spin_lock_irqsave(&meson->lock, flags); + value = readl(meson->base + REG_MISC_AB); value &= ~(MISC_CLK_DIV_MASK << clk_shift); value |= channel->pre_div << clk_shift; @@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson, value = readl(meson->base + REG_MISC_AB); value |= enable; writel(value, meson->base + REG_MISC_AB); + + spin_unlock_irqrestore(&meson->lock, flags); } static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) { u32 value, enable; + unsigned long flags; switch (id) { case 0: @@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) return; } + spin_lock_irqsave(&meson->lock, flags); + value = readl(meson->base + REG_MISC_AB); value &= ~enable; writel(value, meson->base + REG_MISC_AB); + + spin_unlock_irqrestore(&meson->lock, flags); } static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, @@ -296,19 +310,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); struct meson_pwm *meson = to_meson_pwm(chip); - unsigned long flags; int err = 0; if (!state) return -EINVAL; - spin_lock_irqsave(&meson->lock, flags); - if (!state->enabled) { meson_pwm_disable(meson, pwm->hwpwm); channel->state.enabled = false; - goto unlock; + return 0; } if (state->period != channel->state.period || @@ -329,7 +340,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, err = meson_pwm_calc(meson, channel, pwm->hwpwm, state->duty_cycle, state->period); if (err < 0) - goto unlock; + return err; channel->state.polarity = state->polarity; channel->state.period = state->period; @@ -341,9 +352,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, channel->state.enabled = true; } -unlock: - spin_unlock_irqrestore(&meson->lock, flags); - return err; + return 0; } static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index f7b8a86fa5c5e9..ad4a40c0f27cf4 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct 
pwm_device *pwm) } /* Update shadow register first before modifying active register */ + ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, + AQSFRC_RLDCSF_ZRO); ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); /* * Changes to immediate action on Action Qualifier. This puts diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 7c71cdb8a9d8f9..1c64fd8e9234a9 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -399,19 +399,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip) } void pwmchip_sysfs_unexport(struct pwm_chip *chip) -{ - struct device *parent; - - parent = class_find_device(&pwm_class, NULL, chip, - pwmchip_sysfs_match); - if (parent) { - /* for class_find_device() */ - put_device(parent); - device_unregister(parent); - } -} - -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) { struct device *parent; unsigned int i; @@ -429,6 +416,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) } put_device(parent); + device_unregister(parent); } static int __init pwm_sysfs_init(void) diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index bad0e0ea4f3059..ef989a15aefc41 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -2145,6 +2145,14 @@ static int riocm_add_mport(struct device *dev, mutex_init(&cm->rx_lock); riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); + if (!cm->rx_wq) { + riocm_error("failed to allocate IBMBOX_%d on %s", + cmbox, mport->name); + rio_release_outb_mbox(mport, cmbox); + kfree(cm); + return -ENOMEM; + } + INIT_WORK(&cm->rx_work, rio_ibmsg_handler); cm->tx_slot = 0; diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index 2d9ec378a8bc34..f85d6b7a198485 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -2,6 +2,7 @@ #include #include #include +#include #include @@ -123,16 +124,12 @@ static u64 dfs_pfn; /* Amount of errors after which we offline */ static unsigned int count_threshold = COUNT_MASK; -/* - * The timer "decays" element count each timer_interval which is 24hrs by - * default. - */ - -#define CEC_TIMER_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ -#define CEC_TIMER_MIN_INTERVAL 1 * 60 * 60 /* 1h */ -#define CEC_TIMER_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ -static struct timer_list cec_timer; -static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL; +/* Each element "decays" each decay_interval which is 24hrs by default. */ +#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ +#define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */ +#define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ +static struct delayed_work cec_work; +static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL; /* * Decrement decay value. 
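
The ras/cec hunks here (the cec_work/decay_interval declarations above and the cec_mod_work()/cec_work_fn() conversion that follows) replace the decay timer with delayed work so the periodic cleaning pass runs in process context and can take ce_mutex. A minimal sketch of that self-rearming pattern, not part of the patch; the demo_* names and the 24h interval are placeholders rather than the driver's own symbols:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>
    #include <linux/jiffies.h>

    static struct delayed_work demo_work;
    static DEFINE_MUTEX(demo_lock);
    static unsigned long demo_interval = 24 * 60 * 60;     /* seconds */

    static void demo_mod_work(unsigned long secs)
    {
            /* takes over from mod_timer(&t, jiffies + secs * HZ) */
            mod_delayed_work(system_wq, &demo_work, secs * HZ);
    }

    static void demo_work_fn(struct work_struct *work)
    {
            mutex_lock(&demo_lock);         /* sleeping lock is fine in a work item */
            /* ... periodic housekeeping ... */
            mutex_unlock(&demo_lock);

            demo_mod_work(demo_interval);   /* re-arm for the next period */
    }

    static void demo_init(void)
    {
            INIT_DELAYED_WORK(&demo_work, demo_work_fn);
            schedule_delayed_work(&demo_work, demo_interval * HZ);
    }

The design point is that a timer callback runs in softirq context, where a mutex may not be taken, while a work item may sleep; that is what lets the spring-cleaning pass serialize with the other ce_mutex users.
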
We're using DECAY_BITS bits to denote decay of an @@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca) /* * @interval in seconds */ -static void cec_mod_timer(struct timer_list *t, unsigned long interval) +static void cec_mod_work(unsigned long interval) { unsigned long iv; - iv = interval * HZ + jiffies; - - mod_timer(t, round_jiffies(iv)); + iv = interval * HZ; + mod_delayed_work(system_wq, &cec_work, round_jiffies(iv)); } -static void cec_timer_fn(struct timer_list *unused) +static void cec_work_fn(struct work_struct *work) { + mutex_lock(&ce_mutex); do_spring_cleaning(&ce_arr); + mutex_unlock(&ce_mutex); - cec_mod_timer(&cec_timer, timer_interval); + cec_mod_work(decay_interval); } /* @@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused) */ static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) { + int min = 0, max = ca->n - 1; u64 this_pfn; - int min = 0, max = ca->n; - while (min < max) { - int tmp = (max + min) >> 1; + while (min <= max) { + int i = (min + max) >> 1; - this_pfn = PFN(ca->array[tmp]); + this_pfn = PFN(ca->array[i]); if (this_pfn < pfn) - min = tmp + 1; + min = i + 1; else if (this_pfn > pfn) - max = tmp; - else { - min = tmp; - break; + max = i - 1; + else if (this_pfn == pfn) { + if (to) + *to = i; + + return i; } } + /* + * When the loop terminates without finding @pfn, min has the index of + * the element slot where the new @pfn should be inserted. The loop + * terminates when min > max, which means the min index points to the + * bigger element while the max index to the smaller element, in-between + * which the new @pfn belongs to. + * + * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3. + */ if (to) *to = min; - this_pfn = PFN(ca->array[min]); - - if (this_pfn == pfn) - return min; - return -ENOKEY; } @@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val) { *(u64 *)data = val; - if (val < CEC_TIMER_MIN_INTERVAL) + if (val < CEC_DECAY_MIN_INTERVAL) return -EINVAL; - if (val > CEC_TIMER_MAX_INTERVAL) + if (val > CEC_DECAY_MAX_INTERVAL) return -EINVAL; - timer_interval = val; + decay_interval = val; - cec_mod_timer(&cec_timer, timer_interval); + cec_mod_work(decay_interval); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n"); @@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v) seq_printf(m, "Flags: 0x%x\n", ca->flags); - seq_printf(m, "Timer interval: %lld seconds\n", timer_interval); + seq_printf(m, "Decay interval: %lld seconds\n", decay_interval); seq_printf(m, "Decays: %lld\n", ca->decays_done); seq_printf(m, "Action threshold: %d\n", count_threshold); @@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void) } decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d, - &timer_interval, &decay_interval_ops); + &decay_interval, &decay_interval_ops); if (!decay) { pr_warn("Error creating decay_interval debugfs node!\n"); goto err; @@ -508,8 +512,8 @@ void __init cec_init(void) if (create_debugfs_nodes()) return; - timer_setup(&cec_timer, cec_timer_fn, 0); - cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL); + INIT_DELAYED_WORK(&cec_work, cec_work_fn); + schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL); pr_info("Correctable Errors collector initialized.\n"); } diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 453615f8ac9a01..3fcd2cbafc8457 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -85,6 +85,18 @@ static int 
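
The rewritten __find_elem() above is the textbook binary search the new comment describes: on a miss the loop exits with min > max, and min is exactly the slot where the new pfn has to be inserted to keep the array sorted. The same invariant in a generic, self-contained form (hypothetical helper, plain u64 keys instead of the packed CEC elements):

    /*
     * Return the index of @key in the sorted array @a of @n entries, or -1.
     * In both cases *ins is set to the slot where @key belongs, i.e. the
     * insertion point that keeps @a sorted.
     */
    static int demo_bsearch_pos(const u64 *a, int n, u64 key, int *ins)
    {
            int lo = 0, hi = n - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (a[mid] < key)
                            lo = mid + 1;
                    else if (a[mid] > key)
                            hi = mid - 1;
                    else {
                            *ins = mid;
                            return mid;
                    }
            }

            /*
             * Not found: every element left of lo is smaller than @key and
             * every element from lo onwards is bigger, so lo is where @key
             * would have to go.
             */
            *ins = lo;
            return -1;
    }

This is the property the new in-tree comment cites from TAOCP vol. 3, section 6.2.1: when the search fails, the boundary index left behind by the loop is the insertion point.
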
pcf8523_write(struct i2c_client *client, u8 reg, u8 value) return 0; } +static int pcf8523_voltage_low(struct i2c_client *client) +{ + u8 value; + int err; + + err = pcf8523_read(client, REG_CONTROL3, &value); + if (err < 0) + return err; + + return !!(value & REG_CONTROL3_BLF); +} + static int pcf8523_select_capacitance(struct i2c_client *client, bool high) { u8 value; @@ -167,6 +179,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) struct i2c_msg msgs[2]; int err; + err = pcf8523_voltage_low(client); + if (err < 0) { + return err; + } else if (err > 0) { + dev_err(dev, "low voltage detected, time is unreliable\n"); + return -EINVAL; + } + msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 1; @@ -251,17 +271,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct i2c_client *client = to_i2c_client(dev); - u8 value; - int ret = 0, err; + int ret; switch (cmd) { case RTC_VL_READ: - err = pcf8523_read(client, REG_CONTROL3, &value); - if (err < 0) - return err; - - if (value & REG_CONTROL3_BLF) - ret = 1; + ret = pcf8523_voltage_low(client); + if (ret < 0) + return ret; if (copy_to_user((void __user *)arg, &ret, sizeof(int))) return -EFAULT; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b7513c5848cf9a..c1c35eccd5b653 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1901,7 +1901,7 @@ static void qeth_bridgeport_an_set_cb(void *priv, l2entry = (struct qdio_brinfo_entry_l2 *)entry; code = IPA_ADDR_CHANGE_CODE_MACADDR; - if (l2entry->addr_lnid.lnid) + if (l2entry->addr_lnid.lnid < VLAN_N_VID) code |= IPA_ADDR_CHANGE_CODE_VLANID; qeth_bridge_emit_host_event(card, anev_reg_unreg, code, (struct net_if_token *)&l2entry->nit, diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e8ae4d671d233b..097305949a955c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -830,7 +830,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) ((u64)err_entry->data.err_warn_bitmap_hi << 32) | (u64)err_entry->data.err_warn_bitmap_lo; for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { - if (err_warn_bit_map & (u64) (1 << i)) { + if (err_warn_bit_map & ((u64)1 << i)) { err_warn = i; break; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index f2c561ca731a32..cd2c247d6d0ce1 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -641,6 +641,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) if (ndev->flags & IFF_LOOPBACK) { ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); + if (!ndev) { + err = -ENETUNREACH; + goto rel_neigh; + } mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", n->dev->name, ndev->name, mtu); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 12dc7100bb4c24..d1154baa9436a0 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -1173,10 +1173,8 @@ static int __init alua_init(void) int r; kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); - if (!kaluad_wq) { - /* Temporary failure, bypass */ - return SCSI_DH_DEV_TEMP_BUSY; - } + if (!kaluad_wq) + return -ENOMEM; r = scsi_register_device_handler(&alua_dh); if (r != 0) { diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 231eb79efa320f..b141d1061f38ea 100644 --- 
a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander( list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); + sas_port_delete(phy->port); + phy->port = NULL; return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index cb19b12e72116d..55cd96e2469c65 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -341,7 +341,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.scsi_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -357,7 +357,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; @@ -384,39 +384,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. */ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -424,12 +424,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } rcu_read_unlock(); @@ -491,7 +491,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); -buffer_done: + /* RCU is already unlocked. 
*/ + goto buffer_done; + + rcu_unlock_buf_done: + rcu_read_unlock(); + + buffer_done: len = strnlen(buf, PAGE_SIZE); if (unlikely(len >= (PAGE_SIZE - 1))) { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 0d214e6b8e9aeb..f3c6801c0b3128 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7094,7 +7094,10 @@ int lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) { struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, - rrq->nlp_DID); + rrq->nlp_DID); + if (!ndlp) + return 1; + if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) return lpfc_issue_els_rrq(rrq->vport, ndlp, rrq->nlp_DID, rrq); diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c index 8fd28b056f73f3..3383314a3882bc 100644 --- a/drivers/scsi/qedi/qedi_dbg.c +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -16,10 +16,6 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -28,9 +24,9 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); va_end(va); } @@ -41,10 +37,6 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -56,9 +48,9 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -70,10 +62,6 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -85,10 +73,10 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", - dev_name(&qedi->pdev->dev), nfunc, line, + dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else - pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -100,10 +88,6 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -115,9 +99,9 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 
4130b9117055f8..1b7049dce1699b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -810,8 +810,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, struct qedi_endpoint *qedi_ep; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; - struct qed_dev *cdev = NULL; - struct qedi_uio_dev *udev = NULL; struct iscsi_path path_req; u32 msg_type = ISCSI_KEVENT_IF_DOWN; u32 iscsi_cid = QEDI_CID_RESERVED; @@ -831,8 +829,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, } qedi = iscsi_host_priv(shost); - cdev = qedi->cdev; - udev = qedi->udev; if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) || test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index de3f2a097451d0..1f1a05a90d3d7f 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3261,6 +3261,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) "Async done-%s res %x, WWPN %8phC \n", sp->name, res, fcport->port_name); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (res == QLA_FUNCTION_TIMEOUT) return; @@ -4604,6 +4606,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 3781e8109dd740..98f2d076f938f3 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -3697,8 +3697,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, return -ETIMEDOUT; msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies); - if (msecs_blocked >= timeout_msecs) - return -ETIMEDOUT; + if (msecs_blocked >= timeout_msecs) { + rc = -ETIMEDOUT; + goto out; + } timeout_msecs -= msecs_blocked; } } @@ -6378,7 +6380,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) else mask = DMA_BIT_MASK(32); - rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); + rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); if (rc) { dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); goto disable_device; diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 895a9b5ac98993..30c22e16b1e34b 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -340,24 +340,21 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, goto dealloc_host; } - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ufshcd_init_lanes_per_dir(hba); err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Initialization failed\n"); - goto out_disable_rpm; + goto dealloc_host; } platform_set_drvdata(pdev, hba); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; -out_disable_rpm: - pm_runtime_disable(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); dealloc_host: ufshcd_dealloc_host(hba); out: diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 3183fa8c585782..b8b59cfeacd1f9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1914,7 +1914,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); /* Get the descriptor */ - if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + if (hba->dev_cmd.query.descriptor && + lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { 
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + GENERAL_UPIU_REQUEST_SIZE; u16 resp_len; diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index 4e931fdf4d0918..011a40b5fb4907 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -1104,7 +1104,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) static int pwrap_init_cipher(struct pmic_wrapper *wrp) { int ret; - u32 rdata; + u32 rdata = 0; pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST); diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index d44d0e687ab8ad..2a43d6e99962f8 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -285,6 +285,9 @@ static int __init renesas_soc_init(void) /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ if ((product & 0x7fff) == 0x5210) product ^= 0x11; + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ + if ((product & 0x7fff) == 0x5211) + product ^= 0x12; if (soc->id && ((product >> 8) & 0xff) != soc->id) { pr_warn("SoC mismatch (product = 0x%x)\n", product); return -ENODEV; diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index 96882ffde67ea6..3b81e1d75a97e7 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = { }; #define RK3288_GRF_SOC_CON0 0x244 +#define RK3288_GRF_SOC_CON2 0x24c static const struct rockchip_grf_value rk3288_defaults[] __initconst = { { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) }, + { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) }, }; static const struct rockchip_grf_info rk3288_grf __initconst = { diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 729be74621e374..f41333817c501f 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1416,12 +1416,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) { - struct device *dev = param; - - if (dev != chan->device->dev->parent) - return false; - - return true; + return param == chan->device->dev; } static struct pxa2xx_spi_master * diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h index 2f8e2bf70941dc..7677da889f1255 100644 --- a/drivers/staging/erofs/erofs_fs.h +++ b/drivers/staging/erofs/erofs_fs.h @@ -17,10 +17,16 @@ #define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EROFS_SUPER_OFFSET 1024 +/* + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be + * incompatible with this kernel version. + */ +#define EROFS_ALL_REQUIREMENTS 0 + struct erofs_super_block { /* 0 */__le32 magic; /* in the little endian */ /* 4 */__le32 checksum; /* crc32c(super_block) */ -/* 8 */__le32 features; +/* 8 */__le32 features; /* (aka. feature_compat) */ /* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ /* 13 */__u8 reserved; @@ -34,9 +40,10 @@ struct erofs_super_block { /* 44 */__le32 xattr_blkaddr; /* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ /* 64 */__u8 volume_name[16]; /* volume name */ +/* 80 */__le32 requirements; /* (aka. 
feature_incompat) */ -/* 80 */__u8 reserved2[48]; /* 128 bytes */ -} __packed; +/* 84 */__u8 reserved2[44]; +} __packed; /* 128 bytes */ #define __EROFS_BIT(_prefix, _cur, _pre) enum { \ _prefix ## _cur ## _BIT = _prefix ## _pre ## _BIT + \ diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h index 58d8cbc3f921f1..8ce37091db2051 100644 --- a/drivers/staging/erofs/internal.h +++ b/drivers/staging/erofs/internal.h @@ -111,6 +111,8 @@ struct erofs_sb_info { u8 uuid[16]; /* 128-bit uuid for volume */ u8 volume_name[16]; /* volume name */ + u32 requirements; + char *dev_name; unsigned int mount_opt; diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c index b0583cdb079ae4..b49ebdf6ebdaf0 100644 --- a/drivers/staging/erofs/super.c +++ b/drivers/staging/erofs/super.c @@ -75,6 +75,22 @@ static void destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, i_callback); } +static bool check_layout_compatibility(struct super_block *sb, + struct erofs_super_block *layout) +{ + const unsigned int requirements = le32_to_cpu(layout->requirements); + + EROFS_SB(sb)->requirements = requirements; + + /* check if current kernel meets all mandatory requirements */ + if (requirements & (~EROFS_ALL_REQUIREMENTS)) { + errln("unidentified requirements %x, please upgrade kernel version", + requirements & ~EROFS_ALL_REQUIREMENTS); + return false; + } + return true; +} + static int superblock_read(struct super_block *sb) { struct erofs_sb_info *sbi; @@ -108,6 +124,9 @@ static int superblock_read(struct super_block *sb) goto out; } + if (!check_layout_compatibility(sb, layout)) + goto out; + sbi->blocks = le32_to_cpu(layout->blocks); sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); #ifdef CONFIG_EROFS_FS_XATTR diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c index cff7b1e07153b9..b688ebc0174059 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/controls.c +++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c @@ -576,7 +576,7 @@ static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev, dev->colourfx.enable ? "true" : "false", dev->colourfx.u, dev->colourfx.v, ret, (ret == 0 ? 0 : -EINVAL)); - return (ret == 0 ? 0 : EINVAL); + return (ret == 0 ? 0 : -EINVAL); } static int ctrl_set_colfx(struct bm2835_mmal_dev *dev, @@ -600,7 +600,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev, "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n", __func__, mmal_ctrl, ctrl->id, ctrl->val, ret, (ret == 0 ? 0 : -EINVAL)); - return (ret == 0 ? 0 : EINVAL); + return (ret == 0 ? 
0 : -EINVAL); } static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev, diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index a2c9bfae3d867e..b139713289a42d 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -171,7 +171,8 @@ static int tsens_probe(struct platform_device *pdev) if (tmdev->ops->calibrate) { ret = tmdev->ops->calibrate(tmdev); if (ret < 0) { - dev_err(dev, "tsens calibration failed\n"); + if (ret != -EPROBE_DEFER) + dev_err(dev, "tsens calibration failed\n"); return ret; } } diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 7aed5337bdd35b..704c8ad045bb55 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -328,6 +328,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); static int rcar_gen3_thermal_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev); + + rcar_thermal_irq_set(priv, false); pm_runtime_put(dev); pm_runtime_disable(dev); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index d31b975dd3fd7b..284e8d052fc3ce 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) static bool dw8250_idma_filter(struct dma_chan *chan, void *param) { - return param == chan->device->dev->parent; + return param == chan->device->dev; } /* @@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) data->uart_16550_compatible = true; } - /* Platforms with iDMA */ + /* Platforms with iDMA 64-bit */ if (platform_get_resource_byname(to_platform_device(p->dev), IORESOURCE_MEM, "lpss_priv")) { data->dma.rx_param = p->dev->parent; diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 63e34d868de8f3..f8503f8fc44e20 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = { static struct uart_driver sunhv_reg = { .owner = THIS_MODULE, .driver_name = "sunhv", - .dev_name = "ttyS", + .dev_name = "ttyHV", .major = TTY_MAJOR, }; diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 9852ec5e6e017e..cc7c856126df5a 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1621,6 +1621,25 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on) static int ci_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver); static int ci_udc_stop(struct usb_gadget *gadget); + +/* Match ISOC IN from the highest endpoint */ +static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget, + struct usb_endpoint_descriptor *desc, + struct usb_ss_ep_comp_descriptor *comp_desc) +{ + struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); + struct usb_ep *ep; + + if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) { + list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) { + if (ep->caps.dir_in && !ep->claimed) + return ep; + } + } + + return NULL; +} + /** * Device operations part of the API to the USB controller hardware, * which don't involve endpoints (or i/o) @@ -1634,6 +1653,7 @@ static const struct usb_gadget_ops usb_gadget_ops = { .vbus_draw = ci_udc_vbus_draw, .udc_start = ci_udc_start, .udc_stop = ci_udc_stop, + .match_ep = ci_udc_match_ep, }; static int init_eps(struct ci_hdrc *ci) diff --git 
a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6082b008969b73..6b641307358433 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -215,6 +215,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */ { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Webcam C270 */ + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 260010abf9d81c..aad7edc29bddd4 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -2673,8 +2673,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb) return; /* Restore urb->transfer_buffer from the end of the allocated area */ - memcpy(&stored_xfer_buffer, urb->transfer_buffer + - urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); + memcpy(&stored_xfer_buffer, + PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length, + dma_get_cache_alignment()), + sizeof(urb->transfer_buffer)); if (usb_urb_dir_in(urb)) { if (usb_pipeisoc(urb->pipe)) @@ -2706,6 +2708,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) * DMA */ kmalloc_size = urb->transfer_buffer_length + + (dma_get_cache_alignment() - 1) + sizeof(urb->transfer_buffer); kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); @@ -2716,7 +2719,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) * Position value of original urb->transfer_buffer pointer to the end * of allocation for later referencing */ - memcpy(kmalloc_ptr + urb->transfer_buffer_length, + memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length, + dma_get_cache_alignment()), &urb->transfer_buffer, sizeof(urb->transfer_buffer)); if (usb_urb_dir_out(urb)) @@ -2801,7 +2805,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info); chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info); chan->speed = qh->dev_speed; - chan->max_packet = dwc2_max_packet(qh->maxp); + chan->max_packet = qh->maxp; chan->xfer_started = 0; chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS; @@ -2879,7 +2883,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) * This value may be modified when the transfer is started * to reflect the actual transfer length */ - chan->multi_count = dwc2_hb_mult(qh->maxp); + chan->multi_count = qh->maxp_mult; if (hsotg->params.dma_desc_enable) { chan->desc_list_addr = qh->desc_list_dma; @@ -3991,19 +3995,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg, static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, u8 dev_addr, - u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps) + u8 ep_num, u8 ep_type, u8 ep_dir, + u16 maxp, u16 maxp_mult) { if (dbg_perio() || ep_type == USB_ENDPOINT_XFER_BULK || ep_type == USB_ENDPOINT_XFER_CONTROL) dev_vdbg(hsotg->dev, - "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n", - dev_addr, ep_num, ep_dir, ep_type, mps); + "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n", + dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult); urb->pipe_info.dev_addr = dev_addr; urb->pipe_info.ep_num = ep_num; urb->pipe_info.pipe_type = ep_type; 
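
The dwc2_alloc_dma_aligned_buffer()/dwc2_free_dma_aligned_buffer() hunks above keep the caller's original transfer_buffer pointer inside the bounce buffer itself, but now round its offset up to the next cache-line boundary so that cache maintenance on the DMA'd payload cannot clobber it. A condensed sketch of that buffer layout, with made-up helper names:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/dma-mapping.h>

    /* layout: [ payload (len) | pad up to cache line | saved original pointer ] */
    static void *demo_alloc_bounce(void *orig, size_t len, gfp_t flags)
    {
            size_t size = len + (dma_get_cache_alignment() - 1) + sizeof(orig);
            u8 *buf = kmalloc(size, flags);

            if (!buf)
                    return NULL;

            /* stash the caller's pointer just past the cache-aligned payload */
            memcpy(PTR_ALIGN(buf + len, dma_get_cache_alignment()),
                   &orig, sizeof(orig));
            return buf;
    }

    static void *demo_restore_pointer(u8 *buf, size_t len)
    {
            void *orig;

            memcpy(&orig, PTR_ALIGN(buf + len, dma_get_cache_alignment()),
                   sizeof(orig));
            return orig;
    }

Without PTR_ALIGN() and the extra (alignment - 1) bytes, the stored pointer could share a cache line with the tail of the DMA buffer and be lost when that line is invalidated around the transfer on a non-coherent platform.
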
urb->pipe_info.pipe_dir = ep_dir; - urb->pipe_info.mps = mps; + urb->pipe_info.maxp = maxp; + urb->pipe_info.maxp_mult = maxp_mult; } /* @@ -4094,8 +4100,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"); dev_dbg(hsotg->dev, - " Max packet size: %d\n", - dwc2_hcd_get_mps(&urb->pipe_info)); + " Max packet size: %d (%d mult)\n", + dwc2_hcd_get_maxp(&urb->pipe_info), + dwc2_hcd_get_maxp_mult(&urb->pipe_info)); dev_dbg(hsotg->dev, " transfer_buffer: %p\n", urb->buf); @@ -4653,8 +4660,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, } dev_vdbg(hsotg->dev, " Speed: %s\n", speed); - dev_vdbg(hsotg->dev, " Max packet size: %d\n", - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); + dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n", + usb_endpoint_maxp(&urb->ep->desc), + usb_endpoint_maxp_mult(&urb->ep->desc)); + dev_vdbg(hsotg->dev, " Data buffer length: %d\n", urb->transfer_buffer_length); dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", @@ -4737,8 +4746,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ep_type, usb_pipein(urb->pipe), - usb_maxpacket(urb->dev, urb->pipe, - !(usb_pipein(urb->pipe)))); + usb_endpoint_maxp(&ep->desc), + usb_endpoint_maxp_mult(&ep->desc)); buf = urb->transfer_buffer; diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index c089ffa1f0a8fa..ce6445a065889b 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info { u8 ep_num; u8 pipe_type; u8 pipe_dir; - u16 mps; + u16 maxp; + u16 maxp_mult; }; struct dwc2_hcd_iso_packet_desc { @@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time { * - USB_ENDPOINT_XFER_ISOC * @ep_is_in: Endpoint direction * @maxp: Value from wMaxPacketSize field of Endpoint Descriptor + * @maxp_mult: Multiplier for maxp * @dev_speed: Device speed. One of the following values: * - USB_SPEED_LOW * - USB_SPEED_FULL @@ -340,6 +342,7 @@ struct dwc2_qh { u8 ep_type; u8 ep_is_in; u16 maxp; + u16 maxp_mult; u8 dev_speed; u8 data_toggle; u8 ping_state; @@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe) return pipe->pipe_type; } -static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe) +static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe) +{ + return pipe->maxp; +} + +static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe) { - return pipe->mps; + return pipe->maxp_mult; } static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe) @@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb) static inline bool dbg_perio(void) { return false; } #endif -/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */ -#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03)) - -/* Packet size for any kind of endpoint descriptor */ -#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff) - /* * Returns true if frame1 index is greater than frame2 index. The comparison * is done modulo FRLISTEN_64_SIZE. 
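
The dwc2 changes above stop carrying wMaxPacketSize around in its raw encoded form (the removed dwc2_max_packet()/dwc2_hb_mult() macros) and instead store maxp and maxp_mult separately, taken from the generic USB helpers. For reference, the decode those helpers perform; the function name here is invented:

    #include <linux/usb/ch9.h>

    static void demo_decode_wmaxpacketsize(const struct usb_endpoint_descriptor *desc,
                                           u16 *maxp, u16 *maxp_mult)
    {
            u16 w = le16_to_cpu(desc->wMaxPacketSize);

            *maxp      = w & 0x7ff;                 /* usb_endpoint_maxp(desc)      */
            *maxp_mult = 1 + ((w >> 11) & 0x3);     /* usb_endpoint_maxp_mult(desc) */
    }

Bits 12:11 only carry a real multiplier for high-speed periodic endpoints; everywhere else they are zero, so maxp_mult evaluates to 1 and maxp * maxp_mult is simply the packet size.
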
This accounts for the rollover of the diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 88b5dcf3aefc57..a052d39b4375e4 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, dev_err(hsotg->dev, " Speed: %s\n", speed); - dev_err(hsotg->dev, " Max packet size: %d\n", - dwc2_hcd_get_mps(&urb->pipe_info)); + dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n", + dwc2_hcd_get_maxp(&urb->pipe_info), + dwc2_hcd_get_maxp_mult(&urb->pipe_info)); dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length); dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", urb->buf, (unsigned long)urb->dma); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index ea3aa640c15c18..68bbac64b7536a 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg, static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { - int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); + int bytecount = qh->maxp_mult * qh->maxp; int ls_search_slice; int err = 0; int host_interval_in_sched; @@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg, u32 max_channel_xfer_size; int status = 0; - max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp); + max_xfer_size = qh->maxp * qh->maxp_mult; max_channel_xfer_size = hsotg->params.max_transfer_size; if (max_xfer_size > max_channel_xfer_size) { @@ -1517,8 +1517,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED && dev_speed != USB_SPEED_HIGH); - int maxp = dwc2_hcd_get_mps(&urb->pipe_info); - int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp); + int maxp = dwc2_hcd_get_maxp(&urb->pipe_info); + int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info); + int bytecount = maxp_mult * maxp; char *speed, *type; /* Initialize QH */ @@ -1531,6 +1532,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, qh->data_toggle = DWC2_HC_PID_DATA0; qh->maxp = maxp; + qh->maxp_mult = maxp_mult; INIT_LIST_HEAD(&qh->qtd_list); INIT_LIST_HEAD(&qh->qh_list_entry); diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index cadc01336bf80e..7ba6afc7ef230e 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -440,6 +440,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, struct xhci_ep_priv *epriv; struct xhci_slot_priv *spriv = dev->debugfs_private; + if (!spriv) + return; + if (spriv->eps[ep_index]) return; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index b62953ee0fc656..f896a00662efe3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1604,8 +1604,13 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } - if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) + if (hcd->speed >= HCD_USB3 && + (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { + slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); + if (slot_id && xhci->devs[slot_id]) + xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); + } if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { xhci_dbg(xhci, "port 
resume event for port %d\n", port_id); @@ -1793,6 +1798,14 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; struct xhci_command *command; + + /* + * Avoid resetting endpoint if link is inactive. Can cause host hang. + * Device will be reset soon to recover the link so don't do anything + */ + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) + return; + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) return; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f30b065095fa7b..4ffadca2c71a62 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1441,6 +1441,10 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag xhci_dbg(xhci, "urb submitted during PCI suspend\n"); return -ESHUTDOWN; } + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { + xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); + return -ENODEV; + } if (usb_endpoint_xfer_isoc(&urb->ep->desc)) num_tds = urb->number_of_packets; @@ -3724,6 +3728,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } /* If necessary, update the number of active TTs on this root port */ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); + virt_dev->flags = 0; ret = 0; command_cleanup: @@ -5030,16 +5035,26 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) } else { /* * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol - * minor revision instead of sbrn + * minor revision instead of sbrn. Minor revision is a two digit + * BCD containing minor and sub-minor numbers, only show minor. */ - minor_rev = xhci->usb3_rhub.min_rev; - if (minor_rev) { + minor_rev = xhci->usb3_rhub.min_rev / 0x10; + + switch (minor_rev) { + case 2: + hcd->speed = HCD_USB32; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + hcd->self.root_hub->rx_lanes = 2; + hcd->self.root_hub->tx_lanes = 2; + break; + case 1: hcd->speed = HCD_USB31; hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + break; } - xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", + xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", minor_rev, - minor_rev ? "Enhanced" : ""); + minor_rev ? "Enhanced " : ""); xhci->usb3_rhub.hcd = hcd; /* xHCI private pointer was set in xhci_pci_probe for the second diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index dc00f59c8e6923..761b341d27b076 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1010,6 +1010,15 @@ struct xhci_virt_device { u8 real_port; struct xhci_interval_bw_table *bw_table; struct xhci_tt_bw_info *tt_info; + /* + * flags for state tracking based on events and issued commands. + * Software can not rely on states from output contexts because of + * latency between events and xHC updating output context values. + * See xhci 1.1 section 4.8.3 for more details + */ + unsigned long flags; +#define VDEV_PORT_ERROR BIT(0) /* Port error, link inactive */ + /* The current max exit latency for the enabled USB3 link states. */ u16 current_mel; /* Used for the debugfs interfaces. 
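
In the xhci_gen_setup() hunk above, the supported-protocol minor revision read from the controller is a two-digit BCD value, so 0x10 means USB 3.1 and 0x20 means USB 3.2; dividing by 0x10 keeps only the minor digit that picks hcd->speed. A toy illustration of that split, with an invented helper name:

    /* high nibble = minor (the x in USB 3.x), low nibble = sub-minor */
    static void demo_split_minor_rev(u8 bcd, unsigned int *minor, unsigned int *subminor)
    {
            *minor    = bcd >> 4;   /* same result as bcd / 0x10 */
            *subminor = bcd & 0xf;
    }

So a host reporting 0x20 is logged as "USB 3.2 Enhanced SuperSpeed" rather than "USB 3.20", which is what printing the raw value with %x would have produced.
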
*/ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d8c474b386a811..ea891195bbdf06 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1171,6 +1171,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */ .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ @@ -1772,6 +1776,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), .driver_info = RSVD(5) | RSVD(6) }, { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ + .driver_info = RSVD(7) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 5a6df6e9ad5776..5d7b21ea623834 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 559941ca884daf..b0175f17d1a2b6 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -155,3 +155,6 @@ #define SMART_VENDOR_ID 0x0b8c #define SMART_PRODUCT_ID 0x2303 +/* Allied Telesis VT-Kit3 */ +#define AT_VENDOR_ID 0x0caa +#define AT_VTKIT3_PRODUCT_ID 0x3001 diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h index 6b2140f966ef87..7e14c2d7cf734f 100644 --- a/drivers/usb/storage/unusual_realtek.h +++ b/drivers/usb/storage/unusual_realtek.h @@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999, "USB Card Reader", USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), +UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999, + "Realtek", + "USB Card Reader", + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), + UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999, "Realtek", "USB Card Reader", diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c index 82bed9810be6c4..62a0060d39d8dc 100644 --- a/drivers/usb/typec/fusb302/fusb302.c +++ b/drivers/usb/typec/fusb302/fusb302.c @@ -641,6 +641,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip, return ret; chip->intr_togdone = false; } else { + /* Datasheet says vconn MUST be off when toggling */ + WARN(chip->vconn_on, "Vconn is on during toggle start"); /* unmask TOGDONE interrupt */ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA, FUSB_REG_MASKA_TOGDONE); diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 64833879f75d3c..7a386fb30bf1fc 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -34,6 +34,7 @@ #include #include 
#include +#include #define DRIVER_VERSION "0.3" #define DRIVER_AUTHOR "Alex Williamson " @@ -904,30 +905,17 @@ void *vfio_device_data(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_device_data); -/* Given a referenced group, check if it contains the device */ -static bool vfio_dev_present(struct vfio_group *group, struct device *dev) -{ - struct vfio_device *device; - - device = vfio_group_get_device(group, dev); - if (!device) - return false; - - vfio_device_put(device); - return true; -} - /* * Decrement the device reference count and wait for the device to be * removed. Open file descriptors for the device... */ void *vfio_del_group_dev(struct device *dev) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct vfio_device *device = dev_get_drvdata(dev); struct vfio_group *group = device->group; void *device_data = device->device_data; struct vfio_unbound_dev *unbound; unsigned int i = 0; - long ret; bool interrupted = false; /* @@ -964,6 +952,8 @@ void *vfio_del_group_dev(struct device *dev) * interval with counter to allow the driver to take escalating * measures to release the device if it has the ability to do so. */ + add_wait_queue(&vfio.release_q, &wait); + do { device = vfio_group_get_device(group, dev); if (!device) @@ -975,12 +965,10 @@ void *vfio_del_group_dev(struct device *dev) vfio_device_put(device); if (interrupted) { - ret = wait_event_timeout(vfio.release_q, - !vfio_dev_present(group, dev), HZ * 10); + wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10); } else { - ret = wait_event_interruptible_timeout(vfio.release_q, - !vfio_dev_present(group, dev), HZ * 10); - if (ret == -ERESTARTSYS) { + wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10); + if (signal_pending(current)) { interrupted = true; dev_warn(dev, "Device is currently in use, task" @@ -989,8 +977,10 @@ void *vfio_del_group_dev(struct device *dev) current->comm, task_pid_nr(current)); } } - } while (ret <= 0); + } while (1); + + remove_wait_queue(&vfio.release_q, &wait); /* * In order to support multiple devices per group, devices can be * plucked from the group while other devices in the group are still diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c index 46302854317367..59e1cae5794814 100644 --- a/drivers/video/fbdev/hgafb.c +++ b/drivers/video/fbdev/hgafb.c @@ -285,6 +285,8 @@ static int hga_card_detect(void) hga_vram_len = 0x08000; hga_vram = ioremap(0xb0000, hga_vram_len); + if (!hga_vram) + goto error; if (request_region(0x3b0, 12, "hgafb")) release_io_ports = 1; diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index ecdcf358ad5eac..ffcf553719a314 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) info->fix.smem_start = addr; info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 
0x400000 : 0x800000); + if (!info->screen_base) { + release_mem_region(addr, size); + framebuffer_release(info); + return -ENOMEM; + } info->fix.mmio_start = addr + 0x800000; par->dc_regs = ioremap(addr + 0x800000, 0x1000); par->cmap_regs_phys = addr + 0x840000; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 5ea8909a41f952..b165c46aca741b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1967,6 +1967,7 @@ comment "Watchdog Pretimeout Governors" config WATCHDOG_PRETIMEOUT_GOV bool "Enable watchdog pretimeout governors" + depends on WATCHDOG_CORE help The option allows to select watchdog pretimeout governors. diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 2b52514eaa86a8..7e7bdcbbc741b3 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -178,8 +178,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog, static int imx2_wdt_set_timeout(struct watchdog_device *wdog, unsigned int new_timeout) { - __imx2_wdt_set_timeout(wdog, new_timeout); + unsigned int actual; + actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000); + __imx2_wdt_set_timeout(wdog, actual); wdog->timeout = new_timeout; return 0; } diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 91da7e44d5d4f4..3a144eecb6a728 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -538,7 +538,6 @@ static int __write_ring(struct pvcalls_data_intf *intf, int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { - struct pvcalls_bedata *bedata; struct sock_mapping *map; int sent, tot_sent = 0; int count = 0, flags; @@ -550,7 +549,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg, map = pvcalls_enter_sock(sock); if (IS_ERR(map)) return PTR_ERR(map); - bedata = dev_get_drvdata(&pvcalls_front_dev->dev); mutex_lock(&map->active.out_mutex); if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { @@ -633,7 +631,6 @@ static int __read_ring(struct pvcalls_data_intf *intf, int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - struct pvcalls_bedata *bedata; int ret; struct sock_mapping *map; @@ -643,7 +640,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, map = pvcalls_enter_sock(sock); if (IS_ERR(map)) return PTR_ERR(map); - bedata = dev_get_drvdata(&pvcalls_front_dev->dev); mutex_lock(&map->active.in_mutex); if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 092981171df17f..d75a2385b37c77 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -83,6 +83,7 @@ struct xb_req_data { int num_vecs; int err; enum xb_req_state state; + bool user_req; void (*cb)(struct xb_req_data *); void *par; }; @@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void); int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par); void xenbus_dev_queue_reply(struct xb_req_data *req); +extern unsigned int xb_dev_generation_id; + #endif diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0782ff3c227352..39c63152a35800 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -62,6 +62,8 @@ #include "xenbus.h" +unsigned int xb_dev_generation_id; + /* * An element of a list of outstanding transactions, for which we're * still waiting a reply. 
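
The vfio_del_group_dev() rework earlier in this patch drops the wait_event_*() helpers in favor of an explicitly registered waiter driven by wait_woken(), so that the device lookup, which takes a mutex, happens while the task is still runnable instead of inside a wait_event() condition. A minimal, generic sketch of that pattern with hypothetical names:

    #include <linux/wait.h>
    #include <linux/sched/signal.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_done;

    static void demo_wait_for_done(void)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);

            /* register first, so a wake-up between the check and the sleep is kept */
            add_wait_queue(&demo_wq, &wait);

            while (!READ_ONCE(demo_done)) {
                    if (signal_pending(current))
                            break;
                    wait_woken(&wait, TASK_INTERRUPTIBLE, 10 * HZ);
            }

            remove_wait_queue(&demo_wq, &wait);
    }

    /* producer side: WRITE_ONCE(demo_done, true); wake_up(&demo_wq); */

Because the waiter stays on the queue for the whole loop, the condition (here a simple flag, in vfio the group/device lookup) is re-evaluated with the task in TASK_RUNNING, which avoids the "do not call blocking ops when !TASK_RUNNING" warning the old construction could trigger.
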
@@ -69,6 +71,7 @@ struct xenbus_transaction_holder { struct list_head list; struct xenbus_transaction handle; + unsigned int generation_id; }; /* @@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type, rc = -ENOMEM; goto out; } + trans->generation_id = xb_dev_generation_id; list_add(&trans->list, &u->transactions); } else if (msg->hdr.tx_id != 0 && !xenbus_get_transaction(u, msg->hdr.tx_id)) @@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type, !(msg->hdr.len == 2 && (!strcmp(msg->body, "T") || !strcmp(msg->body, "F")))) return xenbus_command_reply(u, XS_ERROR, "EINVAL"); + else if (msg_type == XS_TRANSACTION_END) { + trans = xenbus_get_transaction(u, msg->hdr.tx_id); + if (trans && trans->generation_id != xb_dev_generation_id) { + list_del(&trans->list); + kfree(trans); + if (!strcmp(msg->body, "T")) + return xenbus_command_reply(u, XS_ERROR, + "EAGAIN"); + else + return xenbus_command_reply(u, + XS_TRANSACTION_END, + "OK"); + } + } rc = xenbus_dev_request_and_reply(&msg->hdr, u); if (rc && trans) { diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 49a3874ae6bb45..ddc18da61834e6 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -105,6 +105,7 @@ static void xs_suspend_enter(void) static void xs_suspend_exit(void) { + xb_dev_generation_id++; spin_lock(&xs_state_lock); xs_suspend_active--; spin_unlock(&xs_state_lock); @@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req) spin_lock(&xs_state_lock); } - if (req->type == XS_TRANSACTION_START) + if (req->type == XS_TRANSACTION_START && !req->user_req) xs_state_users++; xs_state_users++; rq_id = xs_request_id++; @@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req) spin_lock(&xs_state_lock); xs_state_users--; if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) || - (req->type == XS_TRANSACTION_END && + (req->type == XS_TRANSACTION_END && !req->user_req && !WARN_ON_ONCE(req->msg.type == XS_ERROR && !strcmp(req->body, "ENOENT")))) xs_state_users--; @@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) req->num_vecs = 1; req->cb = xenbus_dev_queue_reply; req->par = par; + req->user_req = true; xs_send(req, msg); @@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t, req->vec = iovec; req->num_vecs = num_vecs; req->cb = xs_wake_up; + req->user_req = false; msg.req_id = 0; msg.tx_id = t.id; diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index dec14b739b1016..859274e384175d 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -745,6 +745,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) u64 total = 0; int i; +again: do { enqueued = 0; mutex_lock(&fs_devices->device_list_mutex); @@ -756,6 +757,10 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) mutex_unlock(&fs_devices->device_list_mutex); total += enqueued; } while (enqueued && total < 10000); + if (fs_devices->seed) { + fs_devices = fs_devices->seed; + goto again; + } if (enqueued == 0) return; diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index 18814f1d67d9b1..3c0bad577859b3 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c @@ -457,7 +457,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { {STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"}, {STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO, "STATUS_ALLOTTED_SPACE_EXCEEDED"}, - {STATUS_INSUFFICIENT_RESOURCES, -EREMOTEIO, + {STATUS_INSUFFICIENT_RESOURCES, 
-EAGAIN, "STATUS_INSUFFICIENT_RESOURCES"}, {STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"}, {STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"}, diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 39843fa7e11b07..809c1edffbaf16 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry, if (sd) { /* Coordinate with configfs_readdir */ spin_lock(&configfs_dirent_lock); - /* Coordinate with configfs_attach_attr where will increase - * sd->s_count and update sd->s_dentry to new allocated one. - * Only set sd->dentry to null when this dentry is the only - * sd owner. - * If not do so, configfs_d_iput may run just after - * configfs_attach_attr and set sd->s_dentry to null - * even it's still in use. + /* + * Set sd->s_dentry to null only when this dentry is the one + * that is going to be killed. Otherwise configfs_d_iput may + * run just after configfs_attach_attr and set sd->s_dentry to + * NULL even it's still in use. */ - if (atomic_read(&sd->s_count) <= 2) + if (sd->s_dentry == dentry) sd->s_dentry = NULL; spin_unlock(&configfs_dirent_lock); @@ -1755,12 +1753,19 @@ int configfs_register_group(struct config_group *parent_group, inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); ret = create_default_group(parent_group, group); - if (!ret) { - spin_lock(&configfs_dirent_lock); - configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); - spin_unlock(&configfs_dirent_lock); - } + if (ret) + goto err_out; + + spin_lock(&configfs_dirent_lock); + configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); + spin_unlock(&configfs_dirent_lock); + inode_unlock(d_inode(parent)); + return 0; +err_out: inode_unlock(d_inode(parent)); + mutex_lock(&subsys->su_mutex); + unlink_group(group); + mutex_unlock(&subsys->su_mutex); return ret; } EXPORT_SYMBOL(configfs_register_group); diff --git a/fs/dax.c b/fs/dax.c index 004c8ac1117c4c..75a289c31c7e5d 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -908,7 +908,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, goto unlock_pmd; flush_cache_page(vma, address, pfn); - pmd = pmdp_huge_clear_flush(vma, address, pmdp); + pmd = pmdp_invalidate(vma, address, pmdp); pmd = pmd_wrprotect(pmd); pmd = pmd_mkclean(pmd); set_pmd_at(vma->vm_mm, address, pmdp, pmd); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a4b6eacf22ea13..44ea7ac69ef48b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1744,6 +1744,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, return -ENOSPC; } +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, struct inode *inode, block_t count) @@ -1752,13 +1753,21 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, spin_lock(&sbi->stat_lock); f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); - f2fs_bug_on(sbi, inode->i_blocks < sectors); sbi->total_valid_block_count -= (block_t)count; if (sbi->reserved_blocks && sbi->current_reserved_blocks < sbi->reserved_blocks) sbi->current_reserved_blocks = min(sbi->reserved_blocks, sbi->current_reserved_blocks + count); spin_unlock(&sbi->stat_lock); + if (unlikely(inode->i_blocks < sectors)) { + f2fs_msg(sbi->sb, KERN_WARNING, + "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", + inode->i_ino, + (unsigned long long)inode->i_blocks, + (unsigned long long)sectors); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return; + } f2fs_i_blocks_write(inode, 
count, false, true); } @@ -2488,7 +2497,9 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page) static inline int inline_xattr_size(struct inode *inode) { - return get_inline_xattr_addrs(inode) * sizeof(__le32); + if (f2fs_has_inline_xattr(inode)) + return get_inline_xattr_addrs(inode) * sizeof(__le32); + return 0; } static inline int f2fs_has_inline_data(struct inode *inode) @@ -2727,7 +2738,6 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); -void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); static inline void verify_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index dd608b819a3c33..0f31df01e36c60 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -179,8 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page) if (provided != calculated) f2fs_msg(sbi->sb, KERN_WARNING, - "checksum invalid, ino = %x, %x vs. %x", - ino_of_node(page), provided, calculated); + "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", + page->index, ino_of_node(page), provided, calculated); return provided == calculated; } @@ -476,6 +476,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) return inode; bad_inode: + f2fs_inode_synced(inode); iget_failed(inode); trace_f2fs_iget_exit(inode, ret); return ERR_PTR(ret); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 19a0d83aae6537..e2d9edad758cdf 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1180,8 +1180,14 @@ int f2fs_remove_inode_page(struct inode *inode) f2fs_put_dnode(&dn); return -EIO; } - f2fs_bug_on(F2FS_I_SB(inode), - inode->i_blocks != 0 && inode->i_blocks != 8); + + if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { + f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, + "Inconsistent i_blocks, ino:%lu, iblocks:%llu", + inode->i_ino, + (unsigned long long)inode->i_blocks); + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); + } /* will put inode & node pages */ err = truncate_node(&dn); @@ -1276,9 +1282,10 @@ static int read_node_page(struct page *page, int op_flags) int err; if (PageUptodate(page)) { -#ifdef CONFIG_F2FS_CHECK_FS - f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page)); -#endif + if (!f2fs_inode_chksum_verify(sbi, page)) { + ClearPageUptodate(page); + return -EBADMSG; + } return LOCKED_PAGE; } @@ -2073,6 +2080,9 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, if (unlikely(nid == 0)) return false; + if (unlikely(f2fs_check_nid_range(sbi, nid))) + return false; + i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; i->state = FREE_NID; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index ae0e5f2e67b4ae..bf5c5f4fa77ea2 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -485,7 +485,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, goto err; f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); - f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page)); + + if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { + f2fs_msg(sbi->sb, KERN_WARNING, + "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", + inode->i_ino, ofs_of_node(dn.node_page), + ofs_of_node(page)); + err = -EFAULT; + goto err; + } for (; start < end; start++, dn.ofs_in_node++) { block_t src, dest; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 03fa2c4d3d793f..8fc3edb6760c2f 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ 
-3069,13 +3069,18 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) { int err; struct f2fs_sb_info *sbi = fio->sbi; + unsigned int segno; fio->new_blkaddr = fio->old_blkaddr; /* i/o temperature is needed for passing down write hints */ __get_segment_type(fio); - f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi, - GET_SEGNO(sbi, fio->new_blkaddr))->type)); + segno = GET_SEGNO(sbi, fio->new_blkaddr); + + if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + return -EFAULT; + } stat_inc_inplace_blocks(fio->sbi); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index b3d9e317ff0c14..5079532cb176b7 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -660,7 +660,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr) static inline int check_block_count(struct f2fs_sb_info *sbi, int segno, struct f2fs_sit_entry *raw_sit) { -#ifdef CONFIG_F2FS_CHECK_FS bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false; int valid_blocks = 0; int cur_pos = 0, next_pos; @@ -687,7 +686,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi, set_sbi_flag(sbi, SBI_NEED_FSCK); return -EINVAL; } -#endif + /* check segment usage, and check boundary of a given segment number */ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg || segno > TOTAL_SEGS(sbi) - 1)) { diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 409a637f7a926b..88e30f7cf9e14e 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -205,12 +205,17 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index) return handler; } -static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index, - size_t len, const char *name) +static struct f2fs_xattr_entry *__find_xattr(void *base_addr, + void *last_base_addr, int index, + size_t len, const char *name) { struct f2fs_xattr_entry *entry; list_for_each_xattr(entry, base_addr) { + if ((void *)(entry) + sizeof(__u32) > last_base_addr || + (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) + return NULL; + if (entry->e_name_index != index) continue; if (entry->e_name_len != len) @@ -300,20 +305,22 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, const char *name, struct f2fs_xattr_entry **xe, void **base_addr, int *base_size) { - void *cur_addr, *txattr_addr, *last_addr = NULL; + void *cur_addr, *txattr_addr, *last_txattr_addr; + void *last_addr = NULL; nid_t xnid = F2FS_I(inode)->i_xattr_nid; - unsigned int size = xnid ? 
VALID_XATTR_BLOCK_SIZE : 0; unsigned int inline_size = inline_xattr_size(inode); int err = 0; - if (!size && !inline_size) + if (!xnid && !inline_size) return -ENODATA; - *base_size = inline_size + size + XATTR_PADDING_SIZE; + *base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE; txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS); if (!txattr_addr) return -ENOMEM; + last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode); + /* read from inline xattr */ if (inline_size) { err = read_inline_xattr(inode, ipage, txattr_addr); @@ -340,7 +347,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, else cur_addr = txattr_addr; - *xe = __find_xattr(cur_addr, index, len, name); + *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name); + if (!*xe) { + err = -EFAULT; + goto out; + } check: if (IS_XATTR_LAST_ENTRY(*xe)) { err = -ENODATA; @@ -584,7 +595,8 @@ static int __f2fs_setxattr(struct inode *inode, int index, struct page *ipage, int flags) { struct f2fs_xattr_entry *here, *last; - void *base_addr; + void *base_addr, *last_base_addr; + nid_t xnid = F2FS_I(inode)->i_xattr_nid; int found, newsize; size_t len; __u32 new_hsize; @@ -608,8 +620,14 @@ static int __f2fs_setxattr(struct inode *inode, int index, if (error) return error; + last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode); + /* find entry with wanted name. */ - here = __find_xattr(base_addr, index, len, name); + here = __find_xattr(base_addr, last_base_addr, index, len, name); + if (!here) { + error = -EFAULT; + goto exit; + } found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1; diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index dbcd1d16e66982..2a4ecaf338eac6 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -74,6 +74,8 @@ struct f2fs_xattr_entry { entry = XATTR_NEXT_ENTRY(entry)) #define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer)) #define XATTR_PADDING_SIZE (sizeof(__u32)) +#define XATTR_SIZE(x,i) (((x) ? VALID_XATTR_BLOCK_SIZE : 0) + \ + (inline_xattr_size(i))) #define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \ VALID_XATTR_BLOCK_SIZE) diff --git a/fs/fat/file.c b/fs/fat/file.c index 4f3d72fb1e60d6..f86ea08bd6ce4c 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -193,12 +193,17 @@ static int fat_file_release(struct inode *inode, struct file *filp) int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; - int res, err; + int err; + + err = __generic_file_fsync(filp, start, end, datasync); + if (err) + return err; - res = generic_file_fsync(filp, start, end, datasync); err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); + if (err) + return err; - return res ? 
res : err; + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 249de20f752a16..6ee471b72a34da 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1681,7 +1681,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, offset = outarg->offset & ~PAGE_MASK; file_size = i_size_read(inode); - num = outarg->size; + num = min(outarg->size, fc->max_write); if (outarg->offset > file_size) num = 0; else if (outarg->offset + num > file_size) diff --git a/fs/inode.c b/fs/inode.c index 42f6d25f32a520..5c63693326bb43 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file) int kill; int error = 0; - /* Fast path for nothing security related */ - if (IS_NOSEC(inode)) + /* + * Fast path for nothing security related. + * As well for non-regular files, e.g. blkdev inodes. + * For example, blkdev_write_iter() might get here + * trying to remove privs which it is not allowed to. + */ + if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode)) return 0; kill = dentry_needs_remove_privs(dentry); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 418fa9c78186b6..db0beefe65ec2c 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2413,8 +2413,10 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, __be32 status; int err; struct nfs4_acl *acl = NULL; +#ifdef CONFIG_NFSD_V4_SECURITY_LABEL void *context = NULL; int contextlen; +#endif bool contextsupport = false; struct nfsd4_compoundres *resp = rqstp->rq_resp; u32 minorversion = resp->cstate.minorversion; @@ -2899,12 +2901,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, *p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_TIME_METADATA); } +#ifdef CONFIG_NFSD_V4_SECURITY_LABEL if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) { status = nfsd4_encode_security_label(xdr, rqstp, context, contextlen); if (status) goto out; } +#endif attrlen = htonl(xdr->buf->len - attrlen_offset - 4); write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index a7e107309f767d..db351247892d05 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -120,8 +120,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra); static inline int fh_want_write(struct svc_fh *fh) { - int ret = mnt_want_write(fh->fh_export->ex_path.mnt); + int ret; + if (fh->fh_want_write) + return 0; + ret = mnt_want_write(fh->fh_export->ex_path.mnt); if (!ret) fh->fh_want_write = true; return ret; diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 290373024d9d67..e8ace3b54e9c19 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, out_attach: spin_lock(&dentry_attach_lock); + if (unlikely(dentry->d_fsdata && !alias)) { + /* d_fsdata is set by a racing thread which is doing + * the same thing as this thread is doing. Leave the racing + * thread going ahead and we return here. 
+ */ + spin_unlock(&dentry_attach_lock); + iput(dl->dl_inode); + ocfs2_lock_res_free(&dl->dl_lockres); + kfree(dl); + return 0; + } + dentry->d_fsdata = dl; dl->dl_count++; spin_unlock(&dentry_attach_lock); diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c index f65f2b2f594d4f..1906cc962c4d93 100644 --- a/fs/ocfs2/filecheck.c +++ b/fs/ocfs2/filecheck.c @@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb) ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck, NULL, "filecheck"); if (ret) { + kobject_put(&entry->fs_kobj); kfree(fcheck); return ret; } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 0c810f20f778b9..0bd276e4ccbeee 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "overlayfs.h" static char ovl_whatisit(struct inode *inode, struct inode *realinode) @@ -29,10 +30,11 @@ static struct file *ovl_open_realfile(const struct file *file, struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; + int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; old_cred = ovl_override_creds(inode->i_sb); - realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME, - realinode, current_cred()); + realfile = open_with_fake_path(&file->f_path, flags, realinode, + current_cred()); revert_creds(old_cred); pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n", @@ -50,7 +52,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags) int err; /* No atime modificaton on underlying */ - flags |= O_NOATIME; + flags |= O_NOATIME | FMODE_NONOTIFY; /* If some flag changed that cannot be changed then something's amiss */ if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK)) @@ -144,11 +146,47 @@ static int ovl_release(struct inode *inode, struct file *file) static loff_t ovl_llseek(struct file *file, loff_t offset, int whence) { - struct inode *realinode = ovl_inode_real(file_inode(file)); + struct inode *inode = file_inode(file); + struct fd real; + const struct cred *old_cred; + ssize_t ret; + + /* + * The two special cases below do not need to involve real fs, + * so we can optimizing concurrent callers. + */ + if (offset == 0) { + if (whence == SEEK_CUR) + return file->f_pos; + + if (whence == SEEK_SET) + return vfs_setpos(file, 0, 0); + } + + ret = ovl_real_fdget(file, &real); + if (ret) + return ret; + + /* + * Overlay file f_pos is the master copy that is preserved + * through copy up and modified on read/write, but only real + * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose + * limitations that are more strict than ->s_maxbytes for specific + * files, so we use the real file to perform seeks. 
+ */ + inode_lock(inode); + real.file->f_pos = file->f_pos; - return generic_file_llseek_size(file, offset, whence, - realinode->i_sb->s_maxbytes, - i_size_read(realinode)); + old_cred = ovl_override_creds(inode->i_sb); + ret = vfs_llseek(real.file, offset, whence); + revert_creds(old_cred); + + file->f_pos = real.file->f_pos; + inode_unlock(inode); + + fdput(real); + + return ret; } static void ovl_file_accessed(struct file *file) @@ -371,34 +409,118 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd, return ret; } -static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd, + unsigned long arg, unsigned int iflags) { long ret; struct inode *inode = file_inode(file); + unsigned int old_iflags; + + if (!inode_owner_or_capable(inode)) + return -EACCES; + + ret = mnt_want_write_file(file); + if (ret) + return ret; + + inode_lock(inode); + + /* Check the capability before cred override */ + ret = -EPERM; + old_iflags = READ_ONCE(inode->i_flags); + if (((iflags ^ old_iflags) & (S_APPEND | S_IMMUTABLE)) && + !capable(CAP_LINUX_IMMUTABLE)) + goto unlock; + + ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY); + if (ret) + goto unlock; + + ret = ovl_real_ioctl(file, cmd, arg); + + ovl_copyflags(ovl_inode_real(inode), inode); +unlock: + inode_unlock(inode); + + mnt_drop_write_file(file); + + return ret; + +} + +static unsigned int ovl_fsflags_to_iflags(unsigned int flags) +{ + unsigned int iflags = 0; + + if (flags & FS_SYNC_FL) + iflags |= S_SYNC; + if (flags & FS_APPEND_FL) + iflags |= S_APPEND; + if (flags & FS_IMMUTABLE_FL) + iflags |= S_IMMUTABLE; + if (flags & FS_NOATIME_FL) + iflags |= S_NOATIME; + + return iflags; +} + +static long ovl_ioctl_set_fsflags(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int flags; + + if (get_user(flags, (int __user *) arg)) + return -EFAULT; + + return ovl_ioctl_set_flags(file, cmd, arg, + ovl_fsflags_to_iflags(flags)); +} + +static unsigned int ovl_fsxflags_to_iflags(unsigned int xflags) +{ + unsigned int iflags = 0; + + if (xflags & FS_XFLAG_SYNC) + iflags |= S_SYNC; + if (xflags & FS_XFLAG_APPEND) + iflags |= S_APPEND; + if (xflags & FS_XFLAG_IMMUTABLE) + iflags |= S_IMMUTABLE; + if (xflags & FS_XFLAG_NOATIME) + iflags |= S_NOATIME; + + return iflags; +} + +static long ovl_ioctl_set_fsxflags(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct fsxattr fa; + + memset(&fa, 0, sizeof(fa)); + if (copy_from_user(&fa, (void __user *) arg, sizeof(fa))) + return -EFAULT; + + return ovl_ioctl_set_flags(file, cmd, arg, + ovl_fsxflags_to_iflags(fa.fsx_xflags)); +} + +static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; switch (cmd) { case FS_IOC_GETFLAGS: + case FS_IOC_FSGETXATTR: ret = ovl_real_ioctl(file, cmd, arg); break; case FS_IOC_SETFLAGS: - if (!inode_owner_or_capable(inode)) - return -EACCES; - - ret = mnt_want_write_file(file); - if (ret) - return ret; - - ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY); - if (!ret) { - ret = ovl_real_ioctl(file, cmd, arg); - - inode_lock(inode); - ovl_copyflags(ovl_inode_real(inode), inode); - inode_unlock(inode); - } + ret = ovl_ioctl_set_fsflags(file, cmd, arg); + break; - mnt_drop_write_file(file); + case FS_IOC_FSSETXATTR: + ret = ovl_ioctl_set_fsxflags(file, cmd, arg); break; default: diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index b48273e846adc5..f0389849fd8078 100644 --- a/fs/overlayfs/inode.c +++ 
b/fs/overlayfs/inode.c @@ -553,15 +553,15 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev, int xinobits = ovl_xino_bits(inode->i_sb); /* - * When NFS export is enabled and d_ino is consistent with st_ino - * (samefs or i_ino has enough bits to encode layer), set the same - * value used for d_ino to i_ino, because nfsd readdirplus compares - * d_ino values to i_ino values of child entries. When called from + * When d_ino is consistent with st_ino (samefs or i_ino has enough + * bits to encode layer), set the same value used for st_ino to i_ino, + * so inode number exposed via /proc/locks and a like will be + * consistent with d_ino and st_ino values. An i_ino value inconsistent + * with d_ino also causes nfsd readdirplus to fail. When called from * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real * upper inode i_ino on ovl_inode_init() or ovl_inode_update(). */ - if (inode->i_sb->s_export_op && - (ovl_same_sb(inode->i_sb) || xinobits)) { + if (ovl_same_sb(inode->i_sb) || xinobits) { inode->i_ino = ino; if (xinobits && fsid && !(ino >> (64 - xinobits))) inode->i_ino |= (unsigned long)fsid << (64 - xinobits); @@ -777,6 +777,54 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, return inode; } +bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir) +{ + struct inode *key = d_inode(dir); + struct inode *trap; + bool res; + + trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key); + if (!trap) + return false; + + res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) && + !ovl_inode_lower(trap); + + iput(trap); + return res; +} + +/* + * Create an inode cache entry for layer root dir, that will intentionally + * fail ovl_verify_inode(), so any lookup that will find some layer root + * will fail. + */ +struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir) +{ + struct inode *key = d_inode(dir); + struct inode *trap; + + if (!d_is_dir(dir)) + return ERR_PTR(-ENOTDIR); + + trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test, + ovl_inode_set, key); + if (!trap) + return ERR_PTR(-ENOMEM); + + if (!(trap->i_state & I_NEW)) { + /* Conflicting layer roots? */ + iput(trap); + return ERR_PTR(-ELOOP); + } + + trap->i_mode = S_IFDIR; + trap->i_flags = S_DEAD; + unlock_new_inode(trap); + + return trap; +} + /* * Does overlay inode need to be hashed by lower inode? 
*/ diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index efd372312ef100..badf039267a2fd 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -18,6 +18,7 @@ #include "overlayfs.h" struct ovl_lookup_data { + struct super_block *sb; struct qstr name; bool is_dir; bool opaque; @@ -244,6 +245,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, if (!d->metacopy || d->last) goto out; } else { + if (ovl_lookup_trap_inode(d->sb, this)) { + /* Caught in a trap of overlapping layers */ + err = -ELOOP; + goto out_err; + } + if (last_element) d->is_dir = true; if (d->last) @@ -819,6 +826,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, int err; bool metacopy = false; struct ovl_lookup_data d = { + .sb = dentry->d_sb, .name = dentry->d_name, .is_dir = false, .opaque = false, diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 80fb6642676092..265bf9cfde089c 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -270,6 +270,7 @@ void ovl_clear_flag(unsigned long flag, struct inode *inode); bool ovl_test_flag(unsigned long flag, struct inode *inode); bool ovl_inuse_trylock(struct dentry *dentry); void ovl_inuse_unlock(struct dentry *dentry); +bool ovl_is_inuse(struct dentry *dentry); bool ovl_need_index(struct dentry *dentry); int ovl_nlink_start(struct dentry *dentry, bool *locked); void ovl_nlink_end(struct dentry *dentry, bool locked); @@ -366,6 +367,8 @@ struct ovl_inode_params { struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, bool is_upper); +bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir); +struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir); struct inode *ovl_get_inode(struct super_block *sb, struct ovl_inode_params *oip); static inline void ovl_copyattr(struct inode *from, struct inode *to) diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index ec237035333afc..6ed1ace8f8b300 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -29,6 +29,8 @@ struct ovl_sb { struct ovl_layer { struct vfsmount *mnt; + /* Trap in ovl inode cache */ + struct inode *trap; struct ovl_sb *fs; /* Index of this layer in fs root (upper idx == 0) */ int idx; @@ -65,6 +67,10 @@ struct ovl_fs { /* Did we take the inuse lock? 
*/ bool upperdir_locked; bool workdir_locked; + /* Traps in ovl inode cache */ + struct inode *upperdir_trap; + struct inode *workdir_trap; + struct inode *indexdir_trap; /* Inode numbers in all layers do not use the high xino_bits */ unsigned int xino_bits; }; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 0fb0a59a5e5c2f..2d028c02621fa8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -217,6 +217,9 @@ static void ovl_free_fs(struct ovl_fs *ofs) { unsigned i; + iput(ofs->indexdir_trap); + iput(ofs->workdir_trap); + iput(ofs->upperdir_trap); dput(ofs->indexdir); dput(ofs->workdir); if (ofs->workdir_locked) @@ -225,8 +228,10 @@ static void ovl_free_fs(struct ovl_fs *ofs) if (ofs->upperdir_locked) ovl_inuse_unlock(ofs->upper_mnt->mnt_root); mntput(ofs->upper_mnt); - for (i = 0; i < ofs->numlower; i++) + for (i = 0; i < ofs->numlower; i++) { + iput(ofs->lower_layers[i].trap); mntput(ofs->lower_layers[i].mnt); + } for (i = 0; i < ofs->numlowerfs; i++) free_anon_bdev(ofs->lower_fs[i].pseudo_dev); kfree(ofs->lower_layers); @@ -984,7 +989,26 @@ static const struct xattr_handler *ovl_xattr_handlers[] = { NULL }; -static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) +static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, + struct inode **ptrap, const char *name) +{ + struct inode *trap; + int err; + + trap = ovl_get_trap_inode(sb, dir); + err = PTR_ERR_OR_ZERO(trap); + if (err) { + if (err == -ELOOP) + pr_err("overlayfs: conflicting %s path\n", name); + return err; + } + + *ptrap = trap; + return 0; +} + +static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, + struct path *upperpath) { struct vfsmount *upper_mnt; int err; @@ -1004,6 +1028,11 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) if (err) goto out; + err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap, + "upperdir"); + if (err) + goto out; + upper_mnt = clone_private_mount(upperpath); err = PTR_ERR(upper_mnt); if (IS_ERR(upper_mnt)) { @@ -1030,7 +1059,8 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) return err; } -static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) +static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs, + struct path *workpath) { struct vfsmount *mnt = ofs->upper_mnt; struct dentry *temp; @@ -1045,6 +1075,10 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) if (!ofs->workdir) goto out; + err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir"); + if (err) + goto out; + /* * Upper should support d_type, else whiteouts are visible. 
Given * workdir and upper are on same fs, we can do iterate_dir() on @@ -1105,7 +1139,8 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) return err; } -static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath) +static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, + struct path *upperpath) { int err; struct path workpath = { }; @@ -1136,19 +1171,16 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath) pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); } - err = ovl_make_workdir(ofs, &workpath); - if (err) - goto out; + err = ovl_make_workdir(sb, ofs, &workpath); - err = 0; out: path_put(&workpath); return err; } -static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe, - struct path *upperpath) +static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, + struct ovl_entry *oe, struct path *upperpath) { struct vfsmount *mnt = ofs->upper_mnt; int err; @@ -1167,6 +1199,11 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe, ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); if (ofs->indexdir) { + err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap, + "indexdir"); + if (err) + goto out; + /* * Verify upper root is exclusively associated with index dir. * Older kernels stored upper fh in "trusted.overlay.origin" @@ -1226,8 +1263,8 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb) return ofs->numlowerfs; } -static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, - unsigned int numlower) +static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs, + struct path *stack, unsigned int numlower) { int err; unsigned int i; @@ -1245,16 +1282,28 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, for (i = 0; i < numlower; i++) { struct vfsmount *mnt; + struct inode *trap; int fsid; err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb); if (err < 0) goto out; + err = -EBUSY; + if (ovl_is_inuse(stack[i].dentry)) { + pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n"); + goto out; + } + + err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); + if (err) + goto out; + mnt = clone_private_mount(&stack[i]); err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); + iput(trap); goto out; } @@ -1264,6 +1313,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, */ mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME; + ofs->lower_layers[ofs->numlower].trap = trap; ofs->lower_layers[ofs->numlower].mnt = mnt; ofs->lower_layers[ofs->numlower].idx = i + 1; ofs->lower_layers[ofs->numlower].fsid = fsid; @@ -1358,7 +1408,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, goto out_err; } - err = ovl_get_lower_layers(ofs, stack, numlower); + err = ovl_get_lower_layers(sb, ofs, stack, numlower); if (err) goto out_err; @@ -1390,6 +1440,77 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, goto out; } +/* + * Check if this layer root is a descendant of: + * - another layer of this overlayfs instance + * - upper/work dir of any overlayfs instance + */ +static int ovl_check_layer(struct super_block *sb, struct dentry *dentry, + const char *name) +{ + struct dentry *next = dentry, *parent; + int err = 0; + + if (!dentry) + return 0; + + parent = dget_parent(next); + + /* Walk back ancestors to root (inclusive) looking for traps */ 
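/*
 * Descriptive note, not part of the hunk: each iteration inspects one
 * ancestor -- -EBUSY if it is currently marked in-use as an
 * upperdir/workdir by some overlay instance, -ELOOP if it matches a trap
 * inode installed for one of this instance's layer roots or work dirs.
 * dget_parent() pins the next ancestor before the current reference is
 * dropped, and the walk stops when dget_parent() returns the dentry
 * itself, i.e. at the root.
 */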
+ while (!err && parent != next) { + if (ovl_is_inuse(parent)) { + err = -EBUSY; + pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n", + name); + } else if (ovl_lookup_trap_inode(sb, parent)) { + err = -ELOOP; + pr_err("overlayfs: overlapping %s path\n", name); + } + next = parent; + parent = dget_parent(next); + dput(next); + } + + dput(parent); + + return err; +} + +/* + * Check if any of the layers or work dirs overlap. + */ +static int ovl_check_overlapping_layers(struct super_block *sb, + struct ovl_fs *ofs) +{ + int i, err; + + if (ofs->upper_mnt) { + err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir"); + if (err) + return err; + + /* + * Checking workbasedir avoids hitting ovl_is_inuse(parent) of + * this instance and covers overlapping work and index dirs, + * unless work or index dir have been moved since created inside + * workbasedir. In that case, we already have their traps in + * inode cache and we will catch that case on lookup. + */ + err = ovl_check_layer(sb, ofs->workbasedir, "workdir"); + if (err) + return err; + } + + for (i = 0; i < ofs->numlower; i++) { + err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root, + "lowerdir"); + if (err) + return err; + } + + return 0; +} + static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path upperpath = { }; @@ -1429,17 +1550,20 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (ofs->config.xino != OVL_XINO_OFF) ofs->xino_bits = BITS_PER_LONG - 32; + /* alloc/destroy_inode needed for setting up traps in inode cache */ + sb->s_op = &ovl_super_operations; + if (ofs->config.upperdir) { if (!ofs->config.workdir) { pr_err("overlayfs: missing 'workdir'\n"); goto out_err; } - err = ovl_get_upper(ofs, &upperpath); + err = ovl_get_upper(sb, ofs, &upperpath); if (err) goto out_err; - err = ovl_get_workdir(ofs, &upperpath); + err = ovl_get_workdir(sb, ofs, &upperpath); if (err) goto out_err; @@ -1460,7 +1584,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags |= SB_RDONLY; if (!(ovl_force_readonly(ofs)) && ofs->config.index) { - err = ovl_get_indexdir(ofs, oe, &upperpath); + err = ovl_get_indexdir(sb, ofs, oe, &upperpath); if (err) goto out_free_oe; @@ -1473,6 +1597,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } + err = ovl_check_overlapping_layers(sb, ofs); + if (err) + goto out_free_oe; + /* Show index=off in /proc/mounts for forced r/o mount */ if (!ofs->indexdir) { ofs->config.index = false; @@ -1494,7 +1622,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); sb->s_magic = OVERLAYFS_SUPER_MAGIC; - sb->s_op = &ovl_super_operations; sb->s_xattr = ovl_xattr_handlers; sb->s_fs_info = ofs; sb->s_flags |= SB_POSIXACL; diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index c9a2e3c6d5374f..db8bdb29b3207e 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -653,6 +653,18 @@ void ovl_inuse_unlock(struct dentry *dentry) } } +bool ovl_is_inuse(struct dentry *dentry) +{ + struct inode *inode = d_inode(dentry); + bool inuse; + + spin_lock(&inode->i_lock); + inuse = (inode->i_state & I_OVL_INUSE); + spin_unlock(&inode->i_lock); + + return inuse; +} + /* * Does this overlay dentry need to be indexed on copy up? 
*/ diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index e3c40483311569..53be104aab5ce4 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -466,6 +466,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); +int drm_add_override_edid_modes(struct drm_connector *connector); u8 drm_match_cea_mode(const struct drm_display_mode *to_match); enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ca51b2c15bcc6d..8937d48a5389d1 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -485,7 +485,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task, * * Find the css for the (@task, @subsys_id) combination, increment a * reference on and return it. This function is guaranteed to return a - * valid css. + * valid css. The returned css may already have been offlined. */ static inline struct cgroup_subsys_state * task_get_css(struct task_struct *task, int subsys_id) @@ -495,7 +495,13 @@ task_get_css(struct task_struct *task, int subsys_id) rcu_read_lock(); while (true) { css = task_css(task, subsys_id); - if (likely(css_tryget_online(css))) + /* + * Can't use css_tryget_online() here. A task which has + * PF_EXITING set may stay associated with an offline css. + * If such task calls this function, css_tryget_online() + * will keep failing. + */ + if (likely(css_tryget(css))) break; cpu_relax(); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index caf40ad0bbc6e0..d64d8c2bbdabc5 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -101,6 +101,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index beed7121c7818b..2ff52de1c2b894 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -395,6 +395,7 @@ struct mmc_host { unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int use_blk_mq:1; /* use blk-mq */ + unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 97ca105347a6c5..6905f3f641cce6 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -159,4 +159,10 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); +extern void sdio_retune_crc_disable(struct sdio_func *func); +extern void sdio_retune_crc_enable(struct sdio_func *func); + +extern void sdio_retune_hold_now(struct sdio_func *func); +extern void sdio_retune_release(struct sdio_func *func); + #endif /* LINUX_MMC_SDIO_FUNC_H */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 56518adc31dd70..bd7d611d63e914 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -639,7 +639,6 @@ static inline void 
pwm_remove_table(struct pwm_lookup *table, size_t num) #ifdef CONFIG_PWM_SYSFS void pwmchip_sysfs_export(struct pwm_chip *chip); void pwmchip_sysfs_unexport(struct pwm_chip *chip); -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip); #else static inline void pwmchip_sysfs_export(struct pwm_chip *chip) { @@ -648,10 +647,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip) static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) { } - -static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) -{ -} #endif /* CONFIG_PWM_SYSFS */ #endif /* __LINUX_PWM_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index cebb79fe2c72a6..0d10b7ce0da744 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm) * followed by taking the mmap_sem for writing before modifying the * vmas or anything the coredump pretends not to change from under it. * + * It also has to be called when mmgrab() is used in the context of + * the process, but then the mm_count refcount is transferred outside + * the context of the process to run down_write() on that pinned mm. + * * NOTE: find_extend_vma() called from GUP context is the only place * that can modify the "mm" (notably the vm_start/end) under mmap_sem * for reading and outside the context of the process, so it is also diff --git a/include/linux/tcp.h b/include/linux/tcp.h index d2c8f280e48fa9..4374196b98ea48 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -485,4 +485,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) return (user_mss && user_mss < mss) ? user_mss : mss; } + +int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, + int shiftlen); + #endif /* _LINUX_TCP_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4de121e24ce58f..67e0a990144a6e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3448,7 +3448,8 @@ struct cfg80211_ops { * on wiphy_new(), but can be changed by the driver if it has a good * reason to override the default * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station - * on a VLAN interface) + * on a VLAN interface). This flag also serves an extra purpose of + * supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype. * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the * control port protocol ethertype. The device also honours the diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 622db6bc2f02da..366e2a60010e81 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -114,6 +114,7 @@ struct netns_ipv4 { #endif int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; diff --git a/include/net/tcp.h b/include/net/tcp.h index 770917d0caa718..e75661f92daaaa 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -55,6 +55,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 +#define TCP_MIN_SND_MSS 48 +#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) /* * Never offer a window over 32767 without using window scaling. 
Some diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index f80135e5feaa88..abae27c3001c3d 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -282,6 +282,7 @@ enum LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */ + LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */ __LINUX_MIB_MAX }; diff --git a/init/initramfs.c b/init/initramfs.c index f6f4a1e4cd5475..cd5fb00fcb5496 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -612,13 +612,12 @@ static int __init populate_rootfs(void) printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); - if (!err) { - free_initrd(); + if (!err) goto done; - } else { - clean_rootfs(); - unpack_to_rootfs(__initramfs_start, __initramfs_size); - } + + clean_rootfs(); + unpack_to_rootfs(__initramfs_start, __initramfs_size); + printk(KERN_INFO "rootfs image is not initramfs (%s)" "; looks like an initrd\n", err); fd = ksys_open("/initrd.image", @@ -632,7 +631,6 @@ static int __init populate_rootfs(void) written, initrd_end - initrd_start); ksys_close(fd); - free_initrd(); } done: /* empty statement */; @@ -642,9 +640,9 @@ static int __init populate_rootfs(void) initrd_end - initrd_start); if (err) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); - free_initrd(); #endif } + free_initrd(); flush_delayed_fput(); /* * Try loading default modules from initramfs. This gives diff --git a/ipc/mqueue.c b/ipc/mqueue.c index c0d58f390c3b43..bce7af1546d9c5 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -391,7 +391,8 @@ static void mqueue_evict_inode(struct inode *inode) struct user_struct *user; unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; - struct msg_msg *msg; + struct msg_msg *msg, *nmsg; + LIST_HEAD(tmp_msg); clear_inode(inode); @@ -402,10 +403,15 @@ static void mqueue_evict_inode(struct inode *inode) info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) - free_msg(msg); + list_add_tail(&msg->m_list, &tmp_msg); kfree(info->node_cache); spin_unlock(&info->lock); + list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) { + list_del(&msg->m_list); + free_msg(msg); + } + /* Total amount of bytes accounted for the mqueue */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * diff --git a/ipc/msgutil.c b/ipc/msgutil.c index 84598025a6ade1..e65593742e2be5 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "util.h" @@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len) pseg = &msg->next; while (len > 0) { struct msg_msgseg *seg; + + cond_resched(); + alen = min(len, DATALEN_SEG); seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT); if (seg == NULL) @@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg) kfree(msg); while (seg != NULL) { struct msg_msgseg *tmp = seg->next; + + cond_resched(); kfree(seg); seg = tmp; } diff --git a/kernel/Makefile b/kernel/Makefile index 7a63d567fdb571..df5e3ca30acd26 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n # Don't self-instrument. 
KCOV_INSTRUMENT_kcov.o := n KASAN_SANITIZE_kcov.o := n +CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) # cond_syscall is currently not LTO compatible CFLAGS_sys_ni.o = $(DISABLE_LTO) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index acc2305ad89578..d3580a68dbefa9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5743,7 +5743,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, - (1 << size * 8) - 1); + (1ULL << size * 8) - 1); } } diff --git a/kernel/cred.c b/kernel/cred.c index ecf03657e71c9c..efd04b2ec84c2c 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -448,6 +448,15 @@ int commit_creds(struct cred *new) if (task->mm) set_dumpable(task->mm, suid_dumpable); task->pdeath_signal = 0; + /* + * If a task drops privileges and becomes nondumpable, + * the dumpability change must become visible before + * the credential change; otherwise, a __ptrace_may_access() + * racing with this change may be able to attach to a task it + * shouldn't be able to attach to (as if the task had dropped + * privileges without becoming nondumpable). + * Pairs with a read barrier in __ptrace_may_access(). + */ smp_wmb(); } diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 99c7f199f2d4e4..12f351b253bbb4 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle) unsigned long head; again: + /* + * In order to avoid publishing a head value that goes backwards, + * we must ensure the load of @rb->head happens after we've + * incremented @rb->nest. + * + * Otherwise we can observe a @rb->head value before one published + * by an IRQ/NMI happening between the load and the increment. + */ + barrier(); head = local_read(&rb->head); /* - * IRQ/NMI can happen here, which means we can miss a head update. + * IRQ/NMI can happen here and advance @rb->head, causing our + * load above to be stale. */ - if (!local_dec_and_test(&rb->nest)) + /* + * If this isn't the outermost nesting, we don't have to update + * @rb->user_page->data_head. + */ + if (local_read(&rb->nest) > 1) { + local_dec(&rb->nest); goto out; + } /* * Since the mmap() consumer (userspace) can run on a different CPU: @@ -85,12 +101,21 @@ static void perf_output_put_handle(struct perf_output_handle *handle) * See perf_output_begin(). */ smp_wmb(); /* B, matches C */ - rb->user_page->data_head = head; + WRITE_ONCE(rb->user_page->data_head, head); + + /* + * We must publish the head before decrementing the nest count, + * otherwise an IRQ/NMI can publish a more recent head value and our + * write will (temporarily) publish a stale value. + */ + barrier(); + local_set(&rb->nest, 0); /* - * Now check if we missed an update -- rely on previous implied - * compiler barriers to force a re-read. + * Ensure we decrement @rb->nest before we validate the @rb->head. + * Otherwise we cannot be sure we caught the 'last' nested update. 
*/ + barrier(); if (unlikely(head != local_read(&rb->head))) { local_inc(&rb->nest); goto again; @@ -465,7 +490,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) handle->aux_flags); } - rb->user_page->aux_head = rb->aux_head; + WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); if (rb_need_aux_wakeup(rb)) wakeup = true; @@ -497,7 +522,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) rb->aux_head += size; - rb->user_page->aux_head = rb->aux_head; + WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); if (rb_need_aux_wakeup(rb)) { perf_output_wakeup(handle); handle->wakeup = rb->aux_wakeup + rb->aux_watermark; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index fc0d667f57929f..5d0838c2349eeb 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -323,6 +323,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) return -EPERM; ok: rcu_read_unlock(); + /* + * If a task drops privileges and becomes nondumpable (through a syscall + * like setresuid()) while we are trying to access it, we must ensure + * that the dumpability is read after the credentials; otherwise, + * we may be able to attach to a task that we shouldn't be able to + * attach to (as if the task had dropped privileges without becoming + * nondumpable). + * Pairs with a write barrier in commit_creds(). + */ + smp_rmb(); mm = task->mm; if (mm && ((get_dumpable(mm) != SUID_DUMP_USER) && @@ -704,6 +714,10 @@ static int ptrace_peek_siginfo(struct task_struct *child, if (arg.nr < 0) return -EINVAL; + /* Ensure arg.off fits in an unsigned long */ + if (arg.off > ULONG_MAX) + return 0; + if (arg.flags & PTRACE_PEEKSIGINFO_SHARED) pending = &child->signal->shared_pending; else @@ -711,18 +725,20 @@ static int ptrace_peek_siginfo(struct task_struct *child, for (i = 0; i < arg.nr; ) { siginfo_t info; - s32 off = arg.off + i; + unsigned long off = arg.off + i; + bool found = false; spin_lock_irq(&child->sighand->siglock); list_for_each_entry(q, &pending->list, list) { if (!off--) { + found = true; copy_siginfo(&info, &q->info); break; } } spin_unlock_irq(&child->sighand->siglock); - if (off >= 0) /* beyond the end of the list */ + if (!found) /* beyond the end of the list */ break; #ifdef CONFIG_COMPAT diff --git a/kernel/sys.c b/kernel/sys.c index 123bd73046ec6b..096932a4504666 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1919,7 +1919,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map) ((unsigned long)prctl_map->__m1 __op \ (unsigned long)prctl_map->__m2) ? 
0 : -EINVAL error = __prctl_check_order(start_code, <, end_code); - error |= __prctl_check_order(start_data, <, end_data); + error |= __prctl_check_order(start_data,<=, end_data); error |= __prctl_check_order(start_brk, <=, brk); error |= __prctl_check_order(arg_start, <=, arg_end); error |= __prctl_check_order(env_start, <=, env_end); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 9a85c7ae736210..f8576509c7bef2 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2791,8 +2791,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int if (neg) continue; val = convmul * val / convdiv; - if ((min && val < *min) || (max && val > *max)) - continue; + if ((min && val < *min) || (max && val > *max)) { + err = -EINVAL; + break; + } *i = val; } else { val = convdiv * (*i) / convmul; diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index c5e0cba3b39cc1..6b23cd584295f5 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -698,7 +698,7 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai time_constant = max(time_constant, 0l); } - if (txc->modes & ADJ_TAI && txc->constant > 0) + if (txc->modes & ADJ_TAI && txc->constant >= 0) *time_tai = txc->constant; if (txc->modes & ADJ_OFFSET) diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 9a6bfcd22dc66f..443edcddac8ab2 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -812,17 +812,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs) struct timekeeper *tk = &tk_core.timekeeper; unsigned int seq; ktime_t base, *offset = offsets[offs]; + u64 nsecs; WARN_ON(timekeeping_suspended); do { seq = read_seqcount_begin(&tk_core.seq); base = ktime_add(tk->tkr_mono.base, *offset); + nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; } while (read_seqcount_retry(&tk_core.seq, seq)); - return base; - + return base + nsecs; } EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1bd7a758583b15..181dba75a203c8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8351,12 +8351,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) cnt++; - /* reset all but tr, trace, and overruns */ - memset(&iter.seq, 0, - sizeof(struct trace_iterator) - - offsetof(struct trace_iterator, seq)); + trace_iterator_reset(&iter); iter.iter_flags |= TRACE_FILE_LAT_FMT; - iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 447bd96ee658aa..d11d7bfc3fa5cc 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1895,4 +1895,22 @@ static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { } extern struct trace_iterator *tracepoint_print_iter; +/* + * Reset the state of the trace_iterator so that it can read consumed data. + * Normally, the trace_iterator is used for reading the data when it is not + * consumed, and must retain state. + */ +static __always_inline void trace_iterator_reset(struct trace_iterator *iter) +{ + const size_t offset = offsetof(struct trace_iterator, seq); + + /* + * Keep gcc from complaining about overwriting more than just one + * member in the structure. 
+ */ + memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset); + + iter->pos = -1; +} + #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 11853e90b64972..3f34cfb66a85f2 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1632,6 +1632,9 @@ static u64 hist_field_var_ref(struct hist_field *hist_field, struct hist_elt_data *elt_data; u64 var_val = 0; + if (WARN_ON_ONCE(!elt)) + return var_val; + elt_data = elt->private_data; var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 810d78a8d14c76..2905a3dd94c1db 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -41,12 +41,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) kdb_printf("Dumping ftrace buffer:\n"); - /* reset all but tr, trace, and overruns */ - memset(&iter.seq, 0, - sizeof(struct trace_iterator) - - offsetof(struct trace_iterator, seq)); + trace_iterator_reset(&iter); iter.iter_flags |= TRACE_FILE_LAT_FMT; - iter.pos = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { diff --git a/mm/Kconfig b/mm/Kconfig index de64ea658716a8..b457e94ae6182a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -700,12 +700,12 @@ config DEV_PAGEMAP_OPS config HMM bool + select MMU_NOTIFIER select MIGRATE_VMA_HELPER config HMM_MIRROR bool "HMM mirror CPU page table into a device page table" depends on ARCH_HAS_HMM - select MMU_NOTIFIER select HMM help Select HMM_MIRROR if you want to mirror range of the CPU page table of a diff --git a/mm/cma.c b/mm/cma.c index bfe9f5397165c7..476dfe13a701f7 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -106,8 +106,10 @@ static int __init cma_activate_area(struct cma *cma) cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!cma->bitmap) + if (!cma->bitmap) { + cma->count = 0; return -ENOMEM; + } WARN_ON_ONCE(!pfn_valid(pfn)); zone = page_zone(pfn_to_page(pfn)); @@ -369,23 +371,26 @@ int __init cma_declare_contiguous(phys_addr_t base, #ifdef CONFIG_CMA_DEBUG static void cma_debug_show_areas(struct cma *cma) { - unsigned long next_zero_bit, next_set_bit; + unsigned long next_zero_bit, next_set_bit, nr_zero; unsigned long start = 0; - unsigned int nr_zero, nr_total = 0; + unsigned long nr_part, nr_total = 0; + unsigned long nbits = cma_bitmap_maxno(cma); mutex_lock(&cma->lock); pr_info("number of available pages: "); for (;;) { - next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start); - if (next_zero_bit >= cma->count) + next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); + if (next_zero_bit >= nbits) break; - next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit); + next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); nr_zero = next_set_bit - next_zero_bit; - pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit); - nr_total += nr_zero; + nr_part = nr_zero << cma->order_per_bit; + pr_cont("%s%lu@%lu", nr_total ? 
"+" : "", nr_part, + next_zero_bit); + nr_total += nr_part; start = next_zero_bit + nr_zero; } - pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count); + pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); mutex_unlock(&cma->lock); } #else diff --git a/mm/cma_debug.c b/mm/cma_debug.c index ad6723e9d110a9..3e0415076cc9ef 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -58,7 +58,7 @@ static int cma_maxchunk_get(void *data, u64 *val) mutex_lock(&cma->lock); for (;;) { start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); - if (start >= cma->count) + if (start >= bitmap_maxno) break; end = find_next_bit(cma->bitmap, bitmap_maxno, start); maxchunk = max(end - start, maxchunk); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0bbb033d7d8c8e..65179513c2b251 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1256,12 +1256,23 @@ void free_huge_page(struct page *page) ClearPagePrivate(page); /* - * A return code of zero implies that the subpool will be under its - * minimum size if the reservation is not restored after page is free. - * Therefore, force restore_reserve operation. + * If PagePrivate() was set on page, page allocation consumed a + * reservation. If the page was associated with a subpool, there + * would have been a page reserved in the subpool before allocation + * via hugepage_subpool_get_pages(). Since we are 'restoring' the + * reservtion, do not call hugepage_subpool_put_pages() as this will + * remove the reserved page from the subpool. */ - if (hugepage_subpool_put_pages(spool, 1) == 0) - restore_reserve = true; + if (!restore_reserve) { + /* + * A return code of zero implies that the subpool will be + * under its minimum size if the reservation is not restored + * after page is free. Therefore, force restore_reserve + * operation. + */ + if (hugepage_subpool_put_pages(spool, 1) == 0) + restore_reserve = true; + } spin_lock(&hugetlb_lock); clear_page_huge_active(page); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index fde5820be24d8a..ecefdba4b0ddae 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1005,6 +1005,9 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. 
*/ down_write(&mm->mmap_sem); + result = SCAN_ANY_PROCESS; + if (!mmget_still_valid(mm)) + goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; diff --git a/mm/list_lru.c b/mm/list_lru.c index f0a15d32b95930..758653dd144314 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -353,7 +353,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, } return 0; fail: - __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1); + __memcg_destroy_list_lru_node(memcg_lrus, begin, i); return -ENOMEM; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8e6932a140b827..2d04bd2e1ced75 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5937,13 +5937,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long *zone_end_pfn, unsigned long *ignored) { + unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; + unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; /* When hotadd a new node from cpu_up(), the node should be empty */ if (!node_start_pfn && !node_end_pfn) return 0; /* Get the start and end of the zone */ - *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; - *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; + *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); + *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); adjust_zone_range_for_zone_movable(nid, zone_type, node_start_pfn, node_end_pfn, zone_start_pfn, zone_end_pfn); diff --git a/mm/percpu.c b/mm/percpu.c index 41e58f3d8fbf22..ff76fa0b752883 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -988,7 +988,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, /* * Search to find a fit. */ - end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS; + end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, + pcpu_chunk_map_bits(chunk)); bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start, alloc_bits, align_mask); if (bit_off >= end) @@ -1721,6 +1722,7 @@ void free_percpu(void __percpu *ptr) struct pcpu_chunk *chunk; unsigned long flags; int off; + bool need_balance = false; if (!ptr) return; @@ -1742,7 +1744,7 @@ void free_percpu(void __percpu *ptr) list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) if (pos != chunk) { - pcpu_schedule_balance_work(); + need_balance = true; break; } } @@ -1750,6 +1752,9 @@ void free_percpu(void __percpu *ptr) trace_percpu_free_percpu(chunk->base_addr, off, ptr); spin_unlock_irqrestore(&pcpu_lock, flags); + + if (need_balance) + pcpu_schedule_balance_work(); } EXPORT_SYMBOL_GPL(free_percpu); diff --git a/mm/rmap.c b/mm/rmap.c index 85b7f94233526a..f048c2651954b6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -926,7 +926,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, continue; flush_cache_page(vma, address, page_to_pfn(page)); - entry = pmdp_huge_clear_flush(vma, address, pmd); + entry = pmdp_invalidate(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); set_pmd_at(vma->vm_mm, address, pmd, entry); diff --git a/mm/slab.c b/mm/slab.c index 018d32496e8d13..46f21e73db2f8f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4326,8 +4326,12 @@ static int leaks_show(struct seq_file *m, void *p) * whole processing. */ do { - set_store_user_clean(cachep); drain_cpu_caches(cachep); + /* + * drain_cpu_caches() could make kmemleak_object and + * debug_objects_cache dirty, so reset afterwards. 
+ */ + set_store_user_clean(cachep); x[1] = 0; diff --git a/mm/vmscan.c b/mm/vmscan.c index ee545d1e9894de..dec88fcf887651 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1510,7 +1510,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, list_for_each_entry_safe(page, next, page_list, lru) { if (page_is_file_cache(page) && !PageDirty(page) && - !__PageMovable(page)) { + !__PageMovable(page) && !PageUnevictable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 66f74c85cf6bd1..66d54fc11831c0 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -429,9 +429,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) } if (ax25->sk != NULL) { + local_bh_disable(); bh_lock_sock(ax25->sk); sock_reset_flag(ax25->sk, SOCK_ZAPPED); bh_unlock_sock(ax25->sk); + local_bh_enable(); } put: diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 3cf0764d5793f4..15d1cb5aee18da 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1276,14 +1276,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn) !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; - /* The minimum encryption key size needs to be enforced by the - * host stack before establishing any L2CAP connections. The - * specification in theory allows a minimum of 1, but to align - * BR/EDR and LE transports, a minimum of 7 is chosen. - */ - if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE) - return 0; - return 1; } @@ -1400,8 +1392,16 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, return 0; encrypt: - if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) { + /* Ensure that the encryption key size has been read, + * otherwise stall the upper layer responses. + */ + if (!conn->enc_key_size) + return 0; + + /* Nothing else needed, all requirements are met */ return 1; + } hci_conn_encrypt(conn); return 0; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 2c6eabf294b31c..69e3be51a2c331 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1340,6 +1340,21 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); } +static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +{ + /* The minimum encryption key size needs to be enforced by the + * host stack before establishing any L2CAP connections. The + * specification in theory allows a minimum of 1, but to align + * BR/EDR and LE transports, a minimum of 7 is chosen. + * + * This check might also be called for unencrypted connections + * that have no key size requirements. Ensure that the link is + * actually encrypted before enforcing a key size. 
+ */ + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || + hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE); +} + static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; @@ -1357,9 +1372,14 @@ static void l2cap_do_start(struct l2cap_chan *chan) if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; - if (l2cap_chan_check_security(chan, true) && - __l2cap_no_conn_pending(chan)) + if (!l2cap_chan_check_security(chan, true) || + !__l2cap_no_conn_pending(chan)) + return; + + if (l2cap_check_enc_key_size(conn->hcon)) l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) @@ -1438,7 +1458,10 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; } - l2cap_start_connection(chan); + if (l2cap_check_enc_key_size(conn->hcon)) + l2cap_start_connection(chan); + else + l2cap_chan_close(chan, ECONNREFUSED); } else if (chan->state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; @@ -7455,7 +7478,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } if (chan->state == BT_CONNECT) { - if (!status) + if (!status && l2cap_check_enc_key_size(hcon)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -7464,7 +7487,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat; - if (!status) { + if (!status && l2cap_check_enc_key_size(hcon)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; diff --git a/net/can/af_can.c b/net/can/af_can.c index 1684ba5b51eb6c..e386d654116dd6 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -105,6 +105,7 @@ EXPORT_SYMBOL(can_ioctl); static void can_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_error_queue); } static const struct can_proto *can_get_proto(int protocol) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 4e4ac77c6816c4..cd9e991f21d734 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2751,6 +2751,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) } void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) + __acquires(tbl->lock) __acquires(rcu_bh) { struct neigh_seq_state *state = seq->private; @@ -2761,6 +2762,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl rcu_read_lock_bh(); state->nht = rcu_dereference_bh(tbl->nht); + read_lock(&tbl->lock); return *pos ? 
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; } @@ -2794,8 +2796,13 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) EXPORT_SYMBOL(neigh_seq_next); void neigh_seq_stop(struct seq_file *seq, void *v) + __releases(tbl->lock) __releases(rcu_bh) { + struct neigh_seq_state *state = seq->private; + struct neigh_table *tbl = state->tbl; + + read_unlock(&tbl->lock); rcu_read_unlock_bh(); } EXPORT_SYMBOL(neigh_seq_stop); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 70289682a67014..eab5c02da8ae84 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -290,6 +290,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP), SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP), + SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index ce64453d337d34..ad132b6e8cfade 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -39,6 +39,8 @@ static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; static int tcp_adv_win_scale_min = -31; static int tcp_adv_win_scale_max = 31; +static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS; +static int tcp_min_snd_mss_max = 65535; static int ip_privileged_port_min; static int ip_privileged_port_max = 65535; static int ip_ttl_min = 1; @@ -737,6 +739,15 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "tcp_min_snd_mss", + .data = &init_net.ipv4.sysctl_tcp_min_snd_mss, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &tcp_min_snd_mss_min, + .extra2 = &tcp_min_snd_mss_max, + }, { .procname = "tcp_probe_threshold", .data = &init_net.ipv4.sysctl_tcp_probe_threshold, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 30c6e94b06c49d..364e6fdaa38f50 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3829,6 +3829,7 @@ void __init tcp_init(void) unsigned long limit; unsigned int i; + BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index cfdd70e32755e6..4a8869d3966221 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1315,7 +1315,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, TCP_SKB_CB(skb)->seq += shifted; tcp_skb_pcount_add(prev, pcount); - BUG_ON(tcp_skb_pcount(skb) < pcount); + WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); /* When we're adding to gso_segs == 1, gso_size will be zero, @@ -1381,6 +1381,21 @@ static int skb_can_shift(const struct sk_buff *skb) return !skb_headlen(skb) && skb_is_nonlinear(skb); } +int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, + int pcount, int shiftlen) +{ + /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE) + * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need + * to make sure not storing more than 65535 * 8 bytes per skb, + * even if current MSS is bigger. + */ + if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE)) + return 0; + if (unlikely(tcp_skb_pcount(to) + pcount > 65535)) + return 0; + return skb_shift(to, from, shiftlen); +} + /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. 
*/ @@ -1486,7 +1501,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; - if (!skb_shift(prev, skb, len)) + if (!tcp_skb_shift(prev, skb, pcount, len)) goto fallback; if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack)) goto out; @@ -1504,11 +1519,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, goto out; len = skb->len; - if (skb_shift(prev, skb, len)) { - pcount += tcp_skb_pcount(skb); - tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), + pcount = tcp_skb_pcount(skb); + if (tcp_skb_shift(prev, skb, pcount, len)) + tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, 0); - } out: return prev; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 11101cf8693b1d..b76cf96d5cfed9 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2527,6 +2527,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_ecn_fallback = 1; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; + net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS; net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index bd134e3a0473e7..221d9b72423b96 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1299,6 +1299,12 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, if (nsize < 0) nsize = 0; + if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf && + tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); + return -ENOMEM; + } + if (skb_unclone(skb, gfp)) return -ENOMEM; @@ -1457,8 +1463,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) mss_now -= icsk->icsk_ext_hdr_len; /* Then reserve room for full set of TCP options and 8 bytes of data */ - if (mss_now < 48) - mss_now = 48; + mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); return mss_now; } @@ -2727,7 +2732,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) if (next_skb_size <= skb_availroom(skb)) skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), next_skb_size); - else if (!skb_shift(skb, next_skb, next_skb_size)) + else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size)) return false; } tcp_highest_sack_replace(sk, next_skb, skb); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b1b5a648def6cd..17335a370e6452 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -166,6 +166,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; mss = min(net->ipv4.sysctl_tcp_base_mss, mss); mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len); + mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss); icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); } tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index be5f3d7ceb966d..f994f50e151622 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) rcu_read_lock_bh(); for_each_sk_fl_rcu(np, sfl) { struct ip6_flowlabel *fl = sfl->fl; - if (fl->label == label) { + + if (fl->label == label && atomic_inc_not_zero(&fl->users)) { fl->lastuse = jiffies; - atomic_inc(&fl->users); rcu_read_unlock_bh(); return fl; } @@ -622,7 +622,8 @@ int 
ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) goto done; } fl1 = sfl->fl; - atomic_inc(&fl1->users); + if (!atomic_inc_not_zero(&fl1->users)) + fl1 = NULL; break; } } diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index db6e0afe3a20b6..1740f852002ec3 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev) lapb = __lapb_devtostruct(dev); if (!lapb) goto out; + lapb_put(lapb); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 172aeae21ae9d0..35c6dfa13fa85b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2183,6 +2183,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, const u8 *addr); void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); void ieee80211_tdls_chsw_work(struct work_struct *wk); +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason); +const char *ieee80211_get_reason_code_string(u16 reason_code); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2ac749c4a6b261..1aaa73fa308e65 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2868,7 +2868,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, #define case_WLAN(type) \ case WLAN_REASON_##type: return #type -static const char *ieee80211_get_reason_code_string(u16 reason_code) +const char *ieee80211_get_reason_code_string(u16 reason_code) { switch (reason_code) { case_WLAN(UNSPECIFIED); @@ -2933,6 +2933,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, if (len < 24 + 2) return; + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) { const u8 *bssid = ifmgd->associated->bssid; @@ -2982,6 +2987,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", mgmt->sa, reason_code, ieee80211_get_reason_code_string(reason_code)); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e946ee4f335bd7..7523d995ea8abe 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3752,6 +3752,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) case NL80211_IFTYPE_STATION: if (!bssid && !sdata->u.mgd.use_4addr) return false; + if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) + return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 6c647f425e057d..67745d1d4c5d10 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -1992,3 +1992,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk) } rtnl_unlock(); } + +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason) +{ + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(&sdata->vif, peer); + if (!sta || !sta->tdls) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n", + peer, 
reason, + ieee80211_get_reason_code_string(reason)); + + ieee80211_tdls_oper_request(&sdata->vif, peer, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC); +} diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3deaa01ebee4bb..2558a34c9df16f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3523,7 +3523,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, } /* Always allow software iftypes */ - if (local->hw.wiphy->software_iftypes & BIT(iftype)) { + if (local->hw.wiphy->software_iftypes & BIT(iftype) || + (iftype == NL80211_IFTYPE_AP_VLAN && + local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) { if (radar_detect) return -EINVAL; return 0; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 58d0b258b684cd..5dd48f0a4b1bd1 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -1175,7 +1175,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; - u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN]; + u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) @@ -1206,13 +1206,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) memcpy(nonce, hdr->addr2, ETH_ALEN); memcpy(nonce + ETH_ALEN, ipn, 6); + mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC); + if (!mic) + return RX_DROP_UNUSABLE; if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; + kfree(mic); return RX_DROP_UNUSABLE; } + kfree(mic); } memcpy(key->u.aes_gmac.rx_pn, ipn, 6); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index a42c1bc7c69820..62c0e80dcd7198 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2280,7 +2280,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); - nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */ ip_vs_conn_net_cleanup(ipvs); ip_vs_app_net_cleanup(ipvs); @@ -2295,6 +2294,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); EnterFunction(2); + nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); ipvs->enable = 0; /* Disable packet reception */ smp_wmb(); ip_vs_sync_net_cleanup(ipvs); diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 1601275efe2d10..4c2ef42e189cbd 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -172,7 +172,7 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits) if (bits % BITS_PER_BYTE > 0) bytes++; - if (*bs->cur + bytes > *bs->end) + if (bs->cur + bytes > bs->end) return 1; return 0; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index e1537ace2b90c6..5df7486bb4164e 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -185,14 +185,25 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = { int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) { - flow->timeout = (u32)jiffies; + int err; - 
rhashtable_insert_fast(&flow_table->rhashtable, - &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, - nf_flow_offload_rhash_params); - rhashtable_insert_fast(&flow_table->rhashtable, - &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, - nf_flow_offload_rhash_params); + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, + nf_flow_offload_rhash_params); + if (err < 0) + return err; + + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[1].node, + nf_flow_offload_rhash_params); + if (err < 0) { + rhashtable_remove_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, + nf_flow_offload_rhash_params); + return err; + } + + flow->timeout = (u32)jiffies; return 0; } EXPORT_SYMBOL_GPL(flow_offload_add); diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 15ed91309992e8..129e9ec99ec976 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -181,6 +181,9 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, iph->protocol != IPPROTO_UDP) return -1; + if (iph->ttl <= 1) + return -1; + thoff = iph->ihl * 4; if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; @@ -412,6 +415,9 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, ip6h->nexthdr != IPPROTO_UDP) return -1; + if (ip6h->hop_limit <= 1) + return -1; + thoff = sizeof(*ip6h); if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index d67a96a25a681e..7569ba00e73284 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -238,6 +238,7 @@ static unsigned int nf_iterate(struct sk_buff *skb, repeat: verdict = nf_hook_entry_hookfn(hook, skb, state); if (verdict != NF_ACCEPT) { + *index = i; if (verdict != NF_REPEAT) return verdict; goto repeat; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ebfcfe1dcbdbbb..29ff59dd99acec 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1142,6 +1142,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats) u64 pkts, bytes; int cpu; + if (!stats) + return 0; + memset(&total, 0, sizeof(total)); for_each_possible_cpu(cpu) { cpu_stats = per_cpu_ptr(stats, cpu); @@ -1199,6 +1202,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); const struct nf_hook_ops *ops = &basechain->ops; + struct nft_stats __percpu *stats; struct nlattr *nest; nest = nla_nest_start(skb, NFTA_CHAIN_HOOK); @@ -1220,8 +1224,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) goto nla_put_failure; - if (rcu_access_pointer(basechain->stats) && - nft_dump_stats(skb, rcu_dereference(basechain->stats))) + stats = rcu_dereference_check(basechain->stats, + lockdep_commit_lock_is_held(net)); + if (nft_dump_stats(skb, stats)) goto nla_put_failure; } diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 436cc14cfc59b1..7f85af4c40ff7b 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -113,6 +113,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (ret < 0) goto err_flow_add; + dst_release(route.tuple[!dir].dst); return; err_flow_add: diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 
376181cc1defc6..9f2875efb4ac96 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -922,7 +922,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb, u32 device_idx, target_idx; int rc; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index bb95c43aae7627..5a304cfc842336 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -169,7 +169,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) { struct vport *vport; struct internal_dev *internal_dev; + struct net_device *dev; int err; + bool free_vport = true; vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms); if (IS_ERR(vport)) { @@ -177,8 +179,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) goto error; } - vport->dev = alloc_netdev(sizeof(struct internal_dev), - parms->name, NET_NAME_USER, do_setup); + dev = alloc_netdev(sizeof(struct internal_dev), + parms->name, NET_NAME_USER, do_setup); + vport->dev = dev; if (!vport->dev) { err = -ENOMEM; goto error_free_vport; @@ -199,8 +202,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) rtnl_lock(); err = register_netdevice(vport->dev); - if (err) + if (err) { + free_vport = false; goto error_unlock; + } dev_set_promiscuity(vport->dev, 1); rtnl_unlock(); @@ -210,11 +215,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) error_unlock: rtnl_unlock(); - free_percpu(vport->dev->tstats); + free_percpu(dev->tstats); error_free_netdev: - free_netdev(vport->dev); + free_netdev(dev); error_free_vport: - ovs_vport_free(vport); + if (free_vport) + ovs_vport_free(vport); error: return ERR_PTR(err); } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ae65a1cfa596bc..fb546b2d67ca85 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2600,6 +2600,8 @@ static int sctp_process_param(struct sctp_association *asoc, case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(struct sctp_paramhdr); + if (asoc->peer.cookie) + kfree(asoc->peer.cookie); asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) retval = 0; @@ -2664,6 +2666,8 @@ static int sctp_process_param(struct sctp_association *asoc, goto fall_through; /* Save peer's random parameter */ + if (asoc->peer.peer_random) + kfree(asoc->peer.peer_random); asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { @@ -2677,6 +2681,8 @@ static int sctp_process_param(struct sctp_association *asoc, goto fall_through; /* Save peer's HMAC list */ + if (asoc->peer.peer_hmacs) + kfree(asoc->peer.peer_hmacs); asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { @@ -2692,6 +2698,8 @@ static int sctp_process_param(struct sctp_association *asoc, if (!ep->auth_enable) goto fall_through; + if (asoc->peer.peer_chunks) + kfree(asoc->peer.peer_chunks); asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) diff --git a/net/tipc/group.c b/net/tipc/group.c index 06fee142f09fbe..3ee93b5c19b633 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct 
tipc_group *grp) rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) { tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq); + __skb_queue_purge(&m->deferredq); list_del(&m->list); kfree(m); } diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index f3f3d06cb6d8f6..e30f53728725d1 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -871,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk, if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) vsk->peer_shutdown |= SEND_SHUTDOWN; if (vsk->peer_shutdown == SHUTDOWN_MASK && - vsock_stream_has_data(vsk) <= 0) + vsock_stream_has_data(vsk) <= 0) { + sock_set_flag(sk, SOCK_DONE); sk->sk_state = TCP_CLOSING; + } if (le32_to_cpu(pkt->hdr.flags)) sk->sk_state_change(sk); break; diff --git a/net/wireless/core.c b/net/wireless/core.c index a88551f3bc43f2..2a46ec3cb72c15 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -498,7 +498,7 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, &rdev->rfkill_ops, rdev); if (!rdev->rfkill) { - kfree(rdev); + wiphy_free(&rdev->wiphy); return NULL; } @@ -1335,8 +1335,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, } break; case NETDEV_PRE_UP: - if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) + if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) && + !(wdev->iftype == NL80211_IFTYPE_AP_VLAN && + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP && + wdev->use_4addr)) return notifier_from_errno(-EOPNOTSUPP); + if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c6711ead5e5919..8e2f03ab4cc9f7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3191,8 +3191,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } - if (!rdev->ops->add_virtual_intf || - !(rdev->wiphy.interface_modes & (1 << type))) + if (!rdev->ops->add_virtual_intf) return -EOPNOTSUPP; if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN || @@ -3211,6 +3210,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return err; } + if (!(rdev->wiphy.interface_modes & (1 << type)) && + !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr && + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)) + return -EOPNOTSUPP; + err = nl80211_parse_mon_options(rdev, type, info, &params); if (err < 0) return err; @@ -4607,8 +4611,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, struct nlattr *sinfoattr, *bss_param; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); - if (!hdr) + if (!hdr) { + cfg80211_sinfo_release_content(sinfo); return -1; + } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl index 34414c6efad639..a2c9e7f98e0673 100755 --- a/scripts/checkstack.pl +++ b/scripts/checkstack.pl @@ -46,7 +46,7 @@ $x = "[0-9a-f]"; # hex character $xs = "[0-9a-f ]"; # hex character or space $funcre = qr/^$x* <(.*)>:$/; - if ($arch eq 'aarch64') { + if ($arch =~ '^(aarch|arm)64$') { #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; } elsif ($arch eq 'arm') { diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index ab64c6b5db5aca..28c098fb6208b4 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -214,7 +214,16 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p) return labels_profile(aa_get_newest_label(&p->label)); } -#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(unsigned char) (T)]) +static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile, + unsigned char class) +{ + if (class <= AA_CLASS_LAST) + return profile->policy.start[class]; + else + return aa_dfa_match_len(profile->policy.dfa, + profile->policy.start[0], &class, 1); +} + static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile, u16 AF) { unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET); diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 21cb384d712a28..088ea2ac857065 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -276,7 +276,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name) char *tag = NULL; size_t size = unpack_u16_chunk(e, &tag); /* if a name is specified it must match. otherwise skip tag */ - if (name && (!size || strcmp(name, tag))) + if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag))) goto fail; } else if (name) { /* if a name is specified and there is no name tag fail */ diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index b55cb96d1fed4d..f59e13c1d84a8c 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1900,20 +1900,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client, int result; struct snd_seq_client *sender = NULL; struct snd_seq_client_port *sport = NULL; - struct snd_seq_subscribers *p; result = -EINVAL; if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL) goto __end; if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL) goto __end; - p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest); - if (p) { - result = 0; - *subs = p->info; - } else - result = -ENOENT; - + result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest, + subs); __end: if (sport) snd_seq_port_unlock(sport); diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index 24d90abfc64dfe..16289aefb443a0 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c @@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, list_del_init(list); grp->exclusive = 0; write_unlock_irq(&grp->list_lock); - up_write(&grp->list_mutex); if (!empty) unsubscribe_port(client, port, grp, &subs->info, ack); + up_write(&grp->list_mutex); } /* connect two ports */ @@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, /* get matched subscriber */ -struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, - struct snd_seq_addr *dest_addr) +int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, + struct snd_seq_addr *dest_addr, + struct snd_seq_port_subscribe *subs) { - struct snd_seq_subscribers *s, *found = NULL; + struct snd_seq_subscribers *s; + int err = -ENOENT; down_read(&src_grp->list_mutex); list_for_each_entry(s, &src_grp->list_head, src_list) { if (addr_match(dest_addr, &s->info.dest)) { - found 
= s; + *subs = s->info; + err = 0; break; } } up_read(&src_grp->list_mutex); - return found; + return err; } /* diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h index 26bd71f36c41d1..06003b36652ef3 100644 --- a/sound/core/seq/seq_ports.h +++ b/sound/core/seq/seq_ports.h @@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port, struct snd_seq_port_subscribe *info); /* get matched subscriber */ -struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, - struct snd_seq_addr *dest_addr); +int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, + struct snd_seq_addr *dest_addr, + struct snd_seq_port_subscribe *subs); #endif diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c index 73e7a5e527fc02..483a8771d5020a 100644 --- a/sound/firewire/motu/motu-stream.c +++ b/sound/firewire/motu/motu-stream.c @@ -345,7 +345,7 @@ static void destroy_stream(struct snd_motu *motu, } amdtp_stream_destroy(stream); - fw_iso_resources_free(resources); + fw_iso_resources_destroy(resources); } int snd_motu_stream_init_duplex(struct snd_motu *motu) diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 5f82a375725af4..4ecaf69569dcbf 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -170,9 +170,6 @@ static int detect_quirks(struct snd_oxfw *oxfw) oxfw->midi_input_ports = 0; oxfw->midi_output_ports = 0; - /* Output stream exists but no data channels are useful. */ - oxfw->has_output = false; - return snd_oxfw_scs1x_add(oxfw); } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9bc8a7cb40ea62..308ce76149ccac 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -378,6 +378,7 @@ enum { #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) +#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8) static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", @@ -1795,8 +1796,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, else chip->bdl_pos_adj = bdl_pos_adj[dev]; - /* Workaround for a communication error on CFL (bko#199007) */ - if (IS_CFL(pci)) + /* Workaround for a communication error on CFL (bko#199007) and CNL */ + if (IS_CFL(pci) || IS_CNL(pci)) chip->polling_mode = 1; err = azx_bus_init(chip, model[dev], &pci_hda_io_ops); @@ -1883,9 +1884,6 @@ static int azx_first_init(struct azx *chip) chip->msi = 0; } - if (azx_acquire_irq(chip, 0) < 0) - return -EBUSY; - pci_set_master(pci); synchronize_irq(bus->irq); @@ -2000,6 +1998,9 @@ static int azx_first_init(struct azx *chip) return -ENODEV; } + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + strcpy(card->driver, "HDA-Intel"); strlcpy(card->shortname, driver_short_names[chip->driver_type], sizeof(card->shortname)); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5e6cb625db8376..e154506a66cb66 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4082,18 +4082,19 @@ static struct coef_fw alc225_pre_hsmode[] = { static void alc_headset_mode_unplugged(struct hda_codec *codec) { static struct coef_fw coef0255[] = { + WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ WRITE_COEF(0x06, 0x6104), /* Set 
MIC2 Vref gate with HP */ WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ {} }; - static struct coef_fw coef0255_1[] = { - WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ - {} - }; static struct coef_fw coef0256[] = { WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */ + WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ + WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ + WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */ + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ {} }; static struct coef_fw coef0233[] = { @@ -4156,13 +4157,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0255: - alc_process_coef_fw(codec, coef0255_1); alc_process_coef_fw(codec, coef0255); break; case 0x10ec0236: case 0x10ec0256: alc_process_coef_fw(codec, coef0256); - alc_process_coef_fw(codec, coef0255); break; case 0x10ec0234: case 0x10ec0274: @@ -4215,6 +4214,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ {} }; + static struct coef_fw coef0256[] = { + UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/ + WRITE_COEFEX(0x57, 0x03, 0x09a3), + WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ + {} + }; static struct coef_fw coef0233[] = { UPDATE_COEF(0x35, 0, 1<<14), WRITE_COEF(0x06, 0x2100), @@ -4262,14 +4267,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, }; switch (codec->core.vendor_id) { - case 0x10ec0236: case 0x10ec0255: - case 0x10ec0256: alc_write_coef_idx(codec, 0x45, 0xc489); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0255); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x45, 0xc489); + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); + alc_process_coef_fw(codec, coef0256); + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); + break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: @@ -4351,6 +4361,14 @@ static void alc_headset_mode_default(struct hda_codec *codec) WRITE_COEF(0x49, 0x0049), {} }; + static struct coef_fw coef0256[] = { + WRITE_COEF(0x45, 0xc489), + WRITE_COEFEX(0x57, 0x03, 0x0da3), + WRITE_COEF(0x49, 0x0049), + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ + WRITE_COEF(0x06, 0x6100), + {} + }; static struct coef_fw coef0233[] = { WRITE_COEF(0x06, 0x2100), WRITE_COEF(0x32, 0x4ea3), @@ -4401,11 +4419,16 @@ static void alc_headset_mode_default(struct hda_codec *codec) alc_process_coef_fw(codec, alc225_pre_hsmode); alc_process_coef_fw(codec, coef0225); break; - case 0x10ec0236: case 0x10ec0255: - case 0x10ec0256: alc_process_coef_fw(codec, coef0255); break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x1b, 0x0e4b); + alc_write_coef_idx(codec, 0x45, 0xc089); + msleep(50); + alc_process_coef_fw(codec, coef0256); + break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: @@ -4449,8 +4472,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) }; static struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ - WRITE_COEF(0x1b, 0x0c6b), - WRITE_COEFEX(0x57, 0x03, 0x8ea6), + WRITE_COEF(0x1b, 0x0e6b), {} }; static struct coef_fw coef0233[] = { @@ -4568,8 +4590,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) }; 
static struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ - WRITE_COEF(0x1b, 0x0c6b), - WRITE_COEFEX(0x57, 0x03, 0x8ea6), + WRITE_COEF(0x1b, 0x0e6b), {} }; static struct coef_fw coef0233[] = { @@ -4701,13 +4722,37 @@ static void alc_determine_headset_type(struct hda_codec *codec) }; switch (codec->core.vendor_id) { - case 0x10ec0236: case 0x10ec0255: + alc_process_coef_fw(codec, coef0255); + msleep(300); + val = alc_read_coef_idx(codec, 0x46); + is_ctia = (val & 0x0070) == 0x0070; + break; + case 0x10ec0236: case 0x10ec0256: + alc_write_coef_idx(codec, 0x1b, 0x0e4b); + alc_write_coef_idx(codec, 0x06, 0x6104); + alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); + + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + msleep(80); + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + alc_process_coef_fw(codec, coef0255); msleep(300); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x0070) == 0x0070; + + alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3); + alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); + + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + msleep(80); + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); break; case 0x10ec0234: case 0x10ec0274: @@ -6084,15 +6129,13 @@ static const struct hda_fixup alc269_fixups[] = { .chain_id = ALC269_FIXUP_THINKPAD_ACPI, }, [ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = { - .type = HDA_FIXUP_VERBS, - .v.verbs = (const struct hda_verb[]) { - /* Enable the Mic */ - { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, - { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, - {} + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } }, .chained = true, - .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC + .chain_id = ALC255_FIXUP_HEADSET_MODE }, [ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, @@ -7123,10 +7166,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x18, 0x02a11030}, {0x19, 0x0181303F}, {0x21, 0x0221102f}), - SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE, - {0x12, 0x90a60140}, - {0x14, 0x90170120}, - {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE, {0x12, 0x90a601c0}, {0x14, 0x90171120}, diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c index ebb9e0cf83647b..28a4ac36c4f856 100644 --- a/sound/soc/codecs/cs42xx8.c +++ b/sound/soc/codecs/cs42xx8.c @@ -558,6 +558,7 @@ static int cs42xx8_runtime_resume(struct device *dev) msleep(5); regcache_cache_only(cs42xx8->regmap, false); + regcache_mark_dirty(cs42xx8->regmap); ret = regcache_sync(cs42xx8->regmap); if (ret) { diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 528e8b10842297..09e03b884a829c 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -282,8 +282,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair) return -EINVAL; } - if ((outrate > 8000 && outrate < 30000) && - (outrate/inrate > 24 || inrate/outrate > 8)) { + if ((outrate >= 8000 && outrate <= 30000) && + (outrate > 24 * inrate || inrate > 8 * outrate)) { pair_err("exceed supported ratio range [1/24, 8] for \ inrate/outrate: %d/%d\n", inrate, outrate); return -EINVAL; diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 195ba486640f9b..ba7ee74ee53305 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ 
b/tools/kvm/kvm_stat/kvm_stat @@ -575,8 +575,12 @@ class TracepointProvider(Provider): def update_fields(self, fields_filter): """Refresh fields, applying fields_filter""" self.fields = [field for field in self._get_available_fields() - if self.is_field_wanted(fields_filter, field) or - ARCH.tracepoint_is_child(field)] + if self.is_field_wanted(fields_filter, field)] + # add parents for child fields - otherwise we won't see any output! + for field in self._fields: + parent = ARCH.tracepoint_is_child(field) + if (parent and parent not in self._fields): + self.fields.append(parent) @staticmethod def _get_online_cpus(): @@ -735,8 +739,12 @@ class DebugfsProvider(Provider): def update_fields(self, fields_filter): """Refresh fields, applying fields_filter""" self._fields = [field for field in self._get_available_fields() - if self.is_field_wanted(fields_filter, field) or - ARCH.debugfs_is_child(field)] + if self.is_field_wanted(fields_filter, field)] + # add parents for child fields - otherwise we won't see any output! + for field in self._fields: + parent = ARCH.debugfs_is_child(field) + if (parent and parent not in self._fields): + self.fields.append(parent) @property def fields(self): diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt index 0811d860fe7500..c057ba52364e0f 100644 --- a/tools/kvm/kvm_stat/kvm_stat.txt +++ b/tools/kvm/kvm_stat/kvm_stat.txt @@ -34,6 +34,8 @@ INTERACTIVE COMMANDS *c*:: clear filter *f*:: filter by regular expression + :: *Note*: Child events pull in their parents, and parents' stats summarize + all child events, not just the filtered ones *g*:: filter by guest name/PID diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 46be345766203e..ecf5fc77f50b56 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -28,6 +28,8 @@ #include #include +#define FAKE_JUMP_OFFSET -1 + struct alternative { struct list_head list; struct instruction *insn; @@ -501,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file) insn->type != INSN_JUMP_UNCONDITIONAL) continue; - if (insn->ignore) + if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) continue; rela = find_rela_by_dest_range(insn->sec, insn->offset, @@ -670,10 +672,10 @@ static int handle_group_alt(struct objtool_file *file, clear_insn_state(&fake_jump->state); fake_jump->sec = special_alt->new_sec; - fake_jump->offset = -1; + fake_jump->offset = FAKE_JUMP_OFFSET; fake_jump->type = INSN_JUMP_UNCONDITIONAL; fake_jump->jump_dest = list_next_entry(last_orig_insn, list); - fake_jump->ignore = true; + fake_jump->func = orig_insn->func; } if (!special_alt->new_len) { @@ -837,7 +839,7 @@ static int add_switch_table(struct objtool_file *file, struct instruction *insn, struct symbol *pfunc = insn->func->pfunc; unsigned int prev_offset = 0; - list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { + list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) { if (rela == next_table) break; @@ -927,6 +929,7 @@ static struct rela *find_switch_table(struct objtool_file *file, { struct rela *text_rela, *rodata_rela; struct instruction *orig_insn = insn; + struct section *rodata_sec; unsigned long table_offset; /* @@ -954,10 +957,13 @@ static struct rela *find_switch_table(struct objtool_file *file, /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (!text_rela || text_rela->sym != file->rodata->sym) + if (!text_rela || text_rela->sym->type != STT_SECTION || + 
!text_rela->sym->sec->rodata) continue; table_offset = text_rela->addend; + rodata_sec = text_rela->sym->sec; + if (text_rela->type == R_X86_64_PC32) table_offset += 4; @@ -965,10 +971,10 @@ static struct rela *find_switch_table(struct objtool_file *file, * Make sure the .rodata address isn't associated with a * symbol. gcc jump tables are anonymous data. */ - if (find_symbol_containing(file->rodata, table_offset)) + if (find_symbol_containing(rodata_sec, table_offset)) continue; - rodata_rela = find_rela_by_dest(file->rodata, table_offset); + rodata_rela = find_rela_by_dest(rodata_sec, table_offset); if (rodata_rela) { /* * Use of RIP-relative switch jumps is quite rare, and @@ -1053,7 +1059,7 @@ static int add_switch_table_alts(struct objtool_file *file) struct symbol *func; int ret; - if (!file->rodata || !file->rodata->rela) + if (!file->rodata) return 0; for_each_sec(file, sec) { @@ -1199,10 +1205,33 @@ static int read_retpoline_hints(struct objtool_file *file) return 0; } +static void mark_rodata(struct objtool_file *file) +{ + struct section *sec; + bool found = false; + + /* + * This searches for the .rodata section or multiple .rodata.func_name + * sections if -fdata-sections is being used. The .str.1.1 and .str.1.8 + * rodata sections are ignored as they don't contain jump tables. + */ + for_each_sec(file, sec) { + if (!strncmp(sec->name, ".rodata", 7) && + !strstr(sec->name, ".str1.")) { + sec->rodata = true; + found = true; + } + } + + file->rodata = found; +} + static int decode_sections(struct objtool_file *file) { int ret; + mark_rodata(file); + ret = decode_instructions(file); if (ret) return ret; @@ -2174,7 +2203,6 @@ int check(const char *_objname, bool orc) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); - file.rodata = find_section_by_name(file.elf, ".rodata"); file.c_file = find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 95700a2bcb7c1e..e6e8a655b5563e 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -60,8 +60,8 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 16); - struct section *rodata, *whitelist; - bool ignore_unreachables, c_file, hints; + struct section *whitelist; + bool ignore_unreachables, c_file, hints, rodata; }; int check(const char *objname, bool orc); diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index b75d004f6482d8..abed594a9653b0 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -390,6 +390,7 @@ static int read_relas(struct elf *elf) rela->offset = rela->rela.r_offset; symndx = GELF_R_SYM(rela->rela.r_info); rela->sym = find_symbol_by_index(elf, symndx); + rela->rela_sec = sec; if (!rela->sym) { WARN("can't find rela entry symbol %d for %s", symndx, sec->name); diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index de5cd2ddded987..bc97ed86b9cd8e 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -48,7 +48,7 @@ struct section { char *name; int idx; unsigned int len; - bool changed, text; + bool changed, text, rodata; }; struct symbol { @@ -68,6 +68,7 @@ struct rela { struct list_head list; struct hlist_node hash; GElf_Rela rela; + struct section *rela_sec; struct symbol *sym; unsigned int type; unsigned long offset; diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c index 
0b205400731444..a19690a17291eb 100644 --- a/tools/perf/arch/s390/util/machine.c +++ b/tools/perf/arch/s390/util/machine.c @@ -5,16 +5,19 @@ #include "util.h" #include "machine.h" #include "api/fs/fs.h" +#include "debug.h" int arch__fix_module_text_start(u64 *start, const char *name) { + u64 m_start = *start; char path[PATH_MAX]; snprintf(path, PATH_MAX, "module/%.*s/sections/.text", (int)strlen(name) - 2, name + 1); - - if (sysfs__read_ull(path, (unsigned long long *)start) < 0) - return -1; + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) { + pr_debug2("Using module %s start:%#lx\n", path, m_start); + *start = m_start; + } return 0; } diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c index abd38abf1d918a..24f2a87cf91ddf 100644 --- a/tools/perf/util/data-convert-bt.c +++ b/tools/perf/util/data-convert-bt.c @@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string) if (i > 0) strncpy(buffer, string, i); } - strncat(buffer + p, numstr, 4); + memcpy(buffer + p, numstr, 4); p += 3; } } diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 2048d393ece6f2..56007a7e0b4d7f 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -128,7 +128,7 @@ void thread__put(struct thread *thread) } } -struct namespaces *thread__namespaces(const struct thread *thread) +static struct namespaces *__thread__namespaces(const struct thread *thread) { if (list_empty(&thread->namespaces_list)) return NULL; @@ -136,10 +136,21 @@ struct namespaces *thread__namespaces(const struct thread *thread) return list_first_entry(&thread->namespaces_list, struct namespaces, list); } +struct namespaces *thread__namespaces(const struct thread *thread) +{ + struct namespaces *ns; + + down_read((struct rw_semaphore *)&thread->namespaces_lock); + ns = __thread__namespaces(thread); + up_read((struct rw_semaphore *)&thread->namespaces_lock); + + return ns; +} + static int __thread__set_namespaces(struct thread *thread, u64 timestamp, struct namespaces_event *event) { - struct namespaces *new, *curr = thread__namespaces(thread); + struct namespaces *new, *curr = __thread__namespaces(thread); new = namespaces__new(event); if (!new) diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c index be59f9c34ea21d..79053a4f478384 100644 --- a/tools/testing/selftests/cgroup/test_core.c +++ b/tools/testing/selftests/cgroup/test_core.c @@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo char *parent = NULL, *child = NULL; if (cg_read_strstr(root, "cgroup.controllers", "cpu") || - cg_read_strstr(root, "cgroup.subtree_control", "cpu")) { + cg_write(root, "cgroup.subtree_control", "+cpu")) { ret = KSFT_SKIP; goto cleanup; } @@ -376,6 +376,11 @@ int main(int argc, char *argv[]) if (cg_find_unified_root(root, sizeof(root))) ksft_exit_skip("cgroup v2 isn't mounted\n"); + + if (cg_read_strstr(root, "cgroup.subtree_control", "memory")) + if (cg_write(root, "cgroup.subtree_control", "+memory")) + ksft_exit_skip("Failed to set memory controller\n"); + for (i = 0; i < ARRAY_SIZE(tests); i++) { switch (tests[i].fn(root)) { case KSFT_PASS: diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index 6f339882a6ca14..c19a97dd02d496 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c @@ -1205,6 +1205,10 @@ int main(int argc, char **argv) 
if (cg_read_strstr(root, "cgroup.controllers", "memory")) ksft_exit_skip("memory controller isn't available\n"); + if (cg_read_strstr(root, "cgroup.subtree_control", "memory")) + if (cg_write(root, "cgroup.subtree_control", "+memory")) + ksft_exit_skip("Failed to set memory controller\n"); + for (i = 0; i < ARRAY_SIZE(tests); i++) { switch (tests[i].fn(root)) { case KSFT_PASS: diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh index d84193bdc307fc..dbd90ca73e44fb 100755 --- a/tools/testing/selftests/net/fib_rule_tests.sh +++ b/tools/testing/selftests/net/fib_rule_tests.sh @@ -55,7 +55,7 @@ setup() $IP link add dummy0 type dummy $IP link set dev dummy0 up - $IP address add 198.51.100.1/24 dev dummy0 + $IP address add 192.51.100.1/24 dev dummy0 $IP -6 address add 2001:db8:1::1/64 dev dummy0 set +e diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 8ec76681605cca..f25f72a75cf3db 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -23,7 +23,11 @@ ip netns add ns0 ip netns add ns1 ip netns add ns2 -ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 +ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: No virtual ethernet pair device support in kernel" + exit $ksft_skip +fi ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 ip -net ns0 link set lo up diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c index 0caca3a06bd26e..54d8d87f36b3ca 100644 --- a/tools/testing/selftests/timers/adjtick.c +++ b/tools/testing/selftests/timers/adjtick.c @@ -136,6 +136,7 @@ int check_tick_adj(long tickval) eppm = get_ppm_drift(); printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm); + fflush(stdout); tx1.modes = 0; adjtimex(&tx1); diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c index 830c462f605d94..dc80728ed19155 100644 --- a/tools/testing/selftests/timers/leapcrash.c +++ b/tools/testing/selftests/timers/leapcrash.c @@ -101,6 +101,7 @@ int main(void) } clear_time_state(); printf("."); + fflush(stdout); } printf("[OK]\n"); return ksft_exit_pass(); diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c index 1867db5d6f5e0f..7916cf5cc6fff1 100644 --- a/tools/testing/selftests/timers/mqueue-lat.c +++ b/tools/testing/selftests/timers/mqueue-lat.c @@ -102,6 +102,7 @@ int main(int argc, char **argv) int ret; printf("Mqueue latency : "); + fflush(stdout); ret = mqueue_lat_test(); if (ret < 0) { diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c index 8adb0bb51d4da4..71b5441c2fd9f6 100644 --- a/tools/testing/selftests/timers/nanosleep.c +++ b/tools/testing/selftests/timers/nanosleep.c @@ -142,6 +142,7 @@ int main(int argc, char **argv) continue; printf("Nanosleep %-31s ", clockstring(clockid)); + fflush(stdout); length = 10; while (length <= (NSEC_PER_SEC * 10)) { diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c index c3c3dc10db17fe..eb3e79ed7b4a9e 100644 --- a/tools/testing/selftests/timers/nsleep-lat.c +++ b/tools/testing/selftests/timers/nsleep-lat.c @@ -155,6 +155,7 @@ int main(int argc, char **argv) continue; printf("nsleep latency %-26s ", clockstring(clockid)); + fflush(stdout); length = 
10; while (length <= (NSEC_PER_SEC * 10)) { diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c index dcf73c5dab6e12..b41d8dd0c40c56 100644 --- a/tools/testing/selftests/timers/raw_skew.c +++ b/tools/testing/selftests/timers/raw_skew.c @@ -112,6 +112,7 @@ int main(int argv, char **argc) printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n"); printf("Estimating clock drift: "); + fflush(stdout); sleep(120); get_monotonic_and_raw(&mon, &raw); diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c index 70fed27d8fd3cf..8c4179ee2ca294 100644 --- a/tools/testing/selftests/timers/set-tai.c +++ b/tools/testing/selftests/timers/set-tai.c @@ -55,6 +55,7 @@ int main(int argc, char **argv) printf("tai offset started at %i\n", ret); printf("Checking tai offsets can be properly set: "); + fflush(stdout); for (i = 1; i <= 60; i++) { ret = set_tai(i); ret = get_tai(); diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c index 877fd5532fee03..62bd33eb16f0a5 100644 --- a/tools/testing/selftests/timers/set-tz.c +++ b/tools/testing/selftests/timers/set-tz.c @@ -65,6 +65,7 @@ int main(int argc, char **argv) printf("tz_minuteswest started at %i, dst at %i\n", min, dst); printf("Checking tz_minuteswest can be properly set: "); + fflush(stdout); for (i = -15*60; i < 15*60; i += 30) { ret = set_tz(i, dst); ret = get_tz_min(); @@ -76,6 +77,7 @@ int main(int argc, char **argv) printf("[OK]\n"); printf("Checking invalid tz_minuteswest values are caught: "); + fflush(stdout); if (!set_tz(-15*60-1, dst)) { printf("[FAILED] %i didn't return failure!\n", -15*60-1); diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c index 759c9c06f1a034..cf3e48919874b3 100644 --- a/tools/testing/selftests/timers/threadtest.c +++ b/tools/testing/selftests/timers/threadtest.c @@ -163,6 +163,7 @@ int main(int argc, char **argv) strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start)); printf("%s\n", buf); printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime); + fflush(stdout); /* spawn */ for (i = 0; i < thread_count; i++) diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c index d9d3ab93b31a75..5397de708d3c25 100644 --- a/tools/testing/selftests/timers/valid-adjtimex.c +++ b/tools/testing/selftests/timers/valid-adjtimex.c @@ -123,6 +123,7 @@ int validate_freq(void) /* Set the leap second insert flag */ printf("Testing ADJ_FREQ... "); + fflush(stdout); for (i = 0; i < NUM_FREQ_VALID; i++) { tx.modes = ADJ_FREQUENCY; tx.freq = valid_freq[i]; @@ -250,6 +251,7 @@ int set_bad_offset(long sec, long usec, int use_nano) int validate_set_offset(void) { printf("Testing ADJ_SETOFFSET... 
"); + fflush(stdout); /* Test valid values */ if (set_offset(NSEC_PER_SEC - 1, 1)) diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index dc68340a6a960f..2cf3dc49bd0346 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -24,6 +24,8 @@ TEST_GEN_FILES += virtual_address_range TEST_PROGS := run_vmtests +TEST_FILES := test_vmalloc.sh + KSFT_KHDR_INSTALL := 1 include ../lib.mk diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c index 5abbe9b3c65213..6880236974b854 100644 --- a/virt/kvm/arm/aarch32.c +++ b/virt/kvm/arm/aarch32.c @@ -25,127 +25,6 @@ #include #include -/* - * stolen from arch/arm/kernel/opcodes.c - * - * condition code lookup table - * index into the table is test code: EQ, NE, ... LT, GT, AL, NV - * - * bit position in short is condition code: NZCV - */ -static const unsigned short cc_map[16] = { - 0xF0F0, /* EQ == Z set */ - 0x0F0F, /* NE */ - 0xCCCC, /* CS == C set */ - 0x3333, /* CC */ - 0xFF00, /* MI == N set */ - 0x00FF, /* PL */ - 0xAAAA, /* VS == V set */ - 0x5555, /* VC */ - 0x0C0C, /* HI == C set && Z clear */ - 0xF3F3, /* LS == C clear || Z set */ - 0xAA55, /* GE == (N==V) */ - 0x55AA, /* LT == (N!=V) */ - 0x0A05, /* GT == (!Z && (N==V)) */ - 0xF5FA, /* LE == (Z || (N!=V)) */ - 0xFFFF, /* AL always */ - 0 /* NV */ -}; - -/* - * Check if a trapped instruction should have been executed or not. - */ -bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) -{ - unsigned long cpsr; - u32 cpsr_cond; - int cond; - - /* Top two bits non-zero? Unconditional. */ - if (kvm_vcpu_get_hsr(vcpu) >> 30) - return true; - - /* Is condition field valid? */ - cond = kvm_vcpu_get_condition(vcpu); - if (cond == 0xE) - return true; - - cpsr = *vcpu_cpsr(vcpu); - - if (cond < 0) { - /* This can happen in Thumb mode: examine IT state. */ - unsigned long it; - - it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); - - /* it == 0 => unconditional. */ - if (it == 0) - return true; - - /* The cond for this insn works out as the top 4 bits. */ - cond = (it >> 4); - } - - cpsr_cond = cpsr >> 28; - - if (!((cc_map[cond] >> cpsr_cond) & 1)) - return false; - - return true; -} - -/** - * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block - * @vcpu: The VCPU pointer - * - * When exceptions occur while instructions are executed in Thumb IF-THEN - * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have - * to do this little bit of work manually. 
The fields map like this: - * - * IT[7:0] -> CPSR[26:25],CPSR[15:10] - */ -static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) -{ - unsigned long itbits, cond; - unsigned long cpsr = *vcpu_cpsr(vcpu); - bool is_arm = !(cpsr & PSR_AA32_T_BIT); - - if (is_arm || !(cpsr & PSR_AA32_IT_MASK)) - return; - - cond = (cpsr & 0xe000) >> 13; - itbits = (cpsr & 0x1c00) >> (10 - 2); - itbits |= (cpsr & (0x3 << 25)) >> 25; - - /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ - if ((itbits & 0x7) == 0) - itbits = cond = 0; - else - itbits = (itbits << 1) & 0x1f; - - cpsr &= ~PSR_AA32_IT_MASK; - cpsr |= cond << 13; - cpsr |= (itbits & 0x1c) << (10 - 2); - cpsr |= (itbits & 0x3) << 25; - *vcpu_cpsr(vcpu) = cpsr; -} - -/** - * kvm_skip_instr - skip a trapped instruction and proceed to the next - * @vcpu: The vcpu pointer - */ -void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) -{ - bool is_thumb; - - is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); - if (is_thumb && !is_wide_instr) - *vcpu_pc(vcpu) += 2; - else - *vcpu_pc(vcpu) += 4; - kvm_adjust_itstate(vcpu); -} - /* * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. */ diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c new file mode 100644 index 00000000000000..d31f267961e75a --- /dev/null +++ b/virt/kvm/arm/hyp/aarch32.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hyp portion of the (not much of an) Emulation layer for 32bit guests. + * + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier + * + * based on arch/arm/kvm/emulate.c + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + */ + +#include +#include +#include + +/* + * stolen from arch/arm/kernel/opcodes.c + * + * condition code lookup table + * index into the table is test code: EQ, NE, ... LT, GT, AL, NV + * + * bit position in short is condition code: NZCV + */ +static const unsigned short cc_map[16] = { + 0xF0F0, /* EQ == Z set */ + 0x0F0F, /* NE */ + 0xCCCC, /* CS == C set */ + 0x3333, /* CC */ + 0xFF00, /* MI == N set */ + 0x00FF, /* PL */ + 0xAAAA, /* VS == V set */ + 0x5555, /* VC */ + 0x0C0C, /* HI == C set && Z clear */ + 0xF3F3, /* LS == C clear || Z set */ + 0xAA55, /* GE == (N==V) */ + 0x55AA, /* LT == (N!=V) */ + 0x0A05, /* GT == (!Z && (N==V)) */ + 0xF5FA, /* LE == (Z || (N!=V)) */ + 0xFFFF, /* AL always */ + 0 /* NV */ +}; + +/* + * Check if a trapped instruction should have been executed or not. + */ +bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) +{ + unsigned long cpsr; + u32 cpsr_cond; + int cond; + + /* Top two bits non-zero? Unconditional. */ + if (kvm_vcpu_get_hsr(vcpu) >> 30) + return true; + + /* Is condition field valid? */ + cond = kvm_vcpu_get_condition(vcpu); + if (cond == 0xE) + return true; + + cpsr = *vcpu_cpsr(vcpu); + + if (cond < 0) { + /* This can happen in Thumb mode: examine IT state. */ + unsigned long it; + + it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); + + /* it == 0 => unconditional. */ + if (it == 0) + return true; + + /* The cond for this insn works out as the top 4 bits. 
*/ + cond = (it >> 4); + } + + cpsr_cond = cpsr >> 28; + + if (!((cc_map[cond] >> cpsr_cond) & 1)) + return false; + + return true; +} + +/** + * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block + * @vcpu: The VCPU pointer + * + * When exceptions occur while instructions are executed in Thumb IF-THEN + * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have + * to do this little bit of work manually. The fields map like this: + * + * IT[7:0] -> CPSR[26:25],CPSR[15:10] + */ +static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) +{ + unsigned long itbits, cond; + unsigned long cpsr = *vcpu_cpsr(vcpu); + bool is_arm = !(cpsr & PSR_AA32_T_BIT); + + if (is_arm || !(cpsr & PSR_AA32_IT_MASK)) + return; + + cond = (cpsr & 0xe000) >> 13; + itbits = (cpsr & 0x1c00) >> (10 - 2); + itbits |= (cpsr & (0x3 << 25)) >> 25; + + /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ + if ((itbits & 0x7) == 0) + itbits = cond = 0; + else + itbits = (itbits << 1) & 0x1f; + + cpsr &= ~PSR_AA32_IT_MASK; + cpsr |= cond << 13; + cpsr |= (itbits & 0x1c) << (10 - 2); + cpsr |= (itbits & 0x3) << 25; + *vcpu_cpsr(vcpu) = cpsr; +} + +/** + * kvm_skip_instr - skip a trapped instruction and proceed to the next + * @vcpu: The vcpu pointer + */ +void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) +{ + bool is_thumb; + + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); + if (is_thumb && !is_wide_instr) + *vcpu_pc(vcpu) += 2; + else + *vcpu_pc(vcpu) += 4; + kvm_adjust_itstate(vcpu); +}
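
Note (not part of the patch): the cc_map[] table that the hunks above move into virt/kvm/arm/hyp/aarch32.c is indexed by the 4-bit condition field of a trapped AArch32 instruction, and the bit selected by the current NZCV flags (CPSR[31:28]) says whether the condition passes, exactly as kvm_condition_valid32() does with `(cc_map[cond] >> cpsr_cond) & 1`. The following minimal user-space sketch only illustrates that lookup; the helper name cond_passes() and the main() driver are illustrative and do not exist in the kernel sources.

/* Stand-alone illustration of the cc_map condition lookup (assumed helper names). */
#include <stdio.h>

static const unsigned short cc_map[16] = {
	0xF0F0,	/* EQ == Z set            */
	0x0F0F,	/* NE                     */
	0xCCCC,	/* CS == C set            */
	0x3333,	/* CC                     */
	0xFF00,	/* MI == N set            */
	0x00FF,	/* PL                     */
	0xAAAA,	/* VS == V set            */
	0x5555,	/* VC                     */
	0x0C0C,	/* HI == C set && Z clear */
	0xF3F3,	/* LS == C clear || Z set */
	0xAA55,	/* GE == (N==V)           */
	0x55AA,	/* LT == (N!=V)           */
	0x0A05,	/* GT == (!Z && (N==V))   */
	0xF5FA,	/* LE == (Z || (N!=V))    */
	0xFFFF,	/* AL always              */
	0	/* NV                     */
};

/* cond: instruction condition field (0..15); nzcv: CPSR[31:28], i.e. N<<3|Z<<2|C<<1|V */
static int cond_passes(unsigned int cond, unsigned int nzcv)
{
	return (cc_map[cond & 0xf] >> (nzcv & 0xf)) & 1;
}

int main(void)
{
	/* EQ (0x0) passes only when Z is set: NZCV = 0b0100 */
	printf("EQ with Z set:   %d\n", cond_passes(0x0, 0x4)); /* prints 1 */
	printf("EQ with Z clear: %d\n", cond_passes(0x0, 0x0)); /* prints 0 */
	/* GE (0xa) passes when N == V, e.g. NZCV = 0b1001 */
	printf("GE with N=V=1:   %d\n", cond_passes(0xa, 0x9)); /* prints 1 */
	return 0;
}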