diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt new file mode 100644 index 00000000000000..7f7ce1f042fd84 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -0,0 +1,37 @@ +Qualcomm RPM Clock Controller Binding +------------------------------------------------ +The RPM is a dedicated hardware engine for managing the shared +SoC resources in order to keep the lowest power profile. It +communicates with other hardware subsystems via shared memory +and accepts clock requests, aggregates the requests and turns +the clocks on/off or scales them on demand. + +Required properties : +- compatible : shall contain only one of the following. The generic + compatible "qcom,rpmcc" should also be included. + + "qcom,rpmcc-msm8916", "qcom,rpmcc" + "qcom,rpmcc-apq8064", "qcom,rpmcc" + +- #clock-cells : shall contain 1 + +Example: + smd { + compatible = "qcom,smd"; + + rpm { + interrupts = <0 168 1>; + qcom,ipc = <&apcs 8 0>; + qcom,smd-edge = <15>; + + rpm_requests { + compatible = "qcom,rpm-msm8916"; + qcom,smd-channels = "rpm_requests"; + + rpmcc: qcom,rpmcc { + compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc"; + #clock-cells = <1>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt index 379ee2ea9a3d84..21590eec303c9d 100644 --- a/Documentation/devicetree/bindings/display/msm/hdmi.txt +++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt @@ -11,6 +11,7 @@ Required properties: - reg: Physical base address and length of the controller's registers - reg-names: "core_physical" - interrupts: The interrupt signal from the hdmi block. +- power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: device clocks See ../clocks/clock-bindings.txt for details. - qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin @@ -18,6 +19,7 @@ Required properties: - qcom,hdmi-tx-hpd-gpio: hpd pin - core-vdda-supply: phandle to supply regulator - hdmi-mux-supply: phandle to mux regulator +- qcom,hdmi-phy: phandle to HDMI PHY device node Optional properties: - qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin @@ -27,6 +29,27 @@ Optional properties: - pinctrl-0: the default pinctrl state (active) - pinctrl-1: the "sleep" pinctrl state +HDMI PHY: +Required properties: +- compatible: Should be one of the following: + * "qcom,hdmi-phy-8x60" + * "qcom,hdmi-phy-8960" + * "qcom,hdmi-phy-8x74" + * "qcom,hdmi-phy-8996" +- reg: Physical base address and length of the registers of the PHY sub blocks. +- reg-names: The names of register regions. The following regions are required: + * "hdmi_pll" + * "hdmi_phy" + For the HDMI PHY on msm8996, these additional register regions are required: + * "hdmi_tx_l0" + * "hdmi_tx_l1" + * "hdmi_tx_l2" + * "hdmi_tx_l3" +- power-domains: Should be <&mmcc MDSS_GDSC>. +- clocks: device clocks + See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- core-vdda-supply: phandle to vdda regulator device node + Example: / { @@ -35,7 +58,7 @@ Example: hdmi: qcom,hdmi-tx-8960@4a00000 { compatible = "qcom,hdmi-tx-8960"; reg-names = "core_physical"; - reg = <0x04a00000 0x1000>; + reg = <0x04a00000 0x2f0>; interrupts = ; power-domains = <&mmcc MDSS_GDSC>; clock-names = @@ -54,5 +77,19 @@ Example: pinctrl-names = "default", "sleep"; pinctrl-0 = <&hpd_active &ddc_active &cec_active>; pinctrl-1 = <&hpd_suspend &ddc_suspend &cec_suspend>; + + qcom,hdmi-phy = <&hdmi_phy>; + }; + + hdmi_phy: qcom,hdmi-phy-8960@4a00400 { + compatible = "qcom,hdmi-phy-8960"; + reg-names = "hdmi_phy", + "hdmi_pll"; + reg = <0x4a00400 0x60>, + <0x4a00500 0x100>; + power-domains = <&mmcc MDSS_GDSC>; + clock-names = "slave_iface_clk"; + clocks = <&mmcc HDMI_S_AHB_CLK>; + core-vdda-supply = <&pm8921_hdmi_mvs>; }; }; diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 8e94af64ee9451..0fd9a17adf4c87 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -1,4 +1,7 @@ -dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb msm8916-mtp.dtb +dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb +dtb-$(CONFIG_ARCH_QCOM) += apq8096-dragonboard.dtb always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts new file mode 100644 index 00000000000000..2a4ecfb012ed4a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "apq8096-dragonboard.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. APQ 8096 DragonBoard"; + compatible = "qcom,apq8096-dragonboard"; + qcom,board-id = <10 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi new file mode 100644 index 00000000000000..51c26f75b9d3d4 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "msm8996.dtsi" + +/ { + aliases { + serial0 = &blsp2_uart1; + }; + + chosen { + stdout-path = "serial0"; + }; + + soc { + serial@75b0000 { + status = "okay"; + }; + + qcom,hdmi_phy@9a0600 { + status = "okay"; + + vddio-supply = <&pm8994_l12>; + vcca-supply = <&pm8994_l28>; + }; + + qcom,hdmi_tx@9a0000 { + status = "okay"; + + core-vdda-supply = <&pm8994_l12>; + core-vcc-supply = <&pm8994_s4>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts new file mode 100644 index 00000000000000..619af44a595da7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "msm8996-mtp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM 8996 MTP"; + compatible = "qcom,msm8996-mtp"; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi new file mode 100644 index 00000000000000..9bab5c011c070b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "msm8996.dtsi" + +/ { + aliases { + serial0 = &blsp2_uart1; + }; + + chosen { + stdout-path = "serial0"; + }; + + soc { + serial@75b0000 { + status = "okay"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi new file mode 100644 index 00000000000000..45e0f1a3636c46 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -0,0 +1,596 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +/ { + model = "Qualcomm Technologies, Inc. 
MSM8996"; + + qcom,msm-id = <291 0x30001>; + qcom,pmic-id = <0x20009 0x2000A 0x0 0x0>; + interrupt-parent = <&intc>; + + #address-cells = <2>; + #size-cells = <2>; + + chosen { }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0 0 0>; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "qcom,kryo"; + reg = <0x0 0x0>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + compatible = "cache"; + cache-level = <2>; + }; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "qcom,kryo"; + reg = <0x0 0x1>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + }; + + CPU2: cpu@100 { + device_type = "cpu"; + compatible = "qcom,kryo"; + reg = <0x0 0x100>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + compatible = "cache"; + cache-level = <2>; + }; + }; + + CPU3: cpu@101 { + device_type = "cpu"; + compatible = "qcom,kryo"; + reg = <0x0 0x101>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + }; + + cpu-map { + cluster0 { + core0 { + cpu = <&CPU0>; + }; + + core1 { + cpu = <&CPU1>; + }; + }; + + cluster1 { + core0 { + cpu = <&CPU2>; + }; + + core1 { + cpu = <&CPU3>; + }; + }; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + clocks { + xo_board { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <19200000>; + clock-output-names = "xo_board"; + }; + + sleep_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32764>; + clock-output-names = "sleep_clk"; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + soc: soc { + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0 0xffffffff>; + compatible = "simple-bus"; + + intc: interrupt-controller@9bc0000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + interrupt-controller; + #redistributor-regions = <1>; + redistributor-stride = <0x0 0x40000>; + reg = <0x09bc0000 0x10000>, + <0x09c00000 0x100000>; + interrupts = ; + }; + + gcc: clock-controller@300000 { + compatible = "qcom,gcc-msm8996"; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + reg = <0x300000 0x90000>; + }; + + blsp2_uart1: serial@75b0000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x75b0000 0x1000>; + interrupts = ; + clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + + pinctrl@1010000 { + compatible = "qcom,msm8996-pinctrl"; + reg = <0x01010000 0x300000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + + mdss_hdmi_hpd_active: mdss_hdmi_hpd_active { + mux { + pins = "gpio34"; + function = "hdmi_hot"; + }; + + config { + pins = "gpio34"; + bias-pull-down; + drive-strength = <16>; + }; + }; + + mdss_hdmi_hpd_suspend: mdss_hdmi_hpd_suspend { + mux { + pins = "gpio34"; + function = "hdmi_hot"; + }; + + config { + pins = "gpio34"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + mdss_hdmi_ddc_active: mdss_hdmi_ddc_active { + mux { + pins = "gpio32", "gpio33"; + function = "hdmi_ddc"; + }; + + config { + pins = "gpio32", "gpio33"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_ddc_suspend: mdss_hdmi_ddc_suspend { + mux { + pins = "gpio32", "gpio33"; + function = "hdmi_ddc"; + }; + + config { + pins = "gpio32", "gpio33"; + drive-strength = <2>; + bias-pull-down; + 
}; + }; + }; + + timer@09840000 { + #address-cells = <1>; + #size-cells = <1>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0x09840000 0x1000>; + clock-frequency = <19200000>; + + frame@9850000 { + frame-number = <0>; + interrupts = , + ; + reg = <0x09850000 0x1000>, + <0x09860000 0x1000>; + }; + + frame@9870000 { + frame-number = <1>; + interrupts = ; + reg = <0x09870000 0x1000>; + status = "disabled"; + }; + + frame@9880000 { + frame-number = <2>; + interrupts = ; + reg = <0x09880000 0x1000>; + status = "disabled"; + }; + + frame@9890000 { + frame-number = <3>; + interrupts = ; + reg = <0x09890000 0x1000>; + status = "disabled"; + }; + + frame@98a0000 { + frame-number = <4>; + interrupts = ; + reg = <0x098a0000 0x1000>; + status = "disabled"; + }; + + frame@98b0000 { + frame-number = <5>; + interrupts = ; + reg = <0x098b0000 0x1000>; + status = "disabled"; + }; + + frame@98c0000 { + frame-number = <6>; + interrupts = ; + reg = <0x098c0000 0x1000>; + status = "disabled"; + }; + }; + + spmi_bus: qcom,spmi@400f000 { + compatible = "qcom,spmi-pmic-arb"; + reg = <0x400f000 0x1000>, + <0x4400000 0x800000>, + <0x4c00000 0x800000>, + <0x5800000 0x200000>, + <0x400a000 0x002100>; + reg-names = "core", "chnls", "obsrvr", "intr", "cnfg"; + interrupt-names = "periph_irq"; + interrupts = ; + qcom,ee = <0>; + qcom,channel = <0>; + #address-cells = <2>; + #size-cells = <0>; + interrupt-controller; + #interrupt-cells = <4>; + }; + + mmcc: clock-controller@8c0000 { + compatible = "qcom,mmcc-msm8996"; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + reg = <0x8c0000 0x40000>; + assigned-clocks = <&mmcc MMPLL9_PLL>, + <&mmcc MMPLL1_PLL>, + <&mmcc MMPLL3_PLL>, + <&mmcc MMPLL4_PLL>, + <&mmcc MMPLL5_PLL>; + assigned-clock-rates = <624000000>, + <810000000>, + <980000000>, + <960000000>, + <825000000>; + }; + + glink { + compatible = "qcom,glink"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + rpm { + qcom,glink-edge = "rpm"; + interrupts = <0 168 1>; + qcom,irq-mask = <0x1>; + reg = <0x00068000 0x6000>, + <0x09820010 0x4>; + reg-names = "msgram", "irq-reg-base"; + + rpm-requests { + compatible = "qcom,rpm-msm8996"; + qcom,glink-channels = "rpm_requests"; + + rpmcc: qcom,rpmcc { + compatible = "qcom,rpmcc-msm8996", "qcom,rpmcc"; + #clock-cells = <1>; + }; + + pm8994-regulators { + compatible = "qcom,rpm-pm8994-regulators"; + + pm8994_s1: s1 {}; + pm8994_s2: s2 {}; + pm8994_s3: s3 { + regulator-min-microvolt = <1300000>; + regulator-max-microvolt = <1300000>; + }; + pm8994_s4: s4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_s5: s5 { + regulator-min-microvolt = <2150000>; + regulator-max-microvolt = <2150000>; + }; + pm8994_s6: s6 {}; + pm8994_s7: s7 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + }; + pm8994_s8: s8 {}; + pm8994_s9: s9 {}; + pm8994_s10: s10 {}; + pm8994_s11: s11 {}; + pm8994_s12: s12 {}; + + pm8994_l1: l1 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + pm8994_l2: l2 { + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; + }; + pm8994_l3: l3 { + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + }; + pm8994_l4: l4 { + regulator-min-microvolt = <1225000>; + regulator-max-microvolt = <1225000>; + }; + pm8994_l5: l5 {}; + pm8994_l6: l6 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + pm8994_l7: l7 {}; + pm8994_l8: l8 { + regulator-min-microvolt = 
<1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l9: l9 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l10: l10 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l11: l11 { + regulator-min-microvolt = <1150000>; + regulator-max-microvolt = <1150000>; + }; + pm8994_l12: l12 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l13: l13 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2950000>; + }; + pm8994_l14: l14 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l15: l15 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l16: l16 { + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <2700000>; + }; + pm8994_l17: l17 { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + }; + pm8994_l18: l18 { + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <2900000>; + }; + pm8994_l19: l19 { + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + }; + pm8994_l20: l20 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + }; + pm8994_l21: l21 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + }; + pm8994_l22: l22 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + pm8994_l23: l23 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + pm8994_l24: l24 { + regulator-min-microvolt = <3075000>; + regulator-max-microvolt = <3075000>; + }; + pm8994_l25: l25 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + pm8994_l26: l26 {}; + pm8994_l27: l27 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + pm8994_l28: l28 { + regulator-min-microvolt = <925000>; + regulator-max-microvolt = <925000>; + }; + pm8994_l29: l29 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + pm8994_l30: l30 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + pm8994_l31: l31 {}; + pm8994_l32: l32 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + }; + }; + }; + + mdss_mdp: qcom,mdss_mdp@900000 { + compatible = "qcom,mdss_mdp"; + reg = <0x00900000 0x90000>, + <0x009b0000 0x1040>, + <0x009b8000 0x1040>; + reg-names = "mdp_phys", + "vbif_phys", + "vbif_nrt_phys"; + clocks = <&mmcc MDSS_AHB_CLK>, + <&mmcc MMSS_MMAGIC_AHB_CLK>, + <&mmcc MDSS_AXI_CLK>, + <&mmcc MDP_CLK_SRC>, + <&mmcc MDSS_MDP_CLK>, + <&mmcc SMMU_MDP_AXI_CLK>, + <&mmcc MDSS_VSYNC_CLK>, + <&mmcc MMAGIC_MDSS_AXI_CLK>, + <&mmcc MMSS_MMAGIC_AXI_CLK>, + <&mmcc MMSS_S0_AXI_CLK>, + <&mmcc MMAGIC_BIMC_AXI_CLK>; + clock-names = "iface_clk", + "mmagic_iface_clk", + "bus_clk", + "core_clk_src", + "core_clk", + "iommu_clk", + "vsync_clk", + "mmagic_mdss_bus_clk", + "mmagic_mmss_bus_clk", + "mmss_s0_bus_clk", + "mmagic_bimc_bus_clk"; + + connectors = <&mdss_hdmi>; + + power-domains = <&mmcc MDSS_GDSC>; + + interrupt-parent = <&intc>; + interrupts = <0 83 0>; + interrupt-controller; + #interrupt-cells = <1>; + //iommus = <&mdp_smmu 0>; + }; + + mdss_hdmi_phy: qcom,hdmi_phy@9a0600 { + compatible = "qcom,hdmi-phy-8996"; + reg = <0x9a0600 0x1c4>, + <0x9a0a00 0x124>, + <0x9a0c00 0x124>, + <0x9a0e00 0x124>, + <0x9a1000 0x124>, + <0x9a1200 0x0c8>; + reg-names =
"hdmi_pll", + "hdmi_tx_l0", + "hdmi_tx_l1", + "hdmi_tx_l2", + "hdmi_tx_l3", + "hdmi_phy"; + + power-domains = <&mmcc MDSS_GDSC>; + + clocks = <&mmcc MDSS_AHB_CLK>, + <&mmcc MMSS_MMAGIC_AHB_CLK>, + <&gcc GCC_HDMI_CLKREF_CLK>; + clock-names = "iface_clk", + "mmagic_iface_clk", + "ref_clk"; + }; + + mdss_hdmi: qcom,hdmi_tx@9a0000 { + compatible = "qcom,hdmi-tx-8996"; + reg = <0x009a0000 0x50c>, + <0x00070000 0x6158>, + <0x009e0000 0xfff>; + reg-names = "core_physical", + "qfprom_physical", + "hdcp_physical"; + clocks = <&mmcc MDSS_MDP_CLK>, + <&mmcc MDSS_AHB_CLK>, + <&mmcc MDSS_HDMI_CLK>, + <&mmcc MDSS_HDMI_AHB_CLK>, + <&mmcc MDSS_EXTPCLK_CLK>; + clock-names = + "mdp_core_clk", + "iface_clk", + "core_clk", + "alt_iface_clk", + "extp_clk"; + interrupt-parent = <&mdss_mdp>; + interrupts = <8 0>; + + power-domains = <&mmcc MDSS_GDSC>; + + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active>; + pinctrl-1 = <&mdss_hdmi_hpd_suspend + &mdss_hdmi_ddc_suspend>; + + qcom,hdmi-phy = <&mdss_hdmi_phy>; + status = "ok"; + }; + }; +}; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 86581f793e398b..b89949d1dafa3e 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -145,6 +145,7 @@ CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_SPI_QUP=y CONFIG_PINCTRL_MSM8916=y +CONFIG_PINCTRL_MSM8996=y CONFIG_GPIO_PL061=y CONFIG_GPIO_RCAR=y CONFIG_GPIO_XGENE=y @@ -205,11 +206,16 @@ CONFIG_XEN_GRANT_DEV_ALLOC=y CONFIG_COMMON_CLK_CS2000_CP=y CONFIG_COMMON_CLK_QCOM=y CONFIG_MSM_GCC_8916=y +CONFIG_MSM_GCC_8996=y +CONFIG_MSM_MMCC_8996=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_ARM_SMMU=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_SMD=y CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_GLINK=y +CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y +CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_ARCH_TEGRA_132_SOC=y CONFIG_ARCH_TEGRA_210_SOC=y CONFIG_HISILICON_IRQ_MBIGEN=y diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index b552eceec2beb0..912da1762b1c1d 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -2,6 +2,9 @@ config QCOM_GDSC bool select PM_GENERIC_DOMAINS if PM +config QCOM_RPMCC + bool + config COMMON_CLK_QCOM tristate "Support for Qualcomm's clock controllers" depends on OF @@ -9,6 +12,22 @@ config COMMON_CLK_QCOM select REGMAP_MMIO select RESET_CONTROLLER +config QCOM_CLK_SMD_RPM + tristate "RPM over SMD based Clock Controller" + depends on COMMON_CLK_QCOM && QCOM_SMD_RPM + select QCOM_RPMCC + help + Support for the clocks exposed by the Resource Power Manager + processor on devices like apq8016, apq8084 and msm8974. + +config QCOM_CLK_RPM + tristate "RPM based Clock Controller" + depends on COMMON_CLK_QCOM + select QCOM_RPMCC + help + Support for the clocks exposed by the Resource Power Manager + processor on devices like apq8064. 
+ config APQ_GCC_8084 tristate "APQ8084 Global Clock Controller" select QCOM_GDSC diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index dc4280b85db1f6..660e332fbdb0ea 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -10,6 +10,8 @@ clk-qcom-y += clk-branch.o clk-qcom-y += clk-regmap-divider.o clk-qcom-y += clk-regmap-mux.o clk-qcom-y += reset.o +clk-qcom-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o +clk-qcom-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c new file mode 100644 index 00000000000000..66f204f7120fee --- /dev/null +++ b/drivers/clk/qcom/clk-rpm.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mfd/qcom_rpm.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include "clk-rpm.h" +#include <dt-bindings/mfd/qcom-rpm.h> + +#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw) + +static DEFINE_MUTEX(rpm_clk_lock); + +static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate) +{ + u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */ + + return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, + r->rpm_clk_id, &value, 1); +} + +static int clk_rpm_prepare(struct clk_hw *hw) +{ + struct clk_rpm *r = to_clk_rpm(hw); + unsigned long rate = r->rate; + int ret = 0; + + mutex_lock(&rpm_clk_lock); + + if (!rate) + goto out; + + if (r->branch) + rate = !!rate; + + ret = clk_rpm_set_rate_active(r, rate); + + if (ret) + goto out; + +out: + if (!ret) + r->enabled = true; + + mutex_unlock(&rpm_clk_lock); + + return ret; +} + +static void clk_rpm_unprepare(struct clk_hw *hw) +{ + struct clk_rpm *r = to_clk_rpm(hw); + int ret; + + mutex_lock(&rpm_clk_lock); + + if (!r->rate) + goto out; + + ret = clk_rpm_set_rate_active(r, 0); /* vote the clock off */ + if (ret) + goto out; + + r->enabled = false; + +out: + mutex_unlock(&rpm_clk_lock); +} + +static int clk_rpm_set_rate(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate) +{ + struct clk_rpm *r = to_clk_rpm(hw); + int ret = 0; + + mutex_lock(&rpm_clk_lock); + + if (r->enabled) + ret = clk_rpm_set_rate_active(r, rate); + + if (!ret) + r->rate = rate; + + mutex_unlock(&rpm_clk_lock); + + return ret; +} + +static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate is requested. + */ + return rate; +} + +static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_rpm *r = to_clk_rpm(hw); + + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate was set.
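+ * (Here, r->rate is the last rate requested through clk_rpm_set_rate(); the DEFINE_CLK_RPM() macros in clk-rpm.h start it at INT_MAX.)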
+ */ + return r->rate; +} + +const struct clk_ops clk_rpm_ops = { + .prepare = clk_rpm_prepare, + .unprepare = clk_rpm_unprepare, + .set_rate = clk_rpm_set_rate, + .round_rate = clk_rpm_round_rate, + .recalc_rate = clk_rpm_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_rpm_ops); + +const struct clk_ops clk_rpm_branch_ops = { + .prepare = clk_rpm_prepare, + .unprepare = clk_rpm_unprepare, + .round_rate = clk_rpm_round_rate, + .recalc_rate = clk_rpm_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_rpm_branch_ops); + +struct rpm_cc { + struct qcom_rpm *rpm; + struct clk_onecell_data data; + struct clk *clks[]; +}; + +struct rpm_clk_desc { + struct clk_rpm **clks; + size_t num_clks; +}; + +/* apq8064 */ +DEFINE_CLK_RPM_PXO_BRANCH(apq8064, pxo, QCOM_RPM_PXO_CLK, 27000000); +DEFINE_CLK_RPM_CXO_BRANCH(apq8064, cxo, QCOM_RPM_CXO_CLK, 19200000); +DEFINE_CLK_RPM(apq8064, afab_clk, QCOM_RPM_APPS_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, cfpb_clk, QCOM_RPM_CFPB_CLK); +DEFINE_CLK_RPM(apq8064, daytona_clk, QCOM_RPM_DAYTONA_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, ebi1_clk, QCOM_RPM_EBI1_CLK); +DEFINE_CLK_RPM(apq8064, mmfab_clk, QCOM_RPM_MM_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, mmfpb_clk, QCOM_RPM_MMFPB_CLK); +DEFINE_CLK_RPM(apq8064, sfab_clk, QCOM_RPM_SYS_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, sfpb_clk, QCOM_RPM_SFPB_CLK); +DEFINE_CLK_RPM(apq8064, qdss_clk, QCOM_RPM_QDSS_CLK); + +static struct clk_rpm *apq8064_clks[] = { + [QCOM_RPM_PXO_CLK] = &apq8064_pxo, + [QCOM_RPM_CXO_CLK] = &apq8064_cxo, + [QCOM_RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk, + [QCOM_RPM_CFPB_CLK] = &apq8064_cfpb_clk, + [QCOM_RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk, + [QCOM_RPM_EBI1_CLK] = &apq8064_ebi1_clk, + [QCOM_RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk, + [QCOM_RPM_MMFPB_CLK] = &apq8064_mmfpb_clk, + [QCOM_RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk, + [QCOM_RPM_SFPB_CLK] = &apq8064_sfpb_clk, + [QCOM_RPM_QDSS_CLK] = &apq8064_qdss_clk, +}; + +static const struct rpm_clk_desc rpm_clk_apq8064 = { + .clks = apq8064_clks, + .num_clks = ARRAY_SIZE(apq8064_clks), +}; + +static const struct of_device_id rpm_clk_match_table[] = { + { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064}, + { } +}; +MODULE_DEVICE_TABLE(of, rpm_clk_match_table); + +static int rpm_clk_probe(struct platform_device *pdev) +{ + struct clk **clks; + struct clk *clk; + struct rpm_cc *rcc; + struct clk_onecell_data *data; + int ret, i; + size_t num_clks; + struct qcom_rpm *rpm; + struct clk_rpm **rpm_clks; + const struct rpm_clk_desc *desc; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpm_clks = desc->clks; + num_clks = desc->num_clks; + + rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks, + GFP_KERNEL); + if (!rcc) + return -ENOMEM; + + clks = rcc->clks; + data = &rcc->data; + data->clks = clks; + data->clk_num = num_clks; + + for (i = 0; i < num_clks; i++) { + if (!rpm_clks[i]) { + clks[i] = ERR_PTR(-ENOENT); + continue; + } + + rpm_clks[i]->rpm = rpm; + clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto err; + } + + clks[i] = clk; + } + + ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get, + data); + if (ret) + goto err; + + return 0; + +err: + dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret); + return ret; +} + +static int rpm_clk_remove(struct platform_device *pdev) +{ + 
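/* The clocks were registered with devm_clk_register(), so only the provider registration needs undoing here. */ +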
of_clk_del_provider(pdev->dev.of_node); + return 0; +} + +static struct platform_driver rpm_clk_driver = { + .driver = { + .name = "qcom-clk-rpm", + .of_match_table = rpm_clk_match_table, + }, + .probe = rpm_clk_probe, + .remove = rpm_clk_remove, +}; + +static int __init rpm_clk_init(void) +{ + return platform_driver_register(&rpm_clk_driver); +} +core_initcall(rpm_clk_init); + +static void __exit rpm_clk_exit(void) +{ + platform_driver_unregister(&rpm_clk_driver); +} +module_exit(rpm_clk_exit); + +MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-clk-rpm"); diff --git a/drivers/clk/qcom/clk-rpm.h b/drivers/clk/qcom/clk-rpm.h new file mode 100644 index 00000000000000..c0ac30f806b535 --- /dev/null +++ b/drivers/clk/qcom/clk-rpm.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QCOM_CLK_RPM_H__ +#define __QCOM_CLK_RPM_H__ + +#include + +struct qcom_rpm; + +struct clk_rpm { + const int rpm_clk_id; + unsigned long rate; + bool enabled; + bool branch; + struct clk_hw hw; + struct qcom_rpm *rpm; +}; + +extern const struct clk_ops clk_rpm_ops; +extern const struct clk_ops clk_rpm_branch_ops; + +#define DEFINE_CLK_RPM(_platform, _name, r_id) \ + static struct clk_rpm _platform##_##_name = { \ + .rpm_clk_id = (r_id), \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .name = #_name, \ + .parent_names = (const char *[]){ "pxo_board" }, \ + .num_parents = 1, \ + .ops = &clk_rpm_ops, \ + }, \ + } + +#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, r_id, r) \ + static struct clk_rpm _platform##_##_name = { \ + .rpm_clk_id = (r_id), \ + .branch = true, \ + .rate = (r), \ + .hw.init = &(struct clk_init_data){ \ + .name = #_name, \ + .parent_names = (const char *[]){ "pxo_board" }, \ + .num_parents = 1, \ + .ops = &clk_rpm_branch_ops, \ + }, \ + } + +#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, r_id, r) \ + static struct clk_rpm _platform##_##_name = { \ + .rpm_clk_id = (r_id), \ + .branch = true, \ + .rate = (r), \ + .hw.init = &(struct clk_init_data){ \ + .name = #_name, \ + .parent_names = (const char *[]){ "cxo_board" }, \ + .num_parents = 1, \ + .ops = &clk_rpm_branch_ops, \ + }, \ + } +#endif diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c new file mode 100644 index 00000000000000..67a4c324f50ed8 --- /dev/null +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -0,0 +1,505 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/soc/qcom/smd-rpm.h> + +#include "clk-smd-rpm.h" +#include <dt-bindings/clock/qcom,rpmcc.h> + +#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw) + +static DEFINE_MUTEX(rpm_smd_clk_lock); + +static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r, + unsigned long rate) +{ + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(r->rpm_key), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */ + }; + + return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + r->rpm_res_type, r->rpm_clk_id, &req, + sizeof(req)); +} + +static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r, + unsigned long rate) +{ + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(r->rpm_key), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */ + }; + + return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE, + r->rpm_res_type, r->rpm_clk_id, &req, + sizeof(req)); +} + +static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate, + unsigned long *active, unsigned long *sleep) +{ + *active = rate; + + /* + * Active-only clocks don't care what the rate is during sleep. So, + * they vote for zero. + */ + if (r->active_only) + *sleep = 0; + else + *sleep = *active; +} + +static int clk_smd_rpm_prepare(struct clk_hw *hw) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + struct clk_smd_rpm *peer = r->peer; + unsigned long this_rate = 0, this_sleep_rate = 0; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + unsigned long active_rate, sleep_rate; + int ret = 0; + + mutex_lock(&rpm_smd_clk_lock); + + /* Don't send requests to the RPM if the rate has not been set. */ + if (!r->rate) + goto out; + + to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate); + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, + &peer_rate, &peer_sleep_rate); + + active_rate = max(this_rate, peer_rate); + + if (r->branch) + active_rate = !!active_rate; + + ret = clk_smd_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = max(this_sleep_rate, peer_sleep_rate); + if (r->branch) + sleep_rate = !!sleep_rate; + + ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + /* Undo the active-set vote and restore the peer's request */ + ret = clk_smd_rpm_set_rate_active(r, peer_rate); + +out: + if (!ret) + r->enabled = true; + + mutex_unlock(&rpm_smd_clk_lock); + + return ret; +} + +static void clk_smd_rpm_unprepare(struct clk_hw *hw) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + struct clk_smd_rpm *peer = r->peer; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + unsigned long active_rate, sleep_rate; + int ret; + + mutex_lock(&rpm_smd_clk_lock); + + if (!r->rate) + goto out; + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, &peer_rate, + &peer_sleep_rate); + + active_rate = r->branch ? !!peer_rate : peer_rate; + ret = clk_smd_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = r->branch ?
!!peer_sleep_rate : peer_sleep_rate; + ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + goto out; + + r->enabled = false; + +out: + mutex_unlock(&rpm_smd_clk_lock); +} + +static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + struct clk_smd_rpm *peer = r->peer; + unsigned long active_rate, sleep_rate; + unsigned long this_rate = 0, this_sleep_rate = 0; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + int ret = 0; + + mutex_lock(&rpm_smd_clk_lock); + + if (!r->enabled) + goto out; + + to_active_sleep(r, rate, &this_rate, &this_sleep_rate); + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, + &peer_rate, &peer_sleep_rate); + + active_rate = max(this_rate, peer_rate); + ret = clk_smd_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = max(this_sleep_rate, peer_sleep_rate); + ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + goto out; + + r->rate = rate; + +out: + mutex_unlock(&rpm_smd_clk_lock); + + return ret; +} + +static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate is requested. + */ + return rate; +} + +static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate was set. + */ + return r->rate; +} + +static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm) +{ + int ret; + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(1), + }; + + ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE, + QCOM_SMD_RPM_MISC_CLK, + QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req)); + if (ret) { + pr_err("RPM clock scaling (sleep set) not enabled!\n"); + return ret; + } + + ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE, + QCOM_SMD_RPM_MISC_CLK, + QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req)); + if (ret) { + pr_err("RPM clock scaling (active set) not enabled!\n"); + return ret; + } + + pr_debug("%s: RPM clock scaling is enabled\n", __func__); + return 0; +} + +const struct clk_ops clk_smd_rpm_ops = { + .prepare = clk_smd_rpm_prepare, + .unprepare = clk_smd_rpm_unprepare, + .set_rate = clk_smd_rpm_set_rate, + .round_rate = clk_smd_rpm_round_rate, + .recalc_rate = clk_smd_rpm_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_smd_rpm_ops); + +const struct clk_ops clk_smd_rpm_branch_ops = { + .prepare = clk_smd_rpm_prepare, + .unprepare = clk_smd_rpm_unprepare, + .round_rate = clk_smd_rpm_round_rate, + .recalc_rate = clk_smd_rpm_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_smd_rpm_branch_ops); + +struct rpm_cc { + struct qcom_rpm *rpm; + struct clk_onecell_data data; + struct clk *clks[]; +}; + +struct rpm_smd_clk_desc { + struct clk_smd_rpm **clks; + size_t num_clks; +}; + +/* msm8916 */ +DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM_BRANCH(msm8916, xo, xo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000); 
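+ +/* + * Each pair defined here shares a single RPM resource: the plain clock votes on both the active and sleep sets, while its _a peer is marked active_only and votes 0 for the sleep set (see to_active_sleep()). prepare/set_rate then aggregate both peers' requests with max() before writing to the RPM. + */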
+DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5); + +static struct clk_smd_rpm *msm8916_clks[] = { + [RPM_XO_CLK_SRC] = &msm8916_xo, + [RPM_XO_A_CLK_SRC] = &msm8916_xo_a, + [RPM_PCNOC_CLK] = &msm8916_pcnoc_clk, + [RPM_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk, + [RPM_SNOC_CLK] = &msm8916_snoc_clk, + [RPM_SNOC_A_CLK] = &msm8916_snoc_a_clk, + [RPM_BIMC_CLK] = &msm8916_bimc_clk, + [RPM_BIMC_A_CLK] = &msm8916_bimc_a_clk, + [RPM_QDSS_CLK] = &msm8916_qdss_clk, + [RPM_QDSS_A_CLK] = &msm8916_qdss_a_clk, + [RPM_BB_CLK1] = &msm8916_bb_clk1, + [RPM_BB_CLK1_A] = &msm8916_bb_clk1_a, + [RPM_BB_CLK2] = &msm8916_bb_clk2, + [RPM_BB_CLK2_A] = &msm8916_bb_clk2_a, + [RPM_RF_CLK1] = &msm8916_rf_clk1, + [RPM_RF_CLK1_A] = &msm8916_rf_clk1_a, + [RPM_RF_CLK2] = &msm8916_rf_clk2, + [RPM_RF_CLK2_A] = &msm8916_rf_clk2_a, + [RPM_BB_CLK1_PIN] = &msm8916_bb_clk1_pin, + [RPM_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin, + [RPM_BB_CLK2_PIN] = &msm8916_bb_clk2_pin, + [RPM_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin, + [RPM_RF_CLK1_PIN] = &msm8916_rf_clk1_pin, + [RPM_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin, + [RPM_RF_CLK2_PIN] = &msm8916_rf_clk2_pin, + [RPM_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { + .clks = msm8916_clks, + .num_clks = ARRAY_SIZE(msm8916_clks), +}; + +/* msm8996 */ +DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk, + QCOM_SMD_RPM_MMAXI_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8996, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8996, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM_BRANCH(msm8996, cxo, cxo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000); +DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk, + QCOM_SMD_RPM_AGGR_CLK, 0, 1000); +DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk, + QCOM_SMD_RPM_AGGR_CLK, 1, 1000); +DEFINE_CLK_SMD_RPM_QDSS(msm8996, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk1, div_clk1_ao, 0xb); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk2, div_clk2_ao, 0xc); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk3, div_clk3_ao, 0xd); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5); + +static struct clk_smd_rpm *msm8996_clks[] = { + [RPM_XO_CLK_SRC] = &msm8996_cxo, + [RPM_XO_A_CLK_SRC] = &msm8996_cxo_a, + [RPM_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk, + [RPM_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk, + [RPM_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk, + [RPM_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk, + [RPM_PCNOC_CLK] = &msm8996_pcnoc_clk, + [RPM_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk, + [RPM_SNOC_CLK] = &msm8996_snoc_clk, + [RPM_SNOC_A_CLK] = &msm8996_snoc_a_clk, + [RPM_CNOC_CLK] = &msm8996_cnoc_clk, + [RPM_CNOC_A_CLK] = &msm8996_cnoc_a_clk, + [RPM_BIMC_CLK] = &msm8996_bimc_clk, + [RPM_BIMC_A_CLK] = &msm8996_bimc_a_clk, + [RPM_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk, + [RPM_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk, + [RPM_IPA_CLK] = &msm8996_ipa_clk, + [RPM_IPA_A_CLK] = &msm8996_ipa_a_clk, + [RPM_CE1_CLK] = &msm8996_ce1_clk, + [RPM_CE1_A_CLK] = &msm8996_ce1_a_clk, + [RPM_QDSS_CLK] = &msm8996_qdss_clk, + [RPM_QDSS_A_CLK] = &msm8996_qdss_a_clk, + [RPM_DIV_CLK1] = &msm8996_div_clk1, + [RPM_DIV_CLK1_AO] = &msm8996_div_clk1_ao, + [RPM_DIV_CLK2] = &msm8996_div_clk2, + [RPM_DIV_CLK2_AO] = &msm8996_div_clk2_ao, + [RPM_DIV_CLK3] = &msm8996_div_clk3, + [RPM_DIV_CLK3_AO] = &msm8996_div_clk3_ao, + [RPM_BB_CLK1_PIN] = &msm8996_bb_clk1_pin, + [RPM_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin, + [RPM_BB_CLK2_PIN] = &msm8996_bb_clk2_pin, + [RPM_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin, + [RPM_RF_CLK1_PIN] = &msm8996_rf_clk1_pin, + [RPM_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin, + [RPM_RF_CLK2_PIN] = &msm8996_rf_clk2_pin, + [RPM_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8996 = { + .clks = msm8996_clks, + .num_clks = ARRAY_SIZE(msm8996_clks), +}; + +static const struct of_device_id rpm_smd_clk_match_table[] = { + { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916}, + { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996}, + { } +}; +MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); + +static int rpm_smd_clk_probe(struct platform_device *pdev) +{ + struct clk **clks; + struct clk *clk; + struct rpm_cc *rcc; + struct clk_onecell_data *data; + int ret, i; + size_t num_clks; + struct qcom_smd_rpm *rpm; + struct clk_smd_rpm **rpm_smd_clks; + const struct rpm_smd_clk_desc *desc; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpm_smd_clks = desc->clks; + num_clks = desc->num_clks; + + rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks, + GFP_KERNEL); + if (!rcc) + return -ENOMEM; + + clks = rcc->clks; + data = &rcc->data; + data->clks = clks; + data->clk_num = num_clks; + + for (i = 0; i < num_clks; i++) { + if (!rpm_smd_clks[i]) { + clks[i] = ERR_PTR(-ENOENT); + continue; + } + + rpm_smd_clks[i]->rpm = rpm; + clk = devm_clk_register(&pdev->dev, &rpm_smd_clks[i]->hw); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto err; + } + + clks[i] = clk; + } + + ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get, + data); + if (ret) + goto err; + + ret = clk_smd_rpm_enable_scaling(rpm); + if (ret) { + of_clk_del_provider(pdev->dev.of_node); + goto err; + } + + return 0; +err: + dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret); + return ret; +} + +static int rpm_smd_clk_remove(struct platform_device *pdev) +{ + 
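/* Note: rate scaling enabled at probe is not undone here; only the clock provider is dropped. */ +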
of_clk_del_provider(pdev->dev.of_node); + return 0; +} + +static struct platform_driver rpm_smd_clk_driver = { + .driver = { + .name = "qcom-clk-smd-rpm", + .of_match_table = rpm_smd_clk_match_table, + }, + .probe = rpm_smd_clk_probe, + .remove = rpm_smd_clk_remove, +}; + +static int __init rpm_smd_clk_init(void) +{ + return platform_driver_register(&rpm_smd_clk_driver); +} +core_initcall(rpm_smd_clk_init); + +static void __exit rpm_smd_clk_exit(void) +{ + platform_driver_unregister(&rpm_smd_clk_driver); +} +module_exit(rpm_smd_clk_exit); + +MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-clk-smd-rpm"); diff --git a/drivers/clk/qcom/clk-smd-rpm.h b/drivers/clk/qcom/clk-smd-rpm.h new file mode 100644 index 00000000000000..7ac58294d36826 --- /dev/null +++ b/drivers/clk/qcom/clk-smd-rpm.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QCOM_CLK_SMD_RPM_H__ +#define __QCOM_CLK_SMD_RPM_H__ + +#include + +#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773 +#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370 +#define QCOM_RPM_SMD_KEY_RATE 0x007a484b +#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45 +#define QCOM_RPM_SMD_KEY_STATE 0x54415453 +#define QCOM_RPM_SCALING_ENABLE_ID 0x2 + +struct qcom_smd_rpm; + +struct clk_smd_rpm { + const int rpm_res_type; + const int rpm_key; + const int rpm_clk_id; + const int rpm_status_id; + const bool active_only; + bool enabled; + bool branch; + struct clk_smd_rpm *peer; + struct clk_hw hw; + unsigned long rate; + struct qcom_smd_rpm *rpm; +}; + +struct clk_smd_rpm_req { + __le32 key; + __le32 nbytes; + __le32 value; +}; + +extern const struct clk_ops clk_smd_rpm_ops; +extern const struct clk_ops clk_smd_rpm_branch_ops; + +#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \ + key) \ + static struct clk_smd_rpm _platform##_##_active; \ + static struct clk_smd_rpm _platform##_##_name = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .rpm_key = (key), \ + .peer = &_platform##_##_active, \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_smd_rpm _platform##_##_active = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .rpm_key = (key), \ + .peer = &_platform##_##_name, \ + .active_only = true, \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \ + stat_id, r, key) \ + static struct clk_smd_rpm _platform##_##_active; \ + static struct clk_smd_rpm _platform##_##_name = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + 
.rpm_status_id = (stat_id), \ + .rpm_key = (key), \ + .peer = &_platform##_##_active, \ + .branch = true, \ + .rate = (r), \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_branch_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_smd_rpm _platform##_##_active = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .rpm_key = (key), \ + .peer = &_platform##_##_name, \ + .active_only = true, \ + .branch = true, \ + .rate = (r), \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_branch_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \ + __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \ + 0, QCOM_RPM_SMD_KEY_RATE) + +#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \ + __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \ + r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE) + +#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \ + __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \ + 0, QCOM_RPM_SMD_KEY_STATE) + +#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \ + __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \ + QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \ + QCOM_RPM_KEY_SOFTWARE_ENABLE) + +#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \ + __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \ + QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \ + QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY) + +#endif diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index c112ebaba70d17..4a343295c530b2 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -185,6 +185,7 @@ int qcom_cc_really_probe(struct platform_device *pdev, struct clk **clks; struct qcom_reset_controller *reset; struct qcom_cc *cc; + struct gdsc_desc *scd; size_t num_clks = desc->num_clks; struct clk_regmap **rclks = desc->clks; @@ -230,15 +231,18 @@ int qcom_cc_really_probe(struct platform_device *pdev, devm_add_action(dev, qcom_cc_reset_unregister, &reset->rcdev); if (desc->gdscs && desc->num_gdscs) { - ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs, - &reset->rcdev, regmap); + scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL); + if (!scd) + return -ENOMEM; + scd->dev = dev; + scd->scs = desc->gdscs; + scd->num = desc->num_gdscs; + ret = gdsc_register(scd, &reset->rcdev, regmap); if (ret) return ret; + devm_add_action(dev, qcom_cc_gdsc_unregister, scd); } - devm_add_action(dev, qcom_cc_gdsc_unregister, dev); return 0; } EXPORT_SYMBOL_GPL(qcom_cc_really_probe); diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 16d7c323db4985..bb8c61ff0176bb 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -30,6 +30,7 @@ #include "clk-rcg.h" #include "clk-branch.h" #include "reset.h" +#include "gdsc.h" #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } @@ -3059,6 +3060,83 @@ static struct clk_hw *gcc_msm8996_hws[] = { &ufs_ice_core_postdiv_clk_src.hw, }; +static struct gdsc aggre0_noc_gdsc = { + .gdscr = 0x81004, + .gds_hw_ctrl = 0x81028, + .pd = { + .name = "aggre0_noc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_aggre0_noc_gdsc = { + .gdscr = 0x7d024, + .pd = { + .name =
"hlos1_vote_aggre0_noc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_lpass_adsp_gdsc = { + .gdscr = 0x7d034, + .pd = { + .name = "hlos1_vote_lpass_adsp", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_lpass_core_gdsc = { + .gdscr = 0x7d038, + .pd = { + .name = "hlos1_vote_lpass_core", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc usb30_gdsc = { + .gdscr = 0xf004, + .pd = { + .name = "usb30", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie0_gdsc = { + .gdscr = 0x6b004, + .pd = { + .name = "pcie0", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie1_gdsc = { + .gdscr = 0x6d004, + .pd = { + .name = "pcie1", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie2_gdsc = { + .gdscr = 0x6e004, + .pd = { + .name = "pcie2", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ufs_gdsc = { + .gdscr = 0x75004, + .pd = { + .name = "ufs", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + static struct clk_regmap *gcc_msm8996_clocks[] = { [GPLL0_EARLY] = &gpll0_early.clkr, [GPLL0] = &gpll0.clkr, @@ -3245,6 +3323,18 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr, }; +static struct gdsc *gcc_msm8996_gdscs[] = { + [AGGRE0_NOC_GDSC] = &aggre0_noc_gdsc, + [HLOS1_VOTE_AGGRE0_NOC_GDSC] = &hlos1_vote_aggre0_noc_gdsc, + [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc, + [HLOS1_VOTE_LPASS_CORE_GDSC] = &hlos1_vote_lpass_core_gdsc, + [USB30_GDSC] = &usb30_gdsc, + [PCIE0_GDSC] = &pcie0_gdsc, + [PCIE1_GDSC] = &pcie1_gdsc, + [PCIE2_GDSC] = &pcie2_gdsc, + [UFS_GDSC] = &ufs_gdsc, +}; + static const struct qcom_reset_map gcc_msm8996_resets[] = { [GCC_SYSTEM_NOC_BCR] = { 0x4000 }, [GCC_CONFIG_NOC_BCR] = { 0x5000 }, @@ -3363,6 +3453,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = { .num_clks = ARRAY_SIZE(gcc_msm8996_clocks), .resets = gcc_msm8996_resets, .num_resets = ARRAY_SIZE(gcc_msm8996_resets), + .gdscs = gcc_msm8996_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs), }; static const struct of_device_id gcc_msm8996_match_table[] = { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index da9fad8b642b80..f12d7b2bddd706 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -42,12 +43,12 @@ #define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd) -static int gdsc_is_enabled(struct gdsc *sc) +static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg) { u32 val; int ret; - ret = regmap_read(sc->regmap, sc->gdscr, &val); + ret = regmap_read(sc->regmap, reg, &val); if (ret) return ret; @@ -58,28 +59,46 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en) { int ret; u32 val = en ? 0 : SW_COLLAPSE_MASK; - u32 check = en ? 
-	u32 check = en ? PWR_ON_MASK : 0;
-	unsigned long timeout;
+	ktime_t start;
+	unsigned int status_reg = sc->gdscr;
 
 	ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
 	if (ret)
 		return ret;
 
-	timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
-	do {
-		ret = regmap_read(sc->regmap, sc->gdscr, &val);
-		if (ret)
-			return ret;
+	/* If disabling votable gdscs, don't poll on status */
+	if ((sc->flags & VOTABLE) && !en) {
+		/*
+		 * Add a short delay here to ensure that an enable
+		 * right after it was disabled does not put it in an
+		 * unknown state
+		 */
+		udelay(TIMEOUT_US);
+		return 0;
+	}
 
-		if ((val & PWR_ON_MASK) == check)
-			return 0;
-	} while (time_before(jiffies, timeout));
+	if (sc->gds_hw_ctrl) {
+		status_reg = sc->gds_hw_ctrl;
+		/*
+		 * The gds hw controller asserts/de-asserts the status bit soon
+		 * after it receives a power on/off request from a master.
+		 * The controller then takes around 8 xo cycles to start its
+		 * internal state machine and update the status bit. During
+		 * this time, the status bit does not reflect the true status
+		 * of the core.
+		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
+		 * and polling the status bit.
+		 */
+		udelay(1);
+	}
 
-	ret = regmap_read(sc->regmap, sc->gdscr, &val);
-	if (ret)
-		return ret;
+	start = ktime_get();
+	do {
+		if (gdsc_is_enabled(sc, status_reg) == en)
+			return 0;
+	} while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
 
-	if ((val & PWR_ON_MASK) == check)
+	if (gdsc_is_enabled(sc, status_reg) == en)
 		return 0;
 
 	return -ETIMEDOUT;
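An aside on the polling above: the open-coded ktime loop has the same shape that the regmap_read_poll_timeout() helper later gave a name to. For illustration only — the helper postdates this series, and this sketch covers just the enable direction:

	/*
	 * Sketch only: poll status_reg until PWR_ON_MASK is set, failing
	 * with -ETIMEDOUT after TIMEOUT_US microseconds, mirroring the
	 * enable-side behaviour of the loop above.
	 */
	static int gdsc_poll_pwr_on(struct gdsc *sc, unsigned int status_reg)
	{
		u32 val;

		return regmap_read_poll_timeout(sc->regmap, status_reg, val,
						val & PWR_ON_MASK, 0, TIMEOUT_US);
	}

The VOTABLE early return exists precisely because no such poll is valid on disable: with other masters still voting, the switch may legitimately never report off, and waiting for it would produce a false timeout.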
@@ -165,6 +184,7 @@ static int gdsc_init(struct gdsc *sc)
 {
 	u32 mask, val;
 	int on, ret;
+	unsigned int reg;
 
 	/*
 	 * Disable HW trigger: collapse/restore occur based on registers writes.
@@ -185,10 +205,18 @@
 			return ret;
 	}
 
-	on = gdsc_is_enabled(sc);
+	reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr;
+	on = gdsc_is_enabled(sc, reg);
 	if (on < 0)
 		return on;
 
+	/*
+	 * Votable GDSCs can be ON due to Vote from other masters.
+	 * If a Votable GDSC is ON, make sure we have a Vote.
+	 */
+	if ((sc->flags & VOTABLE) && on)
+		gdsc_enable(&sc->pd);
+
 	if (on || (sc->pwrsts & PWRSTS_RET))
 		gdsc_force_mem_on(sc);
 	else
@@ -201,11 +229,14 @@
 	return 0;
 }
 
-int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+int gdsc_register(struct gdsc_desc *desc,
 		  struct reset_controller_dev *rcdev, struct regmap *regmap)
 {
 	int i, ret;
 	struct genpd_onecell_data *data;
+	struct device *dev = desc->dev;
+	struct gdsc **scs = desc->scs;
+	size_t num = desc->num;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -228,10 +259,30 @@ int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
 		data->domains[i] = &scs[i]->pd;
 	}
 
+	/* Add subdomains */
+	for (i = 0; i < num; i++) {
+		if (!scs[i])
+			continue;
+		if (scs[i]->parent)
+			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+	}
+
 	return of_genpd_add_provider_onecell(dev->of_node, data);
 }
 
-void gdsc_unregister(struct device *dev)
+void gdsc_unregister(struct gdsc_desc *desc)
 {
+	int i;
+	struct device *dev = desc->dev;
+	struct gdsc **scs = desc->scs;
+	size_t num = desc->num;
+
+	/* Remove subdomains */
+	for (i = 0; i < num; i++) {
+		if (!scs[i])
+			continue;
+		if (scs[i]->parent)
+			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+	}
 	of_genpd_del_provider(dev->of_node);
 }
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5ded26884f08e9..3bf497c36bdf83 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -20,18 +20,12 @@
 struct regmap;
 struct reset_controller_dev;
 
-/* Powerdomain allowable state bitfields */
-#define PWRSTS_OFF		BIT(0)
-#define PWRSTS_RET		BIT(1)
-#define PWRSTS_ON		BIT(2)
-#define PWRSTS_OFF_ON		(PWRSTS_OFF | PWRSTS_ON)
-#define PWRSTS_RET_ON		(PWRSTS_RET | PWRSTS_ON)
-
 /**
  * struct gdsc - Globally Distributed Switch Controller
  * @pd: generic power domain
  * @regmap: regmap for MMIO accesses
  * @gdscr: gsdc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
  * @cxcs: offsets of branch registers to toggle mem/periph bits in
  * @cxc_count: number of @cxcs
  * @pwrsts: Possible powerdomain power states
@@ -41,28 +35,44 @@ struct reset_controller_dev;
  */
 struct gdsc {
 	struct generic_pm_domain pd;
+	struct generic_pm_domain *parent;
 	struct regmap *regmap;
 	unsigned int gdscr;
+	unsigned int gds_hw_ctrl;
 	unsigned int *cxcs;
 	unsigned int cxc_count;
 	const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF		BIT(0)
+#define PWRSTS_RET		BIT(1)
+#define PWRSTS_ON		BIT(2)
+#define PWRSTS_OFF_ON		(PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON		(PWRSTS_RET | PWRSTS_ON)
+	const u8 flags;
+#define VOTABLE		BIT(0)
 	struct reset_controller_dev *rcdev;
 	unsigned int *resets;
 	unsigned int reset_count;
 };
 
+struct gdsc_desc {
+	struct device *dev;
+	struct gdsc **scs;
+	size_t num;
+};
+
 #ifdef CONFIG_QCOM_GDSC
-int gdsc_register(struct device *, struct gdsc **, size_t n,
-		  struct reset_controller_dev *, struct regmap *);
-void gdsc_unregister(struct device *);
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+		  struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
 #else
-static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+static inline int gdsc_register(struct gdsc_desc *desc,
 				struct reset_controller_dev *rcdev,
 				struct regmap *r)
 {
 	return -ENOSYS;
 }
-static inline void gdsc_unregister(struct device *d) {};
+static inline void gdsc_unregister(struct gdsc_desc *desc) {};
 #endif /* CONFIG_QCOM_GDSC */
 
 #endif /* __QCOM_GDSC_H__ */
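The subdomain pass above is what gives the .parent pointers in the msm8974 and msm8996 controllers below their effect. As an illustrative, hedged consumer-side sketch (the function name is hypothetical): a driver whose device is attached to a child domain such as "venus" never names the parent explicitly, because runtime PM walks the genpd hierarchy for it:

	/*
	 * Illustration only: for a device attached to the "venus" domain,
	 * pm_runtime_get_sync() powers the parent ("mmagic_video") first,
	 * then "venus" itself; the put releases them in reverse order.
	 */
	static int venus_start(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0)
			return ret;

		/* ... program the core while both domains are up ... */

		pm_runtime_put(dev);
		return 0;
	}
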
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed936692d..6b2e4e80812513 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2400,6 +2400,7 @@ static struct gdsc oxilicx_gdsc = {
 	.pd = {
 		.name = "oxilicx",
 	},
+	.parent = &oxili_gdsc.pd,
 	.pwrsts = PWRSTS_OFF_ON,
 };
 
@@ -2625,22 +2626,11 @@ static int mmcc_msm8974_probe(struct platform_device *pdev)
 	clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
 	clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
 
-	ret = qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
-	if (ret)
-		return ret;
-
-	return pm_genpd_add_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
-}
-
-static int mmcc_msm8974_remove(struct platform_device *pdev)
-{
-	pm_genpd_remove_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
-	return 0;
+	return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
 }
 
 static struct platform_driver mmcc_msm8974_driver = {
 	.probe		= mmcc_msm8974_probe,
-	.remove		= mmcc_msm8974_remove,
 	.driver		= {
 		.name	= "mmcc-msm8974",
 		.of_match_table = mmcc_msm8974_match_table,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 064f3eaa39d011..8d1ae14faa94be 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -32,6 +32,7 @@
 #include "clk-rcg.h"
 #include "clk-branch.h"
 #include "reset.h"
+#include "gdsc.h"
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
 
@@ -1293,6 +1294,21 @@ static struct clk_branch mmss_mmagic_axi_clk = {
 	},
 };
 
+static struct clk_branch mmss_s0_axi_clk = {
+	.halt_reg = 0x5064,
+	.clkr = {
+		.enable_reg = 0x5064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_s0_axi_clk",
+			.parent_names = (const char *[]){ "axi_clk_src" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch mmss_mmagic_maxi_clk = {
 	.halt_reg = 0x5074,
 	.clkr = {
@@ -2917,6 +2933,152 @@ static struct clk_hw *mmcc_msm8996_hws[] = {
 	&gpll0_div.hw,
 };
 
+struct gdsc mmagic_bimc_gdsc = {
+	.gdscr = 0x529c,
+	.pd = {
+		.name = "mmagic_bimc",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc mmagic_video_gdsc = {
+	.gdscr = 0x119c,
+	.gds_hw_ctrl = 0x120c,
+	.pd = {
+		.name = "mmagic_video",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+	.flags = VOTABLE,
+};
+
+struct gdsc mmagic_mdss_gdsc = {
+	.gdscr = 0x247c,
+	.gds_hw_ctrl = 0x2480,
+	.pd = {
+		.name = "mmagic_mdss",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+	.flags = VOTABLE,
+};
+
+struct gdsc mmagic_camss_gdsc = {
+	.gdscr = 0x3c4c,
+	.gds_hw_ctrl = 0x3c50,
+	.pd = {
+		.name = "mmagic_camss",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+	.flags = VOTABLE,
+};
+
+struct gdsc venus_gdsc = {
+	.gdscr = 0x1024,
+	.cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
+	.cxc_count = 3,
+	.pd = {
+		.name = "venus",
+	},
+	.parent = &mmagic_video_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc venus_core0_gdsc = {
+	.gdscr = 0x1040,
+	.cxcs = (unsigned int []){ 0x1048 },
+	.cxc_count = 1,
+	.pd = {
+		.name = "venus_core0",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc venus_core1_gdsc = {
+	.gdscr = 0x1044,
+	.cxcs = (unsigned int []){ 0x104c },
+	.cxc_count = 1,
+	.pd = {
+		.name = "venus_core1",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc camss_gdsc = {
+	.gdscr = 0x34a0,
+	.cxcs = (unsigned int []){ 0x36bc, 0x36c4 },
+	.cxc_count = 2,
+	.pd = {
+		.name = "camss",
+	},
+	.parent = &mmagic_camss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc vfe0_gdsc = {
+	.gdscr = 0x3664,
+	.cxcs = (unsigned int []){ 0x36a8 },
+	.cxc_count = 1,
+	.pd = {
+		.name = "vfe0",
+	},
+	.parent = &camss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc vfe1_gdsc = {
+	.gdscr = 0x3674,
+	.cxcs = (unsigned int []){ 0x36ac },
+	.cxc_count = 1,
+	.pd = {
+		.name = "vfe1",
+	},
+	.parent = &camss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc jpeg_gdsc = {
+	.gdscr = 0x35a4,
+	.cxcs = (unsigned int []){ 0x35a8, 0x35b0, 0x35c0, 0x35b8 },
+	.cxc_count = 4,
+	.pd = {
+		.name = "jpeg",
+	},
+	.parent = &camss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc cpp_gdsc = {
+	.gdscr = 0x36d4,
+	.cxcs = (unsigned int []){ 0x36b0 },
+	.cxc_count = 1,
+	.pd = {
+		.name = "cpp",
+	},
+	.parent = &camss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc fd_gdsc = {
+	.gdscr = 0x3b64,
+	.cxcs = (unsigned int []){ 0x3b68, 0x3b6c },
+	.cxc_count = 2,
+	.pd = {
+		.name = "fd",
+	},
+	.parent = &camss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+struct gdsc mdss_gdsc = {
+	.gdscr = 0x2304,
+	.cxcs = (unsigned int []){ 0x2310, 0x231c },
+	.cxc_count = 2,
+	.pd = {
+		.name = "mdss",
+	},
+	.parent = &mmagic_mdss_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
 static struct clk_regmap *mmcc_msm8996_clocks[] = {
 	[MMPLL0_EARLY] = &mmpll0_early.clkr,
 	[MMPLL0_PLL] = &mmpll0.clkr,
@@ -2983,6 +3145,7 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
 	[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
 	[MMSS_MISC_CXO_CLK] = &mmss_misc_cxo_clk.clkr,
 	[MMSS_MMAGIC_AXI_CLK] = &mmss_mmagic_axi_clk.clkr,
+	[MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
 	[MMSS_MMAGIC_MAXI_CLK] = &mmss_mmagic_maxi_clk.clkr,
 	[MMAGIC_CAMSS_AXI_CLK] = &mmagic_camss_axi_clk.clkr,
 	[MMAGIC_CAMSS_NOC_CFG_AHB_CLK] = &mmagic_camss_noc_cfg_ahb_clk.clkr,
@@ -3093,6 +3256,23 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
 	[FD_AHB_CLK] = &fd_ahb_clk.clkr,
 };
 
+static struct gdsc *mmcc_msm8996_gdscs[] = {
+	[MMAGIC_BIMC_GDSC] = &mmagic_bimc_gdsc,
+	[MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
+	[MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
+	[MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
+	[VENUS_GDSC] = &venus_gdsc,
+	[VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+	[VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+	[CAMSS_GDSC] = &camss_gdsc,
+	[VFE0_GDSC] = &vfe0_gdsc,
+	[VFE1_GDSC] = &vfe1_gdsc,
+	[JPEG_GDSC] = &jpeg_gdsc,
+	[CPP_GDSC] = &cpp_gdsc,
+	[FD_GDSC] = &fd_gdsc,
+	[MDSS_GDSC] = &mdss_gdsc,
+};
+
 static const struct qcom_reset_map mmcc_msm8996_resets[] = {
 	[MMAGICAHB_BCR] = { 0x5020 },
 	[MMAGIC_CFG_BCR] = { 0x5050 },
@@ -3170,6 +3350,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = {
 	.num_clks = ARRAY_SIZE(mmcc_msm8996_clocks),
 	.resets = mmcc_msm8996_resets,
 	.num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
+	.gdscs = mmcc_msm8996_gdscs,
+	.num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
 };
 
 static const struct of_device_id mmcc_msm8996_match_table[] = {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 215495c2780c07..b8beb9bccd9144 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -23,42 +23,6 @@ config DRM_MSM_REGISTER_LOGGING
 	  that can be parsed by envytools demsm tool.  If enabled, register
 	  logging can be switched on via msm.reglog=y module param.
 
-config DRM_MSM_DSI
-	bool "Enable DSI support in MSM DRM driver"
-	depends on DRM_MSM
-	select DRM_PANEL
-	select DRM_MIPI_DSI
-	default y
-	help
-	  Choose this option if you have a need for MIPI DSI connector
-	  support.
- -config DRM_MSM_DSI_PLL - bool "Enable DSI PLL driver in MSM DRM" - depends on DRM_MSM_DSI && COMMON_CLK - default y - help - Choose this option to enable DSI PLL driver which provides DSI - source clocks under common clock framework. - -config DRM_MSM_DSI_28NM_PHY - bool "Enable DSI 28nm PHY driver in MSM DRM" - depends on DRM_MSM_DSI - default y - help - Choose this option if the 28nm DSI PHY is used on the platform. - -config DRM_MSM_DSI_20NM_PHY - bool "Enable DSI 20nm PHY driver in MSM DRM" - depends on DRM_MSM_DSI - default y - help - Choose this option if the 20nm DSI PHY is used on the platform. - -config DRM_MSM_DSI_28NM_8960_PHY - bool "Enable DSI 28nm 8960 PHY driver in MSM DRM" - depends on DRM_MSM_DSI - default y - help - Choose this option if the 28nm DSI PHY 8960 variant is used on the - platform. +source "drivers/gpu/drm/msm/dsi/Kconfig" +source "drivers/gpu/drm/msm/hdmi/Kconfig" +source "drivers/gpu/drm/msm/edp/Kconfig" diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 065ad413879992..9386cfeb55b22c 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,26 +1,10 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi msm-y := \ adreno/adreno_device.o \ adreno/adreno_gpu.o \ adreno/a3xx_gpu.o \ adreno/a4xx_gpu.o \ - hdmi/hdmi.o \ - hdmi/hdmi_audio.o \ - hdmi/hdmi_bridge.o \ - hdmi/hdmi_connector.o \ - hdmi/hdmi_hdcp.o \ - hdmi/hdmi_i2c.o \ - hdmi/hdmi_phy_8960.o \ - hdmi/hdmi_phy_8x60.o \ - hdmi/hdmi_phy_8x74.o \ - edp/edp.o \ - edp/edp_aux.o \ - edp/edp_bridge.o \ - edp/edp_connector.o \ - edp/edp_ctrl.o \ - edp/edp_phy.o \ mdp/mdp_format.o \ mdp/mdp_kms.o \ mdp/mdp4/mdp4_crtc.o \ @@ -50,25 +34,14 @@ msm-y := \ msm_rd.o \ msm_ringbuffer.o +msm-$(CONFIG_DRM_MSM_DSI) += mdp/mdp4/mdp4_dsi_encoder.o \ + mdp/mdp5/mdp5_cmd_encoder.o + msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o -msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ - mdp/mdp4/mdp4_dsi_encoder.o \ - dsi/dsi_cfg.o \ - dsi/dsi_host.o \ - dsi/dsi_manager.o \ - dsi/phy/dsi_phy.o \ - mdp/mdp5/mdp5_cmd_encoder.o - -msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o -msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o -msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o - -ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) -msm-y += dsi/pll/dsi_pll.o -msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o -msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o -endif +obj-$(CONFIG_DRM_MSM_DSI) += dsi/ +obj-$(CONFIG_DRM_MSM_HDMI) += hdmi/ +obj-$(CONFIG_DRM_MSM_EDP) += edp/ obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/dsi/Kconfig b/drivers/gpu/drm/msm/dsi/Kconfig new file mode 100644 index 00000000000000..91dcf21bfc0355 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/Kconfig @@ -0,0 +1,40 @@ +config DRM_MSM_DSI + bool "Enable DSI support in MSM DRM driver" + depends on DRM_MSM + select DRM_PANEL + select DRM_MIPI_DSI + default y + help + Choose this option if you have a need for MIPI DSI connector + support. + +if DRM_MSM_DSI + +config DRM_MSM_DSI_PLL + bool "Enable DSI PLL driver in MSM DRM" + default y + help + Choose this option to enable DSI PLL driver which provides DSI + source clocks under common clock framework. + +config DRM_MSM_DSI_28NM_PHY + bool "Enable DSI 28nm PHY driver in MSM DRM" + default y + help + Choose this option if the 28nm DSI PHY is used on the platform. 
+ +config DRM_MSM_DSI_20NM_PHY + bool "Enable DSI 20nm PHY driver in MSM DRM" + default y + help + Choose this option if the 20nm DSI PHY is used on the platform. + +config DRM_MSM_DSI_28NM_8960_PHY + bool "Enable DSI 28nm 8960 PHY driver in MSM DRM" + depends on DRM_MSM_DSI + default y + help + Choose this option if the 28nm DSI PHY 8960 variant is used on the + platform. + +endif diff --git a/drivers/gpu/drm/msm/dsi/Makefile b/drivers/gpu/drm/msm/dsi/Makefile new file mode 100644 index 00000000000000..26c8fb689229e9 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/Makefile @@ -0,0 +1,13 @@ +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi + +obj-y := dsi.o dsi_cfg.o dsi_host.o dsi_manager.o phy/dsi_phy.o + +obj-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += phy/dsi_phy_28nm.o +obj-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += phy/dsi_phy_20nm.o +obj-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += phy/dsi_phy_28nm_8960.o + +ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) +obj-y += pll/dsi_pll.o +obj-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += pll/dsi_pll_28nm.o +obj-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += pll/dsi_pll_28nm_8960.o +endif diff --git a/drivers/gpu/drm/msm/edp/Kconfig b/drivers/gpu/drm/msm/edp/Kconfig new file mode 100644 index 00000000000000..576d8c2fd713f6 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/Kconfig @@ -0,0 +1,7 @@ +config DRM_MSM_EDP + bool "Enable EDP support in MSM DRM driver" + depends on DRM_MSM + default y + help + Choose this option if you have a need for Embedded Display + Port encoder support. diff --git a/drivers/gpu/drm/msm/edp/Makefile b/drivers/gpu/drm/msm/edp/Makefile new file mode 100644 index 00000000000000..e623d59d1706a7 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm + +obj-y := edp.o edp_aux.o edp_bridge.o edp_connector.o edp_ctrl.o edp_phy.o diff --git a/drivers/gpu/drm/msm/hdmi/Kconfig b/drivers/gpu/drm/msm/hdmi/Kconfig new file mode 100644 index 00000000000000..4b24e1e4bac4cf --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/Kconfig @@ -0,0 +1,7 @@ +config DRM_MSM_HDMI + bool "Enable HDMI support in MSM DRM driver" + depends on DRM_MSM + default y + help + Choose this option if you have a need for HDMI connector + support. 
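The hdmi.c changes further below replace the compile-time phy_init hook with a device-tree phandle lookup (hdmi_get_phy()). As a standalone, hedged sketch of that lookup idiom — simplified, with the get_device() reference counting done by the real code elided, and the helper name invented for illustration:

	/*
	 * Sketch of the phandle -> platform_device -> drvdata lookup used
	 * by hdmi_get_phy() below; returning -EPROBE_DEFER makes the core
	 * retry until the PHY driver has bound and published its drvdata.
	 */
	static struct hdmi_phy *hdmi_phy_lookup(struct platform_device *pdev)
	{
		struct device_node *np;
		struct platform_device *phy_pdev;
		struct hdmi_phy *phy = NULL;

		np = of_parse_phandle(pdev->dev.of_node, "qcom,hdmi-phy", 0);
		if (!np)
			return ERR_PTR(-ENXIO);

		phy_pdev = of_find_device_by_node(np);
		of_node_put(np);

		if (phy_pdev)
			phy = platform_get_drvdata(phy_pdev);
		if (!phy)
			return ERR_PTR(-EPROBE_DEFER);

		return phy;
	}
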
diff --git a/drivers/gpu/drm/msm/hdmi/Makefile b/drivers/gpu/drm/msm/hdmi/Makefile new file mode 100644 index 00000000000000..799dbca89bd4ae --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/Makefile @@ -0,0 +1,6 @@ +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm + +obj-y := hdmi.o hdmi_phy.o hdmi_audio.o hdmi_bridge.o hdmi_connector.o \ + hdmi_hdcp.o hdmi_i2c.o hdmi_phy_8960.o hdmi_phy_8x60.o hdmi_phy_8x74.o + +obj-$(CONFIG_COMMON_CLK) += hdmi_pll_8960.o hdmi_phy_8996.o diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 9a0989c0b4de2a..6002988951b434 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -67,8 +67,6 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id) static void hdmi_destroy(struct hdmi *hdmi) { - struct hdmi_phy *phy = hdmi->phy; - /* * at this point, hpd has been disabled, * after flush workq, it's safe to deinit hdcp @@ -78,8 +76,12 @@ static void hdmi_destroy(struct hdmi *hdmi) destroy_workqueue(hdmi->workq); } hdmi_hdcp_destroy(hdmi); - if (phy) - phy->funcs->destroy(phy); + + if (hdmi->phy_dev) { + put_device(hdmi->phy_dev); + hdmi->phy = NULL; + hdmi->phy_dev = NULL; + } if (hdmi->i2c) hdmi_i2c_destroy(hdmi->i2c); @@ -87,6 +89,34 @@ static void hdmi_destroy(struct hdmi *hdmi) platform_set_drvdata(hdmi->pdev, NULL); } +static int hdmi_get_phy(struct hdmi *hdmi) +{ + struct platform_device *pdev = hdmi->pdev; + struct platform_device *phy_pdev; + struct device_node *phy_node; + + phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,hdmi-phy", 0); + if (!phy_node) { + dev_err(&pdev->dev, "cannot find phy device\n"); + return -ENXIO; + } + + phy_pdev = of_find_device_by_node(phy_node); + if (phy_pdev) + hdmi->phy = platform_get_drvdata(phy_pdev); + + of_node_put(phy_node); + + if (!phy_pdev || !hdmi->phy) { + dev_err(&pdev->dev, "phy driver is not ready\n"); + return -EPROBE_DEFER; + } + + hdmi->phy_dev = get_device(&phy_pdev->dev); + + return 0; +} + /* construct hdmi at bind/probe time, grab all the resources. If * we are to EPROBE_DEFER we want to do it here, rather than later * at modeset_init() time @@ -108,18 +138,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->config = config; spin_lock_init(&hdmi->reg_lock); - /* not sure about which phy maps to which msm.. 
probably I miss some */ - if (config->phy_init) { - hdmi->phy = config->phy_init(hdmi); - - if (IS_ERR(hdmi->phy)) { - ret = PTR_ERR(hdmi->phy); - dev_err(&pdev->dev, "failed to load phy: %d\n", ret); - hdmi->phy = NULL; - goto fail; - } - } - hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); if (IS_ERR(hdmi->mmio)) { ret = PTR_ERR(hdmi->mmio); @@ -230,6 +248,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } + ret = hdmi_get_phy(hdmi); + if (ret) { + dev_err(&pdev->dev, "failed to get phy\n"); + goto fail; + } + hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi); if (IS_ERR(hdmi->hdcp_ctrl)) { dev_warn(&pdev->dev, "failed to init hdcp: disabled\n"); @@ -331,15 +355,12 @@ int hdmi_modeset_init(struct hdmi *hdmi, static const char *pwr_reg_names_none[] = {}; static const char *hpd_reg_names_none[] = {}; -static struct hdmi_platform_config hdmi_tx_8660_config = { - .phy_init = hdmi_phy_8x60_init, -}; +static struct hdmi_platform_config hdmi_tx_8660_config; static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"}; static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; static struct hdmi_platform_config hdmi_tx_8960_config = { - .phy_init = hdmi_phy_8960_init, HDMI_CFG(hpd_reg, 8960), HDMI_CFG(hpd_clk, 8960), }; @@ -351,7 +372,6 @@ static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_cl static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; static struct hdmi_platform_config hdmi_tx_8974_config = { - .phy_init = hdmi_phy_8x74_init, HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, 8x74), HDMI_CFG(pwr_clk, 8x74), @@ -362,7 +382,6 @@ static struct hdmi_platform_config hdmi_tx_8974_config = { static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"}; static struct hdmi_platform_config hdmi_tx_8084_config = { - .phy_init = hdmi_phy_8x74_init, HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, 8084), HDMI_CFG(pwr_clk, 8x74), @@ -371,7 +390,6 @@ static struct hdmi_platform_config hdmi_tx_8084_config = { }; static struct hdmi_platform_config hdmi_tx_8994_config = { - .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */ HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, none), HDMI_CFG(pwr_clk, 8x74), @@ -380,7 +398,6 @@ static struct hdmi_platform_config hdmi_tx_8994_config = { }; static struct hdmi_platform_config hdmi_tx_8996_config = { - .phy_init = NULL, HDMI_CFG(pwr_reg, none), HDMI_CFG(hpd_reg, none), HDMI_CFG(pwr_clk, 8x74), @@ -388,7 +405,21 @@ static struct hdmi_platform_config hdmi_tx_8996_config = { .hpd_freq = hpd_clk_freq_8x74, }; -static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) +static const struct { + const char *name; + const bool output; + const int value; + const char *label; +} hdmi_gpio_pdata[] = { + { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" }, + { "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" }, + { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" }, + { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" }, + { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" }, + { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" }, +}; + +static int get_gpio(struct device_node *of_node, const char *name) { int gpio = of_get_named_gpio(of_node, name, 0); if (gpio < 0) { @@ -410,6 +441,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) static struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; struct device_node *of_node = dev->of_node; + int i; hdmi_cfg = (struct hdmi_platform_config *) 
of_device_get_match_data(dev); @@ -420,12 +452,14 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) hdmi_cfg->mmio_name = "core_physical"; hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; - hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); - hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); - hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); - hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); - hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); - hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); + + for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { + hdmi_cfg->gpios[i].num = get_gpio(of_node, + hdmi_gpio_pdata[i].name); + hdmi_cfg->gpios[i].output = hdmi_gpio_pdata[i].output; + hdmi_cfg->gpios[i].value = hdmi_gpio_pdata[i].value; + hdmi_cfg->gpios[i].label = hdmi_gpio_pdata[i].label; + } dev->platform_data = hdmi_cfg; @@ -485,10 +519,12 @@ static struct platform_driver hdmi_driver = { void __init hdmi_register(void) { + hdmi_phy_driver_register(); platform_driver_register(&hdmi_driver); } void __exit hdmi_unregister(void) { platform_driver_unregister(&hdmi_driver); + hdmi_phy_driver_unregister(); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index d0e663192d01f6..88557e59f750ef 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -27,10 +27,18 @@ #include "msm_drv.h" #include "hdmi.xml.h" +#define HDMI_MAX_NUM_GPIO 6 struct hdmi_phy; struct hdmi_platform_config; +struct hdmi_gpio_data { + int num; + bool output; + int value; + const char *label; +}; + struct hdmi_audio { bool enabled; struct hdmi_audio_infoframe infoframe; @@ -62,6 +70,8 @@ struct hdmi { struct clk **pwr_clks; struct hdmi_phy *phy; + struct device *phy_dev; + struct i2c_adapter *i2c; struct drm_connector *connector; struct drm_bridge *bridge; @@ -88,7 +98,6 @@ struct hdmi { /* platform config data (ie. 
from DT, or pdata) */
 struct hdmi_platform_config {
-	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
 	const char *qfprom_mmio_name;
 
@@ -110,8 +119,7 @@ struct hdmi_platform_config {
 	int pwr_clk_cnt;
 
 	/* gpio's: */
-	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
-	int mux_lpm_gpio;
+	struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
 };
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
@@ -132,24 +140,72 @@ static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
 }
 
 /*
- * The phy appears to be different, for example between 8960 and 8x60,
- * so split the phy related functions out and load the correct one at
- * runtime:
+ * hdmi phy:
  */
-struct hdmi_phy_funcs {
-	void (*destroy)(struct hdmi_phy *phy);
+enum hdmi_phy_type {
+	MSM_HDMI_PHY_8x60,
+	MSM_HDMI_PHY_8960,
+	MSM_HDMI_PHY_8x74,
+	MSM_HDMI_PHY_8996,
+	MSM_HDMI_PHY_MAX,
+};
+
+struct hdmi_phy_cfg {
+	enum hdmi_phy_type type;
 	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
 	void (*powerdown)(struct hdmi_phy *phy);
+	const char * const *reg_names;
+	int num_regs;
+	const char * const *clk_names;
+	int num_clks;
 };
 
+extern const struct hdmi_phy_cfg hdmi_phy_8x60_cfg;
+extern const struct hdmi_phy_cfg hdmi_phy_8960_cfg;
+extern const struct hdmi_phy_cfg hdmi_phy_8x74_cfg;
+extern const struct hdmi_phy_cfg hdmi_phy_8996_cfg;
+
 struct hdmi_phy {
+	struct platform_device *pdev;
+	void __iomem *mmio;
+	struct hdmi_phy_cfg *cfg;
 	const struct hdmi_phy_funcs *funcs;
+	struct regulator **regs;
+	struct clk **clks;
 };
 
-struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
-struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
-struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
+static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
+{
+	msm_writel(data, phy->mmio + reg);
+}
+
+static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg)
+{
+	return msm_readl(phy->mmio + reg);
+}
+
+int hdmi_phy_resource_enable(struct hdmi_phy *phy);
+void hdmi_phy_resource_disable(struct hdmi_phy *phy);
+void hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock);
+void hdmi_phy_powerdown(struct hdmi_phy *phy);
+void __init hdmi_phy_driver_register(void);
+void __exit hdmi_phy_driver_unregister(void);
+
+#ifdef CONFIG_COMMON_CLK
+int hdmi_pll_8960_init(struct platform_device *pdev);
+int hdmi_pll_8996_init(struct platform_device *pdev);
+#else
+static inline int hdmi_pll_8960_init(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+
+static inline int hdmi_pll_8996_init(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif
 
 /*
  * audio:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 10c45700aefedd..46c7aa4afd8c4c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,10 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) - -Copyright (C) 2013-2015 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-08 08:20:42) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-02-09 03:18:10) + +Copyright (C) 2013-2016 by the following authors: - Rob Clark (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -559,7 +550,7 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) #define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370 -#define REG_HDMI_8x60_PHY_REG0 0x00000300 +#define REG_HDMI_8x60_PHY_REG0 0x00000000 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2 static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val) @@ -567,7 +558,7 @@ static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val) return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK; } -#define REG_HDMI_8x60_PHY_REG1 0x00000304 +#define REG_HDMI_8x60_PHY_REG1 0x00000004 #define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0 #define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4 static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val) @@ -581,7 +572,7 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK; } -#define REG_HDMI_8x60_PHY_REG2 0x00000308 +#define REG_HDMI_8x60_PHY_REG2 0x00000008 #define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001 #define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002 #define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004 @@ -591,152 +582,152 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040 #define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080 -#define REG_HDMI_8x60_PHY_REG3 0x0000030c +#define REG_HDMI_8x60_PHY_REG3 0x0000000c #define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001 -#define REG_HDMI_8x60_PHY_REG4 0x00000310 +#define REG_HDMI_8x60_PHY_REG4 0x00000010 -#define REG_HDMI_8x60_PHY_REG5 0x00000314 +#define REG_HDMI_8x60_PHY_REG5 0x00000014 -#define REG_HDMI_8x60_PHY_REG6 0x00000318 +#define REG_HDMI_8x60_PHY_REG6 0x00000018 -#define REG_HDMI_8x60_PHY_REG7 0x0000031c +#define REG_HDMI_8x60_PHY_REG7 0x0000001c -#define REG_HDMI_8x60_PHY_REG8 0x00000320 +#define REG_HDMI_8x60_PHY_REG8 0x00000020 -#define REG_HDMI_8x60_PHY_REG9 0x00000324 +#define REG_HDMI_8x60_PHY_REG9 0x00000024 -#define REG_HDMI_8x60_PHY_REG10 0x00000328 +#define REG_HDMI_8x60_PHY_REG10 0x00000028 -#define REG_HDMI_8x60_PHY_REG11 0x0000032c +#define REG_HDMI_8x60_PHY_REG11 0x0000002c -#define REG_HDMI_8x60_PHY_REG12 0x00000330 +#define REG_HDMI_8x60_PHY_REG12 0x00000030 #define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001 #define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 
0x00000002 #define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010 -#define REG_HDMI_8960_PHY_REG0 0x00000400 +#define REG_HDMI_8960_PHY_REG0 0x00000000 -#define REG_HDMI_8960_PHY_REG1 0x00000404 +#define REG_HDMI_8960_PHY_REG1 0x00000004 -#define REG_HDMI_8960_PHY_REG2 0x00000408 +#define REG_HDMI_8960_PHY_REG2 0x00000008 -#define REG_HDMI_8960_PHY_REG3 0x0000040c +#define REG_HDMI_8960_PHY_REG3 0x0000000c -#define REG_HDMI_8960_PHY_REG4 0x00000410 +#define REG_HDMI_8960_PHY_REG4 0x00000010 -#define REG_HDMI_8960_PHY_REG5 0x00000414 +#define REG_HDMI_8960_PHY_REG5 0x00000014 -#define REG_HDMI_8960_PHY_REG6 0x00000418 +#define REG_HDMI_8960_PHY_REG6 0x00000018 -#define REG_HDMI_8960_PHY_REG7 0x0000041c +#define REG_HDMI_8960_PHY_REG7 0x0000001c -#define REG_HDMI_8960_PHY_REG8 0x00000420 +#define REG_HDMI_8960_PHY_REG8 0x00000020 -#define REG_HDMI_8960_PHY_REG9 0x00000424 +#define REG_HDMI_8960_PHY_REG9 0x00000024 -#define REG_HDMI_8960_PHY_REG10 0x00000428 +#define REG_HDMI_8960_PHY_REG10 0x00000028 -#define REG_HDMI_8960_PHY_REG11 0x0000042c +#define REG_HDMI_8960_PHY_REG11 0x0000002c -#define REG_HDMI_8960_PHY_REG12 0x00000430 +#define REG_HDMI_8960_PHY_REG12 0x00000030 #define HDMI_8960_PHY_REG12_SW_RESET 0x00000020 #define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080 -#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434 +#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000034 -#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438 +#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000038 -#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c +#define REG_HDMI_8960_PHY_REG_MISC0 0x0000003c -#define REG_HDMI_8960_PHY_REG13 0x00000440 +#define REG_HDMI_8960_PHY_REG13 0x00000040 -#define REG_HDMI_8960_PHY_REG14 0x00000444 +#define REG_HDMI_8960_PHY_REG14 0x00000044 -#define REG_HDMI_8960_PHY_REG15 0x00000448 +#define REG_HDMI_8960_PHY_REG15 0x00000048 -#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500 +#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000000 -#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504 +#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000004 -#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508 +#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000008 -#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c +#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000000c -#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510 +#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000010 -#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514 +#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000014 -#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518 +#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000018 #define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002 #define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c +#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000001c -#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520 +#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000020 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524 +#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000024 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528 +#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000028 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c +#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000002c -#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530 +#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000030 -#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534 +#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000034 -#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538 +#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000038 -#define 
REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c +#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000003c -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540 +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000040 -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544 +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000044 -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548 +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000048 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000004c -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000050 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000054 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000058 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000005c -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000060 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000064 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000068 -#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c +#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000006c -#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570 +#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000070 -#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574 +#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000074 -#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578 +#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000078 -#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c +#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000007c -#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580 +#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000080 -#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584 +#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000084 -#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588 +#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000088 -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000008c -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590 +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000090 -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594 +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000094 -#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598 +#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000098 #define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001 -#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c +#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000009c #define REG_HDMI_8x74_ANA_CFG0 0x00000000 @@ -843,5 +834,501 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 +#define REG_HDMI_8996_PHY_CFG 0x00000000 + +#define REG_HDMI_8996_PHY_PD_CTL 0x00000004 + +#define REG_HDMI_8996_PHY_MODE 0x00000008 + +#define REG_HDMI_8996_PHY_MISR_CLEAR 0x0000000c + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0 0x00000010 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1 0x00000014 + +#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0 0x00000018 + +#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1 0x0000001c + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0 0x00000020 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1 0x00000024 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0 0x00000028 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1 0x0000002c + +#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0 
0x00000030 + +#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1 0x00000034 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0 0x00000038 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1 0x0000003c + +#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL 0x00000040 + +#define REG_HDMI_8996_PHY_TXCAL_CFG0 0x00000044 + +#define REG_HDMI_8996_PHY_TXCAL_CFG1 0x00000048 + +#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL 0x0000004c + +#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL 0x00000050 + +#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG 0x00000054 + +#define REG_HDMI_8996_PHY_CLOCK 0x00000058 + +#define REG_HDMI_8996_PHY_MISC1 0x0000005c + +#define REG_HDMI_8996_PHY_MISC2 0x00000060 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0 0x00000064 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1 0x00000068 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2 0x0000006c + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0 0x00000070 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1 0x00000074 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2 0x00000078 + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0 0x0000007c + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1 0x00000080 + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2 0x00000084 + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3 0x00000088 + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS0 0x0000008c + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS1 0x00000090 + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS2 0x00000094 + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS3 0x00000098 + +#define REG_HDMI_8996_PHY_STATUS 0x0000009c + +#define REG_HDMI_8996_PHY_MISC3_STATUS 0x000000a0 + +#define REG_HDMI_8996_PHY_MISC4_STATUS 0x000000a4 + +#define REG_HDMI_8996_PHY_DEBUG_BUS0 0x000000a8 + +#define REG_HDMI_8996_PHY_DEBUG_BUS1 0x000000ac + +#define REG_HDMI_8996_PHY_DEBUG_BUS2 0x000000b0 + +#define REG_HDMI_8996_PHY_DEBUG_BUS3 0x000000b4 + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID0 0x000000b8 + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID1 0x000000bc + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID2 0x000000c0 + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID3 0x000000c4 + +#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1 0x00000000 + +#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2 0x00000004 + +#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE 0x00000008 + +#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER 0x0000000c + +#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER 0x00000010 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1 0x00000014 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2 0x00000018 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1 0x0000001c + +#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2 0x00000020 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1 0x00000024 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2 0x00000028 + +#define REG_HDMI_PHY_QSERDES_COM_POST_DIV 0x0000002c + +#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX 0x00000030 + +#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00000034 + +#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1 0x00000038 + +#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL 0x0000003c + +#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE 0x00000040 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_EN 0x00000044 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO 0x00000048 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0 0x0000004c + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0 0x00000050 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0 0x00000054 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1 0x00000058 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1 0x0000005c + +#define 
REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1 0x00000060 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2 0x00000064 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0 0x00000064 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2 0x00000068 + +#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x00000068 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2 0x0000006c + +#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x0000006c + +#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM 0x00000070 + +#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV 0x00000074 + +#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0 0x00000078 + +#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1 0x0000007c + +#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2 0x00000080 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1 0x00000080 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0 0x00000084 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1 0x00000088 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2 0x0000008c + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2 0x0000008c + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0 0x00000090 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1 0x00000094 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2 0x00000098 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3 0x00000098 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL 0x0000009c + +#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL 0x000000a0 + +#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC 0x000000a4 + +#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x000000a8 + +#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x000000a8 + +#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL 0x000000ac + +#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL 0x000000b0 + +#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL 0x000000b4 + +#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2 0x000000b8 + +#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL 0x000000bc + +#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2 0x000000c0 + +#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM 0x000000c4 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN 0x000000c8 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG 0x000000cc + +#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0 0x000000d0 + +#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1 0x000000d4 + +#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2 0x000000d8 + +#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x000000d8 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0 0x000000dc + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0 0x000000e0 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0 0x000000e4 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1 0x000000e8 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1 0x000000ec + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1 0x000000f0 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2 0x000000f4 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1 0x000000f4 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2 0x000000f8 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2 0x000000f8 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2 0x000000fc + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4 0x000000fc + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL 0x00000100 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN 0x00000104 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00000108 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x0000010c + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00000110 + +#define 
REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00000114 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x00000118 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1 0x00000118 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x0000011c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2 0x0000011c + +#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2 0x00000120 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL 0x00000124 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP 0x00000128 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0 0x0000012c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0 0x00000130 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1 0x00000134 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1 0x00000138 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2 0x0000013c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1 0x0000013c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2 0x00000140 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2 0x00000140 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1 0x00000144 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2 0x00000148 + +#define REG_HDMI_PHY_QSERDES_COM_SAR 0x0000014c + +#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK 0x00000150 + +#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS 0x00000154 + +#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS 0x00000158 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS 0x0000015c + +#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS 0x00000160 + +#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS 0x00000164 + +#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS 0x00000168 + +#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS 0x0000016c + +#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL 0x00000170 + +#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT 0x00000174 + +#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL 0x00000178 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x0000017c + +#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG 0x00000180 + +#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV 0x00000184 + +#define REG_HDMI_PHY_QSERDES_COM_SW_RESET 0x00000188 + +#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN 0x0000018c + +#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS 0x00000190 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG 0x00000194 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE 0x00000198 + +#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL 0x0000019c + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0 0x000001a0 + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1 0x000001a4 + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2 0x000001a8 + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3 0x000001ac + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL 0x000001b0 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1 0x000001b4 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2 0x000001b8 + +#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1 0x000001bc + +#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2 0x000001c0 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5 0x000001c4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO 0x00000000 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT 0x00000004 + +#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE 0x00000008 + +#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE 0x0000000c + +#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO 0x00000010 + +#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE 0x00000014 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL 0x00000018 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH 0x0000001c + +#define 
REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN 0x00000020 + +#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES 0x00000024 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP 0x00000028 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL 0x0000002c + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET 0x00000030 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN 0x00000034 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN 0x00000038 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND 0x0000003c + +#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL 0x00000040 + +#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT 0x00000044 + +#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN 0x00000048 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX 0x0000004c + +#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX 0x00000050 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET 0x00000054 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1 0x00000058 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2 0x0000005c + +#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT 0x00000060 + +#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL 0x00000064 + +#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x00000068 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV 0x0000006c + +#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN 0x00000070 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1 0x00000074 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2 0x00000078 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3 0x0000007c + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4 0x00000080 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5 0x00000084 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6 0x00000088 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7 0x0000008c + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8 0x00000090 + +#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE 0x00000094 + +#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE 0x00000098 + +#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x0000009c + +#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1 0x000000a0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2 0x000000a4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL 0x000000a8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2 0x000000ac + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1 0x000000b0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2 0x000000b4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3 0x000000b8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4 0x000000bc + +#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN 0x000000c0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES 0x000000c4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN 0x000000c8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE 0x000000cc + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL 0x000000d0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA 0x000000d4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2 0x000000d8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2 0x000000dc + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2 0x000000e0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2 0x000000e4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1 0x000000e8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1 0x000000ec + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1 0x000000f0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1 0x000000f4 + 
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1 0x000000f8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2 0x000000fc + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL 0x00000100 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS 0x00000104 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1 0x00000108 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2 0x0000010c + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV 0x00000110 + #endif /* HDMI_XML */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 92b69ae8caf9c4..310d92d06229c6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -95,13 +95,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) DBG("power up"); if (!hdmi->power_on) { + hdmi_phy_resource_enable(phy); power_on(bridge); hdmi->power_on = true; hdmi_audio_update(hdmi); } - if (phy) - phy->funcs->powerup(phy, hdmi->pixclock); + hdmi_phy_powerup(phy, hdmi->pixclock); hdmi_set_mode(hdmi, true); @@ -129,13 +129,13 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge) DBG("power down"); hdmi_set_mode(hdmi, false); - if (phy) - phy->funcs->powerdown(phy); + hdmi_phy_powerdown(phy); if (hdmi->power_on) { power_off(bridge); hdmi->power_on = false; hdmi_audio_update(hdmi); + hdmi_phy_resource_disable(phy); } } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index a3b05ae52dae63..d2e818e2e2471b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -81,114 +81,53 @@ static int gpio_config(struct hdmi *hdmi, bool on) { struct device *dev = &hdmi->pdev->dev; const struct hdmi_platform_config *config = hdmi->config; - int ret; + int ret, i; if (on) { - if (config->ddc_clk_gpio != -1) { - ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); - goto error1; - } - gpio_set_value_cansleep(config->ddc_clk_gpio, 1); - } - - if (config->ddc_data_gpio != -1) { - ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_DATA", config->ddc_data_gpio, ret); - goto error2; - } - gpio_set_value_cansleep(config->ddc_data_gpio, 1); - } - - ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_HPD", config->hpd_gpio, ret); - goto error3; - } - gpio_direction_input(config->hpd_gpio); - gpio_set_value_cansleep(config->hpd_gpio, 1); - - if (config->mux_en_gpio != -1) { - ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_EN", config->mux_en_gpio, ret); - goto error4; - } - gpio_set_value_cansleep(config->mux_en_gpio, 1); - } - - if (config->mux_sel_gpio != -1) { - ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_SEL", config->mux_sel_gpio, ret); - goto error5; + for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { + struct hdmi_gpio_data gpio = config->gpios[i]; + + if (gpio.num != -1) { + ret = gpio_request(gpio.num, gpio.label); + if (ret) { + dev_err(dev, + "'%s'(%d) gpio_request failed: %d\n", + gpio.label, gpio.num, ret); + goto err; + } + + if (gpio.output) { + gpio_direction_output(gpio.num, + gpio.value); + } else { + 
gpio_direction_input(gpio.num); + gpio_set_value_cansleep(gpio.num, + gpio.value); + } } - } + } - if (config->mux_lpm_gpio != -1) { - ret = gpio_request(config->mux_lpm_gpio, - "HDMI_MUX_LPM"); - if (ret) { - dev_err(dev, - "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_LPM", - config->mux_lpm_gpio, ret); - goto error6; - } - gpio_set_value_cansleep(config->mux_lpm_gpio, 1); - } DBG("gpio on"); } else { - if (config->ddc_clk_gpio != -1) - gpio_free(config->ddc_clk_gpio); - - if (config->ddc_data_gpio != -1) - gpio_free(config->ddc_data_gpio); + for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { + struct hdmi_gpio_data gpio = config->gpios[i]; + + if (gpio.num == -1) + continue; - gpio_free(config->hpd_gpio); - - if (config->mux_en_gpio != -1) { - gpio_set_value_cansleep(config->mux_en_gpio, 0); - gpio_free(config->mux_en_gpio); - } + if (gpio.output) { + int value = gpio.value ? 0 : 1; + gpio_set_value_cansleep(gpio.num, value); + } - if (config->mux_sel_gpio != -1) { - gpio_set_value_cansleep(config->mux_sel_gpio, 1); - gpio_free(config->mux_sel_gpio); - } + gpio_free(gpio.num); + } - if (config->mux_lpm_gpio != -1) { - gpio_set_value_cansleep(config->mux_lpm_gpio, 0); - gpio_free(config->mux_lpm_gpio); - } DBG("gpio off"); } return 0; +err: + while (i--) { + if (config->gpios[i].num != -1) + gpio_free(config->gpios[i].num); + } -error6: - if (config->mux_sel_gpio != -1) - gpio_free(config->mux_sel_gpio); -error5: - if (config->mux_en_gpio != -1) - gpio_free(config->mux_en_gpio); -error4: - gpio_free(config->hpd_gpio); -error3: - if (config->ddc_data_gpio != -1) - gpio_free(config->ddc_data_gpio); -error2: - if (config->ddc_clk_gpio != -1) - gpio_free(config->ddc_clk_gpio); -error1: return ret; } @@ -345,10 +284,13 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi) connector_status_connected : connector_status_disconnected; } +#define HPD_GPIO_INDEX 2 static enum drm_connector_status detect_gpio(struct hdmi *hdmi) { const struct hdmi_platform_config *config = hdmi->config; - return gpio_get_value(config->hpd_gpio) ? + struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; + + return gpio_get_value(hpd_gpio.num) ? connector_status_connected : connector_status_disconnected; } @@ -358,9 +300,18 @@ static enum drm_connector_status hdmi_connector_detect( { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; + const struct hdmi_platform_config *config = hdmi->config; + struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; enum drm_connector_status stat_gpio, stat_reg; int retry = 20; + /* + * Some platforms may not have an HPD gpio. Rely only on the status + * provided by REG_HDMI_HPD_INT_STATUS in that case. + */ + if (hpd_gpio.num == -1) + return detect_reg(hdmi); + do { stat_gpio = detect_gpio(hdmi); stat_reg = detect_reg(hdmi); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c new file mode 100644 index 00000000000000..ce981c11c3b5b0 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + */ + +#include <linux/of_device.h> + +#include "hdmi.h" + +static int hdmi_phy_resource_init(struct hdmi_phy *phy) +{ + struct hdmi_phy_cfg *cfg = phy->cfg; + struct device *dev = &phy->pdev->dev; + int i, ret; + + phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs, + GFP_KERNEL); + if (!phy->regs) + return -ENOMEM; + + phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks, + GFP_KERNEL); + if (!phy->clks) + return -ENOMEM; + + for (i = 0; i < cfg->num_regs; i++) { + struct regulator *reg; + + reg = devm_regulator_get(dev, cfg->reg_names[i]); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + dev_err(dev, "failed to get phy regulator: %s (%d)\n", + cfg->reg_names[i], ret); + return ret; + } + + phy->regs[i] = reg; + } + + for (i = 0; i < cfg->num_clks; i++) { + struct clk *clk; + + clk = devm_clk_get(dev, cfg->clk_names[i]); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "failed to get phy clock: %s (%d)\n", + cfg->clk_names[i], ret); + return ret; + } + + phy->clks[i] = clk; + } + + return 0; +} + +int hdmi_phy_resource_enable(struct hdmi_phy *phy) +{ + struct hdmi_phy_cfg *cfg = phy->cfg; + struct device *dev = &phy->pdev->dev; + int i, ret = 0; + + pm_runtime_get_sync(dev); + + for (i = 0; i < cfg->num_regs; i++) { + ret = regulator_enable(phy->regs[i]); + if (ret) + dev_err(dev, "failed to enable regulator: %s (%d)\n", + cfg->reg_names[i], ret); + } + + for (i = 0; i < cfg->num_clks; i++) { + ret = clk_prepare_enable(phy->clks[i]); + if (ret) + dev_err(dev, "failed to enable clock: %s (%d)\n", + cfg->clk_names[i], ret); + } + + return ret; +} + +void hdmi_phy_resource_disable(struct hdmi_phy *phy) +{ + struct hdmi_phy_cfg *cfg = phy->cfg; + struct device *dev = &phy->pdev->dev; + int i; + + for (i = cfg->num_clks - 1; i >= 0; i--) + clk_disable_unprepare(phy->clks[i]); + + for (i = cfg->num_regs - 1; i >= 0; i--) + regulator_disable(phy->regs[i]); + + pm_runtime_put_sync(dev); +} + +void hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock) +{ + if (!phy || !phy->cfg->powerup) + return; + + phy->cfg->powerup(phy, pixclock); +} + +void hdmi_phy_powerdown(struct hdmi_phy *phy) +{ + if (!phy || !phy->cfg->powerdown) + return; + + phy->cfg->powerdown(phy); +} + +static int hdmi_phy_pll_init(struct platform_device *pdev, + enum hdmi_phy_type type) +{ + int ret; + + switch (type) { + case MSM_HDMI_PHY_8960: + ret = hdmi_pll_8960_init(pdev); + break; + case MSM_HDMI_PHY_8996: + ret = hdmi_pll_8996_init(pdev); + break; + /* + * We don't have PLL support for these; don't report an error for now. + */ + case MSM_HDMI_PHY_8x60: + case MSM_HDMI_PHY_8x74: + default: + ret = 0; + break; + } + + return ret; +} + +static int hdmi_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_phy *phy; + int ret; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->cfg = (struct hdmi_phy_cfg *)of_device_get_match_data(dev); + if (!phy->cfg) + return -ENODEV; + + phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY"); + if (IS_ERR(phy->mmio)) { + dev_err(dev, "%s: failed to map phy base\n", __func__); + return -ENOMEM; + } + + phy->pdev = pdev; + + ret = hdmi_phy_resource_init(phy); + if (ret) + return ret; + + pm_runtime_enable(&pdev->dev); + + ret = hdmi_phy_resource_enable(phy); + if (ret) + return ret; + + ret = hdmi_phy_pll_init(pdev, phy->cfg->type); + if (ret) { + dev_err(dev, "couldn't init PLL\n"); + hdmi_phy_resource_disable(phy); + return
ret; + } + + hdmi_phy_resource_disable(phy); + + platform_set_drvdata(pdev, phy); + + return 0; +} + +static int hdmi_phy_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id hdmi_phy_dt_match[] = { + { .compatible = "qcom,hdmi-phy-8x60", + .data = &hdmi_phy_8x60_cfg }, + { .compatible = "qcom,hdmi-phy-8960", + .data = &hdmi_phy_8960_cfg }, + { .compatible = "qcom,hdmi-phy-8x74", + .data = &hdmi_phy_8x74_cfg }, + { .compatible = "qcom,hdmi-phy-8996", + .data = &hdmi_phy_8996_cfg }, + {} +}; + +static struct platform_driver hdmi_phy_platform_driver = { + .probe = hdmi_phy_probe, + .remove = hdmi_phy_remove, + .driver = { + .name = "msm_hdmi_phy", + .of_match_table = hdmi_phy_dt_match, + }, +}; + +void __init hdmi_phy_driver_register(void) +{ + platform_driver_register(&hdmi_phy_platform_driver); +} + +void __exit hdmi_phy_driver_unregister(void) +{ + platform_driver_unregister(&hdmi_phy_platform_driver); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index 3a01cb5051e2db..46d449e84d713b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -15,495 +15,48 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ -#ifdef CONFIG_COMMON_CLK -#include <linux/clk.h> -#include <linux/clk-provider.h> -#endif - #include "hdmi.h" -struct hdmi_phy_8960 { - struct hdmi_phy base; - struct hdmi *hdmi; -#ifdef CONFIG_COMMON_CLK - struct clk_hw pll_hw; - struct clk *pll; - unsigned long pixclk; -#endif -}; -#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) - -#ifdef CONFIG_COMMON_CLK -#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw) - -/* - * HDMI PLL: - * - * To get the parent clock setup properly, we need to plug in hdmi pll - * configuration into common-clock-framework.
- */ - -struct pll_rate { - unsigned long rate; - struct { - uint32_t val; - uint32_t reg; - } conf[32]; -}; - -/* NOTE: keep sorted highest freq to lowest: */ -static const struct pll_rate freqtbl[] = { - { 154000000, { - { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - /* 1080p60/1080p50 case */ - { 148500000, { - { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, - { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, - { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, - { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, - { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, - { 108000000, { - { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */ - { 74250000, { - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0, 0 } } - }, - { 74176000, { - { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, 
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - { 65000000, { - { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - /* 480p60/480i60 */ - { 27030000, { - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, - /* 576p50/576i50 */ - { 27000000, { - { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, - { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, - { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, - { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, - { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, - /* 640x480p60 */ - { 25200000, { - { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x2c, 
REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, - { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, - { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, - { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, - { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, - { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, -}; - -static int hdmi_pll_enable(struct clk_hw *hw) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - struct hdmi *hdmi = phy_8960->hdmi; - int timeout_count, pll_lock_retry = 10; - unsigned int val; - - DBG(""); - - /* Assert PLL S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10); - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a); - - /* Wait for a short time before de-asserting - * to allow the hardware to complete its job. - * This much of delay should be fine for hardware - * to assert and de-assert. - */ - udelay(10); - - /* De-assert PLL S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); - val |= HDMI_8960_PHY_REG12_SW_RESET; - /* Assert PHY S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - val &= ~HDMI_8960_PHY_REG12_SW_RESET; - /* Wait for a short time before de-asserting - to allow the hardware to complete its job. - This much of delay should be fine for hardware - to assert and de-assert. */ - udelay(10); - /* De-assert PHY S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); - val |= HDMI_8960_PHY_REG12_PWRDN_B; - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - /* Wait 10 us for enabling global power for PHY */ - mb(); - udelay(10); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B); - val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B; - val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL; - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80); - - timeout_count = 1000; - while (--pll_lock_retry > 0) { - - /* are we there yet? */ - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0); - if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK) - break; - - udelay(1); - - if (--timeout_count > 0) - continue; - - /* - * PLL has still not locked. 
- * Do a software reset and try again - * Assert PLL S/W reset first - */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); - udelay(10); - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); - - /* - * Wait for a short duration for the PLL calibration - * before checking if the PLL gets locked - */ - udelay(350); - - timeout_count = 1000; - } - - return 0; -} - -static void hdmi_pll_disable(struct clk_hw *hw) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - struct hdmi *hdmi = phy_8960->hdmi; - unsigned int val; - - DBG(""); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); - val &= ~HDMI_8960_PHY_REG12_PWRDN_B; - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B); - val |= HDMI_8960_PHY_REG12_SW_RESET; - val &= ~HDMI_8960_PHY_REG12_PWRDN_B; - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); - /* Make sure HDMI PHY/PLL are powered down */ - mb(); -} - -static const struct pll_rate *find_rate(unsigned long rate) -{ - int i; - for (i = 1; i < ARRAY_SIZE(freqtbl); i++) - if (rate > freqtbl[i].rate) - return &freqtbl[i-1]; - return &freqtbl[i-1]; -} - -static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - return phy_8960->pixclk; -} - -static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - const struct pll_rate *pll_rate = find_rate(rate); - return pll_rate->rate; -} - -static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - struct hdmi *hdmi = phy_8960->hdmi; - const struct pll_rate *pll_rate = find_rate(rate); - int i; - - DBG("rate=%lu", rate); - - for (i = 0; pll_rate->conf[i].reg; i++) - hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val); - - phy_8960->pixclk = rate; - - return 0; -} - - -static const struct clk_ops hdmi_pll_ops = { - .enable = hdmi_pll_enable, - .disable = hdmi_pll_disable, - .recalc_rate = hdmi_pll_recalc_rate, - .round_rate = hdmi_pll_round_rate, - .set_rate = hdmi_pll_set_rate, -}; - -static const char *hdmi_pll_parents[] = { - "pxo", -}; - -static struct clk_init_data pll_init = { - .name = "hdmi_pll", - .ops = &hdmi_pll_ops, - .parent_names = hdmi_pll_parents, - .num_parents = ARRAY_SIZE(hdmi_pll_parents), -}; -#endif - -/* - * HDMI Phy: - */ - -static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) -{ - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - kfree(phy_8960); -} - static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, - unsigned long int pixclock) + unsigned long int pixclock) { - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - struct hdmi *hdmi = phy_8960->hdmi; - DBG("pixclock: %lu", pixclock); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG0, 0x1b); + 
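/* same init values as before, now written through the PHY's own mmio */ +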
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG1, 0xf2); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG4, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG5, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG6, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG7, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG8, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG9, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG10, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG11, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG3, 0x20); } static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy) { - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - struct hdmi *hdmi = phy_8960->hdmi; - DBG(""); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x7f); } -static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = { - .destroy = hdmi_phy_8960_destroy, - .powerup = hdmi_phy_8960_powerup, - .powerdown = hdmi_phy_8960_powerdown, +static const char * const hdmi_phy_8960_reg_names[] = { + "core-vdda", }; -struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) -{ - struct hdmi_phy_8960 *phy_8960; - struct hdmi_phy *phy = NULL; - int ret; -#ifdef CONFIG_COMMON_CLK - int i; - - /* sanity check: */ - for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) - if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate)) - return ERR_PTR(-EINVAL); -#endif - - phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); - if (!phy_8960) { - ret = -ENOMEM; - goto fail; - } - - phy = &phy_8960->base; - - phy->funcs = &hdmi_phy_8960_funcs; - - phy_8960->hdmi = hdmi; - -#ifdef CONFIG_COMMON_CLK - phy_8960->pll_hw.init = &pll_init; - phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw); - if (IS_ERR(phy_8960->pll)) { - ret = PTR_ERR(phy_8960->pll); - phy_8960->pll = NULL; - goto fail; - } -#endif - - return phy; +static const char * const hdmi_phy_8960_clk_names[] = { + "slave_iface_clk", +}; -fail: - if (phy) - hdmi_phy_8960_destroy(phy); - return ERR_PTR(ret); -} +const struct hdmi_phy_cfg hdmi_phy_8960_cfg = { + .type = MSM_HDMI_PHY_8960, + .powerup = hdmi_phy_8960_powerup, + .powerdown = hdmi_phy_8960_powerdown, + .reg_names = hdmi_phy_8960_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8960_reg_names), + .clk_names = hdmi_phy_8960_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8960_clk_names), +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c new file mode 100644 index 00000000000000..ef206f3480949d --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -0,0 +1,767 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/clk-provider.h> + +#include "hdmi.h" + +#define HDMI_VCO_MAX_FREQ 12000000000UL +#define HDMI_VCO_MIN_FREQ 8000000000UL + +#define HDMI_PCLK_MAX_FREQ 600000000 +#define HDMI_PCLK_MIN_FREQ 25000000 + +#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL +#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL +#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL +#define HDMI_CORECLK_DIV 5 +#define HDMI_DEFAULT_REF_CLOCK 19200000 +#define HDMI_PLL_CMP_CNT 1024 + +#define HDMI_PLL_POLL_MAX_READS 100 +#define HDMI_PLL_POLL_TIMEOUT_US 150 + +#define HDMI_NUM_TX_CHANNEL 4 + +struct hdmi_pll_8996 { + struct platform_device *pdev; + struct clk_hw clk_hw; + + /* pll mmio base */ + void __iomem *mmio_qserdes_com; + /* tx channel base */ + void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL]; +}; + +#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8996, clk_hw) + +struct hdmi_8996_phy_pll_reg_cfg { + u32 tx_lx_lane_mode[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL]; + u32 com_svs_mode_clk_sel; + u32 com_hsclk_sel; + u32 com_pll_cctrl_mode0; + u32 com_pll_rctrl_mode0; + u32 com_cp_ctrl_mode0; + u32 com_dec_start_mode0; + u32 com_div_frac_start1_mode0; + u32 com_div_frac_start2_mode0; + u32 com_div_frac_start3_mode0; + u32 com_integloop_gain0_mode0; + u32 com_integloop_gain1_mode0; + u32 com_lock_cmp_en; + u32 com_lock_cmp1_mode0; + u32 com_lock_cmp2_mode0; + u32 com_lock_cmp3_mode0; + u32 com_core_clk_en; + u32 com_coreclk_div; + u32 com_vco_tune_ctrl; + + u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_vmode_ctrl1[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_vmode_ctrl2[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_res_code_lane_tx[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_hp_pd_enables[HDMI_NUM_TX_CHANNEL]; + + u32 phy_mode; +}; + +struct hdmi_8996_post_divider { + u64 vco_freq; + int hsclk_divsel; + int vco_ratio; + int tx_band_sel; + int half_rate_mode; +}; + +static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll) +{ + return platform_get_drvdata(pll->pdev); +} + +static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset, + u32 data) +{ + msm_writel(data, pll->mmio_qserdes_com + offset); +} + +static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset) +{ + return msm_readl(pll->mmio_qserdes_com + offset); +} + +static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel, + int offset, int data) +{ + msm_writel(data, pll->mmio_qserdes_tx[channel] + offset); +} + +static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk, + bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return (11000000 / (ref_clk / 20)); + + return 0x23; +} + +static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x16; + + return 0x10; +} + +static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x28; + + return 0x1; +} + +static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk, + bool gen_ssc) +{ + int digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2; + u64 base; + + if ((frac_start != 0) || gen_ssc) + base = (64 * ref_clk) / HDMI_DEFAULT_REF_CLOCK; + else + base = (1022 * ref_clk) / 100; + + base <<= digclk_divsel; + + return (base <= 2046 ?
base : 2046); +} + +static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) +{ + u64 dividend = HDMI_PLL_CMP_CNT * fdata; + u32 divisor = ref_clk * 10; + u32 rem; + + rem = do_div(dividend, divisor); + if (rem > (divisor >> 1)) + dividend++; + + return dividend - 1; +} + +static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk) +{ + u64 fdata = ((u64)pll_cmp) * ref_clk * 10; + + do_div(fdata, HDMI_PLL_CMP_CNT); + + return fdata; +} + +static int pll_get_post_div(struct hdmi_8996_post_divider *pd, u64 bclk) +{ + int ratio[] = { 2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35 }; + int hs_divsel[] = { 0, 4, 8, 12, 1, 5, 2, 9, 3, 13, 10, 7, 14, 11, 15 }; + int tx_band_sel[] = { 0, 1, 2, 3 }; + u64 vco_freq[60]; + u64 vco, vco_optimal; + int half_rate_mode = 0; + int vco_optimal_index, vco_freq_index; + int i, j; + +retry: + vco_optimal = HDMI_VCO_MAX_FREQ; + vco_optimal_index = -1; + vco_freq_index = 0; + for (i = 0; i < 15; i++) { + for (j = 0; j < 4; j++) { + u32 ratio_mult = ratio[i] << tx_band_sel[j]; + + vco = bclk >> half_rate_mode; + vco *= ratio_mult; + vco_freq[vco_freq_index++] = vco; + } + } + + for (i = 0; i < 60; i++) { + u64 vco_tmp = vco_freq[i]; + + if ((vco_tmp >= HDMI_VCO_MIN_FREQ) && + (vco_tmp <= vco_optimal)) { + vco_optimal = vco_tmp; + vco_optimal_index = i; + } + } + + if (vco_optimal_index == -1) { + if (!half_rate_mode) { + half_rate_mode = 1; + goto retry; + } + } else { + pd->vco_freq = vco_optimal; + pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4]; + pd->vco_ratio = ratio[vco_optimal_index / 4]; + pd->hsclk_divsel = hs_divsel[vco_optimal_index / 4]; + + return 0; + } + + return -EINVAL; +} + +static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk, + struct hdmi_8996_phy_pll_reg_cfg *cfg) +{ + struct hdmi_8996_post_divider pd; + u64 bclk; + u64 tmds_clk; + u64 dec_start; + u64 frac_start; + u64 fdata; + u32 pll_divisor; + u32 rem; + u32 cpctrl; + u32 rctrl; + u32 cctrl; + u32 integloop_gain; + u32 pll_cmp; + int i, ret; + + /* bit clk = 10 * pix_clk */ + bclk = ((u64)pix_clk) * 10; + + if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) + tmds_clk = pix_clk >> 2; + else + tmds_clk = pix_clk; + + ret = pll_get_post_div(&pd, bclk); + if (ret) + return ret; + + dec_start = pd.vco_freq; + pll_divisor = 4 * ref_clk; + do_div(dec_start, pll_divisor); + + frac_start = pd.vco_freq * (1 << 20); + + rem = do_div(frac_start, pll_divisor); + frac_start -= dec_start * (1 << 20); + if (rem > (pll_divisor >> 1)) + frac_start++; + + cpctrl = pll_get_cpctrl(frac_start, ref_clk, false); + rctrl = pll_get_rctrl(frac_start, false); + cctrl = pll_get_cctrl(frac_start, false); + integloop_gain = pll_get_integloop_gain(frac_start, bclk, + ref_clk, false); + + fdata = pd.vco_freq; + do_div(fdata, pd.vco_ratio); + + pll_cmp = pll_get_pll_cmp(fdata, ref_clk); + + DBG("VCO freq: %llu", pd.vco_freq); + DBG("fdata: %llu", fdata); + DBG("pix_clk: %lu", pix_clk); + DBG("tmds clk: %llu", tmds_clk); + DBG("HSCLK_SEL: %d", pd.hsclk_divsel); + DBG("DEC_START: %llu", dec_start); + DBG("DIV_FRAC_START: %llu", frac_start); + DBG("PLL_CPCTRL: %u", cpctrl); + DBG("PLL_RCTRL: %u", rctrl); + DBG("PLL_CCTRL: %u", cctrl); + DBG("INTEGLOOP_GAIN: %u", integloop_gain); + DBG("TX_BAND: %d", pd.tx_band_sel); + DBG("PLL_CMP: %u", pll_cmp); + + /* Convert these values to register specific values */ + if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) + cfg->com_svs_mode_clk_sel = 1; + else + cfg->com_svs_mode_clk_sel = 2; + + cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel); + 
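/* multi-byte values below are split into 8-bit register fields */ +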
cfg->com_pll_cctrl_mode0 = cctrl; + cfg->com_pll_rctrl_mode0 = rctrl; + cfg->com_cp_ctrl_mode0 = cpctrl; + cfg->com_dec_start_mode0 = dec_start; + cfg->com_div_frac_start1_mode0 = (frac_start & 0xff); + cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8); + cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16); + cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff); + cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8); + cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff); + cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8); + cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16); + cfg->com_lock_cmp_en = 0x0; + cfg->com_core_clk_en = 0x2c; + cfg->com_coreclk_div = HDMI_CORECLK_DIV; + cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0; + cfg->com_vco_tune_ctrl = 0x0; + + cfg->tx_lx_lane_mode[0] = + cfg->tx_lx_lane_mode[2] = 0x43; + + cfg->tx_lx_hp_pd_enables[0] = + cfg->tx_lx_hp_pd_enables[1] = + cfg->tx_lx_hp_pd_enables[2] = 0x0c; + cfg->tx_lx_hp_pd_enables[3] = 0x3; + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) + cfg->tx_lx_tx_band[i] = pd.tx_band_sel + 4; + + if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) { + cfg->tx_lx_tx_drv_lvl[0] = + cfg->tx_lx_tx_drv_lvl[1] = + cfg->tx_lx_tx_drv_lvl[2] = 0x25; + cfg->tx_lx_tx_drv_lvl[3] = 0x22; + + cfg->tx_lx_tx_emp_post1_lvl[0] = + cfg->tx_lx_tx_emp_post1_lvl[1] = + cfg->tx_lx_tx_emp_post1_lvl[2] = 0x23; + cfg->tx_lx_tx_emp_post1_lvl[3] = 0x27; + + cfg->tx_lx_vmode_ctrl1[0] = + cfg->tx_lx_vmode_ctrl1[1] = + cfg->tx_lx_vmode_ctrl1[2] = + cfg->tx_lx_vmode_ctrl1[3] = 0x00; + + cfg->tx_lx_vmode_ctrl2[0] = + cfg->tx_lx_vmode_ctrl2[1] = + cfg->tx_lx_vmode_ctrl2[2] = 0x0D; + + cfg->tx_lx_vmode_ctrl2[3] = 0x00; + } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) { + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + cfg->tx_lx_tx_drv_lvl[i] = 0x25; + cfg->tx_lx_tx_emp_post1_lvl[i] = 0x23; + cfg->tx_lx_vmode_ctrl1[i] = 0x00; + } + + cfg->tx_lx_vmode_ctrl2[0] = + cfg->tx_lx_vmode_ctrl2[1] = + cfg->tx_lx_vmode_ctrl2[2] = 0x0D; + cfg->tx_lx_vmode_ctrl2[3] = 0x00; + } else { + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + cfg->tx_lx_tx_drv_lvl[i] = 0x20; + cfg->tx_lx_tx_emp_post1_lvl[i] = 0x20; + cfg->tx_lx_vmode_ctrl1[i] = 0x00; + cfg->tx_lx_vmode_ctrl2[i] = 0x0E; + } + } + + DBG("com_svs_mode_clk_sel = 0x%x", cfg->com_svs_mode_clk_sel); + DBG("com_hsclk_sel = 0x%x", cfg->com_hsclk_sel); + DBG("com_lock_cmp_en = 0x%x", cfg->com_lock_cmp_en); + DBG("com_pll_cctrl_mode0 = 0x%x", cfg->com_pll_cctrl_mode0); + DBG("com_pll_rctrl_mode0 = 0x%x", cfg->com_pll_rctrl_mode0); + DBG("com_cp_ctrl_mode0 = 0x%x", cfg->com_cp_ctrl_mode0); + DBG("com_dec_start_mode0 = 0x%x", cfg->com_dec_start_mode0); + DBG("com_div_frac_start1_mode0 = 0x%x", cfg->com_div_frac_start1_mode0); + DBG("com_div_frac_start2_mode0 = 0x%x", cfg->com_div_frac_start2_mode0); + DBG("com_div_frac_start3_mode0 = 0x%x", cfg->com_div_frac_start3_mode0); + DBG("com_integloop_gain0_mode0 = 0x%x", cfg->com_integloop_gain0_mode0); + DBG("com_integloop_gain1_mode0 = 0x%x", cfg->com_integloop_gain1_mode0); + DBG("com_lock_cmp1_mode0 = 0x%x", cfg->com_lock_cmp1_mode0); + DBG("com_lock_cmp2_mode0 = 0x%x", cfg->com_lock_cmp2_mode0); + DBG("com_lock_cmp3_mode0 = 0x%x", cfg->com_lock_cmp3_mode0); + DBG("com_core_clk_en = 0x%x", cfg->com_core_clk_en); + DBG("com_coreclk_div = 0x%x", cfg->com_coreclk_div); + DBG("phy_mode = 0x%x", cfg->phy_mode); + + DBG("tx_l0_lane_mode = 0x%x", cfg->tx_lx_lane_mode[0]); + DBG("tx_l2_lane_mode = 0x%x", cfg->tx_lx_lane_mode[2]); + + for (i 
= 0; i < HDMI_NUM_TX_CHANNEL; i++) { + DBG("tx_l%d_tx_band = 0x%x", i, cfg->tx_lx_tx_band[i]); + DBG("tx_l%d_tx_drv_lvl = 0x%x", i, cfg->tx_lx_tx_drv_lvl[i]); + DBG("tx_l%d_tx_emp_post1_lvl = 0x%x", i, + cfg->tx_lx_tx_emp_post1_lvl[i]); + DBG("tx_l%d_vmode_ctrl1 = 0x%x", i, cfg->tx_lx_vmode_ctrl1[i]); + DBG("tx_l%d_vmode_ctrl2 = 0x%x", i, cfg->tx_lx_vmode_ctrl2[i]); + } + + return 0; +} + +static int hdmi_8996_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + struct hdmi_8996_phy_pll_reg_cfg cfg; + int i, ret; + + memset(&cfg, 0x00, sizeof(cfg)); + + ret = pll_calculate(rate, parent_rate, &cfg); + if (ret) { + DRM_ERROR("PLL calculation failed\n"); + return ret; + } + + /* Initially shut down PHY */ + DBG("Disabling PHY"); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x0); + udelay(500); + + /* Power up sequence */ + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x04); + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL, 0x0F); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL, 0x0F); + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE, + 0x03); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND, + cfg.tx_lx_tx_band[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN, + 0x03); + } + + hdmi_tx_chan_write(pll, 0, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE, + cfg.tx_lx_lane_mode[0]); + hdmi_tx_chan_write(pll, 2, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE, + cfg.tx_lx_lane_mode[2]); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E); + + /* Bypass VCO calibration */ + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL, + cfg.com_svs_mode_clk_sel); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_TRIM, 0x0F); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_IVCO, 0x0F); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL, + cfg.com_vco_tune_ctrl); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x06); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_SELECT, 0x30); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL, + cfg.com_hsclk_sel); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN, + cfg.com_lock_cmp_en); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0, + cfg.com_pll_cctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0, + cfg.com_pll_rctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0, + cfg.com_cp_ctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0, + cfg.com_dec_start_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0, + cfg.com_div_frac_start1_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0, + cfg.com_div_frac_start2_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0, + cfg.com_div_frac_start3_mode0); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0, + 
cfg.com_integloop_gain0_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0, + cfg.com_integloop_gain1_mode0); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0, + cfg.com_lock_cmp1_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0, + cfg.com_lock_cmp2_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0, + cfg.com_lock_cmp3_mode0); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN, + cfg.com_core_clk_en); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV, + cfg.com_coreclk_div); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG, 0x02); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM, 0x15); + + /* TX lanes setup (TX 0/1/2/3) */ + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL, + cfg.tx_lx_tx_drv_lvl[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL, + cfg.tx_lx_tx_emp_post1_lvl[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1, + cfg.tx_lx_vmode_ctrl1[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2, + cfg.tx_lx_vmode_ctrl2[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET, + 0x00); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET, + 0x00); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN, + 0x03); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN, + 0x40); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES, + cfg.tx_lx_hp_pd_enables[i]); + } + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_MODE, cfg.phy_mode); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1F); + + /* + * Ensure that vco configuration gets flushed to hardware before + * enabling the PLL + */ + wmb(); + + return 0; +} + +static int hdmi_8996_phy_ready_status(struct hdmi_phy *phy) +{ + u32 nb_tries = HDMI_PLL_POLL_MAX_READS; + unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US; + u32 status; + int phy_ready = 0; + + DBG("Waiting for PHY ready"); + + while (nb_tries--) { + status = hdmi_phy_read(phy, REG_HDMI_8996_PHY_STATUS); + phy_ready = status & BIT(0); + + if (phy_ready) + break; + + udelay(timeout); + } + + DBG("PHY is %sready", phy_ready ? "" : "*not* "); + + return phy_ready; +} + +static int hdmi_8996_pll_lock_status(struct hdmi_pll_8996 *pll) +{ + u32 status; + int nb_tries = HDMI_PLL_POLL_MAX_READS; + unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US; + int pll_locked = 0; + + DBG("Waiting for PLL lock"); + + while (nb_tries--) { + status = hdmi_pll_read(pll, + REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS); + pll_locked = status & BIT(0); + + if (pll_locked) + break; + + udelay(timeout); + } + + DBG("HDMI PLL is %slocked", pll_locked ? "" : "*not* "); + + return pll_locked; +} + +static int hdmi_8996_pll_prepare(struct clk_hw *hw) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + int i, ret = 0; + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x1); + udelay(100); + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19); + udelay(100); + + ret = hdmi_8996_pll_lock_status(pll); + if (!ret) + return -ETIMEDOUT; + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, + 0x6F); + + /* Disable SSC */ + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER1, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER2, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER, 0x2); + + ret = hdmi_8996_phy_ready_status(phy); + if (!ret) + return -ETIMEDOUT; + + /* Restart the retiming buffer */ + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x18); + udelay(1); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19); + + return 0; +} + +static long hdmi_8996_pll_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + if (rate < HDMI_PCLK_MIN_FREQ) + return HDMI_PCLK_MIN_FREQ; + else if (rate > HDMI_PCLK_MAX_FREQ) + return HDMI_PCLK_MAX_FREQ; + else + return rate; +} + +static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + u64 fdata; + u32 cmp1, cmp2, cmp3, pll_cmp; + + cmp1 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0); + cmp2 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0); + cmp3 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0); + + pll_cmp = cmp1 | (cmp2 << 8) | (cmp3 << 16); + + fdata = pll_cmp_to_fdata(pll_cmp + 1, parent_rate); + + do_div(fdata, 10); + + return fdata; +} + +static void hdmi_8996_pll_unprepare(struct clk_hw *hw) +{ +} + +static int hdmi_8996_pll_is_enabled(struct clk_hw *hw) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + u32 status; + int pll_locked; + + status = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS); + pll_locked = status & BIT(0); + + return pll_locked; +} + +static const struct clk_ops hdmi_8996_pll_ops = { + .set_rate = hdmi_8996_pll_set_clk_rate, + .round_rate = hdmi_8996_pll_round_rate, + .recalc_rate = hdmi_8996_pll_recalc_rate, + .prepare = hdmi_8996_pll_prepare, + .unprepare = hdmi_8996_pll_unprepare, + .is_enabled = hdmi_8996_pll_is_enabled, +}; + +static const char * const hdmi_pll_parents[] = { + "xo", +}; + +static struct clk_init_data pll_init = { + .name = "hdmipll", + .ops = &hdmi_8996_pll_ops, + .parent_names = hdmi_pll_parents, + .num_parents = ARRAY_SIZE(hdmi_pll_parents), +}; + +int hdmi_pll_8996_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_pll_8996 *pll; + struct clk *clk; + int i; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + pll->pdev = pdev; + + pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); + if (IS_ERR(pll->mmio_qserdes_com)) { + dev_err(dev, "failed to map pll base\n"); + return -ENOMEM; + } + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + char name[32], label[32]; + + snprintf(name, sizeof(name), "hdmi_tx_l%d", i); + snprintf(label, sizeof(label), "HDMI_TX_L%d", i); + + pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label); + if
(IS_ERR(pll->mmio_qserdes_tx[i])) { + dev_err(dev, "failed to map hdmi tx channel base\n"); + return -ENOMEM; + } + } + pll->clk_hw.init = &pll_init; + + clk = devm_clk_register(dev, &pll->clk_hw); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register pll clock\n"); + return PTR_ERR(clk); + } + + return 0; +} + +static const char * const hdmi_phy_8996_reg_names[] = { + "vddio", + "vcca", +}; + +static const char * const hdmi_phy_8996_clk_names[] = { + "mmagic_iface_clk", + "iface_clk", + "ref_clk", +}; + +const struct hdmi_phy_cfg hdmi_phy_8996_cfg = { + .type = MSM_HDMI_PHY_8996, + .reg_names = hdmi_phy_8996_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8996_reg_names), + .clk_names = hdmi_phy_8996_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8996_clk_names), +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c index cb01421ae1e4ba..38022b3af8c1a0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c @@ -17,166 +17,122 @@ #include "hdmi.h" -struct hdmi_phy_8x60 { - struct hdmi_phy base; - struct hdmi *hdmi; -}; -#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base) - -static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - kfree(phy_8x60); -} - static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - struct hdmi *hdmi = phy_8x60->hdmi; - /* De-serializer delay D/C for non-lbk mode: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0, - HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3)); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0, + HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3)); if (pixclock == 27000000) { /* video_format == HDMI_VFRMT_720x480p60_16_9 */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1, - HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | - HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3)); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1, + HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | + HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3)); } else { - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1, - HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | - HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4)); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1, + HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | + HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4)); } /* No matter what, start from the power down mode: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_PWRGEN | - HDMI_8x60_PHY_REG2_PD_PLL | - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_PWRGEN | + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); /* Turn PowerGen on: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_PLL | - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); /* Turn PLL power on: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | -
HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); /* Write to HIGH after PLL power down de-assert: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, - HDMI_8x60_PHY_REG3_PLL_ENABLE); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, + HDMI_8x60_PHY_REG3_PLL_ENABLE); /* ASIC power on; PHY REG9 = 0 */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0); /* Enable PLL lock detect, PLL lock det will go high after lock * Enable the re-time logic */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12, - HDMI_8x60_PHY_REG12_RETIMING_EN | - HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12, + HDMI_8x60_PHY_REG12_RETIMING_EN | + HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN); /* Drivers are on: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DESER); /* If the RX detector is needed: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_RCV_SENSE_EN | - HDMI_8x60_PHY_REG2_PD_DESER); - - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_RCV_SENSE_EN | + HDMI_8x60_PHY_REG2_PD_DESER); + + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG4, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG5, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG6, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG7, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG8, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG10, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG11, 0); /* If we want to use lock enable based on counting: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12, - HDMI_8x60_PHY_REG12_RETIMING_EN | - HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN | - HDMI_8x60_PHY_REG12_FORCE_LOCK); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12, + HDMI_8x60_PHY_REG12_RETIMING_EN | + HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN | + HDMI_8x60_PHY_REG12_FORCE_LOCK); } static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy) { - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - struct hdmi *hdmi = phy_8x60->hdmi; - /* Assert RESET PHY from controller */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - HDMI_PHY_CTRL_SW_RESET); + hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, + HDMI_PHY_CTRL_SW_RESET); udelay(10); /* De-assert RESET PHY from controller */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0); + hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, 0); /* Turn off Driver */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); udelay(10); 
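+ /* now that the drivers are off, shut the PLL down and power down the PHY, keeping RX-sense enabled */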
/* Disable PLL */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, 0); /* Power down PHY, but keep RX-sense: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_RCV_SENSE_EN | - HDMI_8x60_PHY_REG2_PD_PWRGEN | - HDMI_8x60_PHY_REG2_PD_PLL | - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_RCV_SENSE_EN | + HDMI_8x60_PHY_REG2_PD_PWRGEN | + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); } -static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = { - .destroy = hdmi_phy_8x60_destroy, - .powerup = hdmi_phy_8x60_powerup, - .powerdown = hdmi_phy_8x60_powerdown, +const struct hdmi_phy_cfg hdmi_phy_8x60_cfg = { + .type = MSM_HDMI_PHY_8x60, + .powerup = hdmi_phy_8x60_powerup, + .powerdown = hdmi_phy_8x60_powerdown, }; - -struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi) -{ - struct hdmi_phy_8x60 *phy_8x60; - struct hdmi_phy *phy = NULL; - int ret; - - phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL); - if (!phy_8x60) { - ret = -ENOMEM; - goto fail; - } - - phy = &phy_8x60->base; - - phy->funcs = &hdmi_phy_8x60_funcs; - - phy_8x60->hdmi = hdmi; - - return phy; - -fail: - if (phy) - hdmi_phy_8x60_destroy(phy); - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c index 56ab8917ee9a35..145e38eb17e253 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c @@ -17,84 +17,40 @@ #include "hdmi.h" -struct hdmi_phy_8x74 { - struct hdmi_phy base; - void __iomem *mmio; -}; -#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base) - - -static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data) -{ - msm_writel(data, phy->mmio + reg); -} - -//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg) -//{ -// return msm_readl(phy->mmio + reg); -//} - -static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - kfree(phy_8x74); -} - static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - - phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b); - phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20); + hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0, 0x1b); + hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1, 0xf2); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_CFG0, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN0, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN1, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN2, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN3, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL1, 0x20); } static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy) { - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f); + hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL0, 
0x7f); } -static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = { - .destroy = hdmi_phy_8x74_destroy, - .powerup = hdmi_phy_8x74_powerup, - .powerdown = hdmi_phy_8x74_powerdown, +static const char * const hdmi_phy_8x74_reg_names[] = { + "core-vdda", + "vddio", }; -struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi) -{ - struct hdmi_phy_8x74 *phy_8x74; - struct hdmi_phy *phy = NULL; - int ret; - - phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL); - if (!phy_8x74) { - ret = -ENOMEM; - goto fail; - } - - phy = &phy_8x74->base; - - phy->funcs = &hdmi_phy_8x74_funcs; - - /* for 8x74, the phy mmio is mapped separately: */ - phy_8x74->mmio = msm_ioremap(hdmi->pdev, - "phy_physical", "HDMI_8x74"); - if (IS_ERR(phy_8x74->mmio)) { - ret = PTR_ERR(phy_8x74->mmio); - goto fail; - } - - return phy; +static const char * const hdmi_phy_8x74_clk_names[] = { + "iface_clk", + "alt_iface_clk" +}; -fail: - if (phy) - hdmi_phy_8x74_destroy(phy); - return ERR_PTR(ret); -} +const struct hdmi_phy_cfg hdmi_phy_8x74_cfg = { + .type = MSM_HDMI_PHY_8x74, + .powerup = hdmi_phy_8x74_powerup, + .powerdown = hdmi_phy_8x74_powerdown, + .reg_names = hdmi_phy_8x74_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8x74_reg_names), + .clk_names = hdmi_phy_8x74_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8x74_clk_names), +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c new file mode 100644 index 00000000000000..e0464fa7f4410b --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/clk-provider.h> +#include "hdmi.h" + +struct hdmi_pll_8960 { + struct platform_device *pdev; + struct clk_hw clk_hw; + void __iomem *mmio; + + unsigned long pixclk; +}; + +#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8960, clk_hw) + +/* + * HDMI PLL: + * + * To get the parent clock setup properly, we need to plug in hdmi pll + * configuration into common-clock-framework.
+ */ + +struct pll_rate { + unsigned long rate; + int num_reg; + struct { + u32 val; + u32 reg; + } conf[32]; +}; + +/* NOTE: keep sorted highest freq to lowest: */ +static const struct pll_rate freqtbl[] = { + { 154000000, 14, { + { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + /* 1080p60/1080p50 case */ + { 148500000, 27, { + { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, + { 108000000, 13, { + { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */ + { 74250000, 8, { + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + } + }, + { 74176000, 14, { + { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, 
REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + { 65000000, 14, { + { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + /* 480p60/480i60 */ + { 27030000, 18, { + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, + /* 576p50/576i50 */ + { 27000000, 27, { + { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, + /* 640x480p60 */ + { 25200000, 27, { + { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, 
REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, +}; + +static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data) +{ + msm_writel(data, pll->mmio + reg); +} + +static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg) +{ + return msm_readl(pll->mmio + reg); +} + +static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll) +{ + return platform_get_drvdata(pll->pdev); +} + +static int hdmi_pll_enable(struct clk_hw *hw) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + int timeout_count, pll_lock_retry = 10; + unsigned int val; + + DBG(""); + + /* Assert PLL S/W reset */ + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10); + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a); + + /* Wait for a short time before de-asserting + * to allow the hardware to complete its job. + * This much of delay should be fine for hardware + * to assert and de-assert. + */ + udelay(10); + + /* De-assert PLL S/W reset */ + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); + + val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12); + val |= HDMI_8960_PHY_REG12_SW_RESET; + /* Assert PHY S/W reset */ + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + val &= ~HDMI_8960_PHY_REG12_SW_RESET; + /* + * Wait for a short time before de-asserting to allow the hardware to + * complete its job. This much of delay should be fine for hardware to + * assert and de-assert. + */ + udelay(10); + /* De-assert PHY S/W reset */ + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x3f); + + val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12); + val |= HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + /* Wait 10 us for enabling global power for PHY */ + mb(); + udelay(10); + + val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B); + val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B; + val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL; + pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x80); + + timeout_count = 1000; + while (--pll_lock_retry > 0) { + + /* are we there yet? */ + val = pll_read(pll, REG_HDMI_8960_PHY_PLL_STATUS0); + if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK) + break; + + udelay(1); + + if (--timeout_count > 0) + continue; + + /* + * PLL has still not locked. 
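+		 * (timeout_count, i.e. 1000 polls of PLL_STATUS0 at
+		 * 1 us apiece, has expired by this point).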
+ * Do a software reset and try again + * Assert PLL S/W reset first + */ + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); + udelay(10); + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); + + /* + * Wait for a short duration for the PLL calibration + * before checking if the PLL gets locked + */ + udelay(350); + + timeout_count = 1000; + } + + return 0; +} + +static void hdmi_pll_disable(struct clk_hw *hw) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + unsigned int val; + + DBG(""); + + val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12); + val &= ~HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + + val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B); + val |= HDMI_8960_PHY_REG12_SW_RESET; + val &= ~HDMI_8960_PHY_REG12_PWRDN_B; + pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); + /* Make sure HDMI PHY/PLL are powered down */ + mb(); +} + +static const struct pll_rate *find_rate(unsigned long rate) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(freqtbl); i++) + if (rate > freqtbl[i].rate) + return &freqtbl[i - 1]; + + return &freqtbl[i - 1]; +} + +static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + + return pll->pixclk; +} + +static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + const struct pll_rate *pll_rate = find_rate(rate); + + return pll_rate->rate; +} + +static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + const struct pll_rate *pll_rate = find_rate(rate); + int i; + + DBG("rate=%lu", rate); + + for (i = 0; i < pll_rate->num_reg; i++) + pll_write(pll, pll_rate->conf[i].reg, pll_rate->conf[i].val); + + pll->pixclk = rate; + + return 0; +} + +static const struct clk_ops hdmi_pll_ops = { + .enable = hdmi_pll_enable, + .disable = hdmi_pll_disable, + .recalc_rate = hdmi_pll_recalc_rate, + .round_rate = hdmi_pll_round_rate, + .set_rate = hdmi_pll_set_rate, +}; + +static const char * const hdmi_pll_parents[] = { + "pxo", +}; + +static struct clk_init_data pll_init = { + .name = "hdmi_pll", + .ops = &hdmi_pll_ops, + .parent_names = hdmi_pll_parents, + .num_parents = ARRAY_SIZE(hdmi_pll_parents), +}; + +int hdmi_pll_8960_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_pll_8960 *pll; + struct clk *clk; + int i; + + /* sanity check: */ + for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) + if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate)) + return -EINVAL; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); + if (IS_ERR(pll->mmio)) { + dev_err(dev, "failed to map pll base\n"); + return -ENOMEM; + } + + pll->pdev = pdev; + pll->clk_hw.init = &pll_init; + + clk = devm_clk_register(dev, &pll->clk_hw); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register pll clock\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index e115318402bd87..a7a21164dc696c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -170,6 +170,30 @@ static const struct mdp_kms_funcs kms_funcs = { .set_irqmask = mdp5_set_irqmask, }; +static void mdp5_disable_bus_clocks(struct mdp5_kms *mdp5_kms) +{ + if 
(mdp5_kms->mmagic_mdss_axi_clk) + clk_disable_unprepare(mdp5_kms->mmagic_mdss_axi_clk); + if (mdp5_kms->mmagic_mmss_axi_clk) + clk_disable_unprepare(mdp5_kms->mmagic_mmss_axi_clk); + if (mdp5_kms->mmss_s0_axi_clk) + clk_disable_unprepare(mdp5_kms->mmss_s0_axi_clk); + if (mdp5_kms->mmagic_bimc_axi_clk) + clk_disable_unprepare(mdp5_kms->mmagic_bimc_axi_clk); +} + +static void mdp5_enable_bus_clocks(struct mdp5_kms *mdp5_kms) +{ + if (mdp5_kms->mmagic_bimc_axi_clk) + clk_prepare_enable(mdp5_kms->mmagic_bimc_axi_clk); + if (mdp5_kms->mmss_s0_axi_clk) + clk_prepare_enable(mdp5_kms->mmss_s0_axi_clk); + if (mdp5_kms->mmagic_mmss_axi_clk) + clk_prepare_enable(mdp5_kms->mmagic_mmss_axi_clk); + if (mdp5_kms->mmagic_mdss_axi_clk) + clk_prepare_enable(mdp5_kms->mmagic_mdss_axi_clk); +} + int mdp5_disable(struct mdp5_kms *mdp5_kms) { DBG(""); @@ -177,9 +201,14 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); + if (mdp5_kms->iommu_clk) + clk_disable_unprepare(mdp5_kms->iommu_clk); if (mdp5_kms->lut_clk) clk_disable_unprepare(mdp5_kms->lut_clk); + if (mdp5_kms->mmagic_ahb_clk) + clk_disable_unprepare(mdp5_kms->mmagic_ahb_clk); + mdp5_disable_bus_clocks(mdp5_kms); return 0; } @@ -187,11 +216,17 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) { DBG(""); + mdp5_enable_bus_clocks(mdp5_kms); + + if (mdp5_kms->mmagic_ahb_clk) + clk_prepare_enable(mdp5_kms->mmagic_ahb_clk); clk_prepare_enable(mdp5_kms->ahb_clk); clk_prepare_enable(mdp5_kms->axi_clk); clk_prepare_enable(mdp5_kms->core_clk); if (mdp5_kms->lut_clk) clk_prepare_enable(mdp5_kms->lut_clk); + if (mdp5_kms->iommu_clk) + clk_prepare_enable(mdp5_kms->iommu_clk); return 0; } @@ -443,7 +478,7 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms, mdp5_enable(mdp5_kms); version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION); - mdp5_disable(mdp5_kms); + //mdp5_disable(mdp5_kms); *major = FIELD(version, MDSS_HW_VERSION_MAJOR); *minor = FIELD(version, MDSS_HW_VERSION_MINOR); @@ -589,6 +624,23 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) return mdp5_encoder_get_framecount(encoder); } +/* HACK: enable bimc gdsc */ +static void enable_mmagic_bimc_gdsc(void) +{ + void __iomem *base = ioremap(0x8c5000, SZ_1K); + u32 val; + + val = ioread32(base + 0x29c); + val &= ~0x1; + iowrite32(val, base + 0x29c); + + wmb(); + + iounmap(base); + + msleep(5); +} + struct msm_kms *mdp5_kms_init(struct drm_device *dev) { struct platform_device *pdev = dev->platformdev; @@ -627,6 +679,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } + enable_mmagic_bimc_gdsc(); + mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); if (IS_ERR(mdp5_kms->vdd)) { ret = PTR_ERR(mdp5_kms->vdd); @@ -658,6 +712,18 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) /* optional clocks: */ get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); + get_clk(pdev, &mdp5_kms->mmagic_ahb_clk, "mmagic_iface_clk", false); + get_clk(pdev, &mdp5_kms->iommu_clk, "iommu_clk", false); + + /* HACK: get bus clocks */ + get_clk(pdev, &mdp5_kms->mmagic_bimc_axi_clk, "mmagic_bimc_bus_clk", + false); + get_clk(pdev, &mdp5_kms->mmss_s0_axi_clk, "mmss_s0_bus_clk", + false); + get_clk(pdev, &mdp5_kms->mmagic_mmss_axi_clk, "mmagic_mmss_bus_clk", + false); + get_clk(pdev, &mdp5_kms->mmagic_mdss_axi_clk, "mmagic_mdss_bus_clk", + false); /* we need to set a default rate before enabling. 
Set a safe * rate first, then figure out hw revision, and then set a @@ -680,6 +746,10 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) /* TODO: compute core clock rate at runtime */ clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); + /* HACK : set the axi clock to some valid rate */ + if (mdp5_kms->mmagic_mdss_axi_clk) + clk_set_rate(mdp5_kms->mmagic_mdss_axi_clk, 75000000); + /* * Some chipsets have a Shared Memory Pool (SMP), while others * have dedicated latency buffering per source pipe instead; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 00730ba08a60ac..8bf0ba2651f913 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -53,6 +53,14 @@ struct mdp5_kms { struct clk *core_clk; struct clk *lut_clk; struct clk *vsync_clk; + struct clk *mmagic_ahb_clk; + struct clk *iommu_clk; + + /* bus clocks */ + struct clk *mmagic_bimc_axi_clk; + struct clk *mmss_s0_axi_clk; + struct clk *mmagic_mmss_axi_clk; + struct clk *mmagic_mdss_axi_clk; /* * lock to protect access to global resources: ie., following register: diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index c1e7bba2fdb7f6..f44a091cc601dd 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -242,16 +242,47 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); struct hdmi; +#ifdef CONFIG_DRM_MSM_HDMI int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, struct drm_encoder *encoder); void __init hdmi_register(void); void __exit hdmi_unregister(void); +#else +static inline int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, + struct drm_encoder *encoder) +{ + return -EINVAL; +} + +static inline void __init hdmi_register(void) +{ +} + +static inline void __exit hdmi_unregister(void) +{ +} +#endif struct msm_edp; +#ifdef CONFIG_DRM_MSM_EDP void __init msm_edp_register(void); void __exit msm_edp_unregister(void); int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, struct drm_encoder *encoder); +#else +static inline void __init msm_edp_register(void) +{ +} +static inline void __exit msm_edp_unregister(void) +{ +} +static inline int msm_edp_modeset_init(struct msm_edp *edp, + struct drm_device *dev, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +#endif struct msm_dsi; enum msm_dsi_encoder_id { diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 56a17ec5b5efe2..cdd7ee34c16568 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -291,6 +291,55 @@ static const struct regulator_desc pm8916_buck_hvo_smps = { .ops = &rpm_smps_ldo_ops, }; +static const struct regulator_desc pm8994_hfsmps = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500), + REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000), + }, + .n_linear_ranges = 2, + .n_voltages = 159, + .ops = &rpm_smps_ldo_ops, +}; + +static const struct regulator_desc pm8994_ftsmps = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000), + REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000), + }, + .n_linear_ranges = 2, + .n_voltages = 350, + .ops = &rpm_smps_ldo_ops, +}; + +static const struct regulator_desc pm8994_nldo = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(750000, 0, 63, 
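+				/* selectors 0..63: 750 mV to 1537.5 mV
+				 * in 12.5 mV steps
+				 */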
12500), + }, + .n_linear_ranges = 1, + .n_voltages = 64, + .ops = &rpm_smps_ldo_ops, +}; + +static const struct regulator_desc pm8994_pldo = { + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000), + REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), + }, + .n_linear_ranges = 2, + .n_voltages = 100, + .ops = &rpm_smps_ldo_ops, +}; + +static const struct regulator_desc pm8994_switch = { + .ops = &rpm_switch_ops, +}; + +static const struct regulator_desc pm8994_lnldo = { + .fixed_uV = 1740000, + .n_voltages = 1, + .ops = &rpm_smps_ldo_ops, +}; + struct rpm_regulator_data { const char *name; u32 type; @@ -429,10 +478,62 @@ static const struct rpm_regulator_data rpm_pma8084_regulators[] = { {} }; +static const struct rpm_regulator_data rpm_pm8994_regulators[] = { + { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" }, + { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" }, + { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" }, + { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" }, + { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8994_hfsmps, "vdd_s5" }, + { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8994_ftsmps, "vdd_s6" }, + { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8994_hfsmps, "vdd_s7" }, + { "s8", QCOM_SMD_RPM_SMPA, 8, &pm8994_ftsmps, "vdd_s8" }, + { "s9", QCOM_SMD_RPM_SMPA, 9, &pm8994_ftsmps, "vdd_s9" }, + { "s10", QCOM_SMD_RPM_SMPA, 10, &pm8994_ftsmps, "vdd_s10" }, + { "s11", QCOM_SMD_RPM_SMPA, 11, &pm8994_ftsmps, "vdd_s11" }, + { "s12", QCOM_SMD_RPM_SMPA, 12, &pm8994_ftsmps, "vdd_s12" }, + { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8994_nldo, "vdd_l1" }, + { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8994_nldo, "vdd_l2" }, + { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8994_nldo, "vdd_l3" }, + { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8994_nldo, "vdd_l4" }, + { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8994_lnldo, "vdd_l5" }, + { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8994_pldo, "vdd_l6" }, + { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8994_lnldo, "vdd_l7" }, + { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8994_pldo, "vdd_l8" }, + { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8994_pldo, "vdd_l9" }, + { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8994_pldo, "vdd_l10" }, + { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8994_nldo, "vdd_l11" }, + { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8994_pldo, "vdd_l12" }, + { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8994_pldo, "vdd_l13" }, + { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8994_pldo, "vdd_l14" }, + { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8994_pldo, "vdd_l15" }, + { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8994_pldo, "vdd_l16" }, + { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8994_pldo, "vdd_l17" }, + { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8994_pldo, "vdd_l18" }, + { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8994_pldo, "vdd_l19" }, + { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8994_pldo, "vdd_l20" }, + { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8994_pldo, "vdd_l21" }, + { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8994_pldo, "vdd_l22" }, + { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_pldo, "vdd_l23" }, + { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8994_pldo, "vdd_l24" }, + { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8994_pldo, "vdd_l25" }, + { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8994_nldo, "vdd_l26" }, + { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8994_nldo, "vdd_l27" }, + { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8994_nldo, "vdd_l28" }, + { "l29", QCOM_SMD_RPM_LDOA, 29, &pm8994_pldo, "vdd_l29" }, + { "l30", QCOM_SMD_RPM_LDOA, 30, &pm8994_pldo, "vdd_l30" }, + { "l31", QCOM_SMD_RPM_LDOA, 31, &pm8994_nldo, "vdd_l31" }, + { "l32", QCOM_SMD_RPM_LDOA, 32, &pm8994_pldo, "vdd_l32" }, + { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8994_switch, "vdd_lvs1_2" }, + { "lvs2", 
QCOM_SMD_RPM_VSA, 2, &pm8994_switch, "vdd_lvs1_2" }, + + {} +}; + static const struct of_device_id rpm_of_match[] = { { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators }, { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators }, { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators }, + { .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators }, { .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators }, {} }; diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 461b387d03cce5..79ab5bf55f0dd5 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -49,6 +49,15 @@ config QCOM_SMD_RPM frequencies controlled by the RPM on these devices. Say M here if you want to include support for the Qualcomm RPM as a + +config MSM_GLINK + tristate "Generic Link (G-Link)" + help + G-Link is a generic link transport that replaces SMD. It is used + within a System-on-Chip (SoC) for communication between both internal + processors and external peripherals. The actual physical transport + is handled by transport plug-ins that can be individually enabled and + configured separately. module. This will build a module called "qcom-smd-rpm". config QCOM_SMEM_STATE @@ -76,3 +85,22 @@ config QCOM_WCNSS_CTRL help Client driver for the WCNSS_CTRL SMD channel, used to download nv firmware to a newly booted WCNSS chip. + +config MSM_GLINK_SMEM_NATIVE_XPRT + depends on QCOM_SMEM + depends on MSM_GLINK + tristate "Generic Link (G-Link) SMEM Native Transport" + help + G-Link SMEM Native Transport is a G-Link Transport plug-in. It allows + G-Link communication to remote entities through a shared memory + physical transport. The nature of shared memory limits this G-Link + transport to only connecting with entities internal to the + System-on-Chip. + +config QCOM_SMEM + tristate "Qualcomm Shared Memory Manager (SMEM)" + depends on ARCH_QCOM + help + Say y here to enable support for the Qualcomm Shared Memory Manager. + The driver provides an interface to items in a heap shared among all + processors in a Qualcomm platform. diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index fdd664edf0bdf3..6fcf95488bec13 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -1,9 +1,11 @@ obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_PM) += spm.o -obj-$(CONFIG_QCOM_SMD) += smd.o +obj-$(CONFIG_QCOM_SMD) += smd.o glink.o obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o obj-$(CONFIG_QCOM_SMEM) += smem.o obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o obj-$(CONFIG_QCOM_SMP2P) += smp2p.o obj-$(CONFIG_QCOM_SMSM) += smsm.o obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o +obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o +obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c new file mode 100644 index 00000000000000..f398d0c48fc224 --- /dev/null +++ b/drivers/soc/qcom/glink.c @@ -0,0 +1,5780 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "glink_core_if.h" +#include "glink_private.h" +#include "glink_xprt_if.h" + +/* Number of internal IPC Logging log pages */ +#define NUM_LOG_PAGES 10 +#define GLINK_PM_QOS_HOLDOFF_MS 10 +#define GLINK_QOS_DEF_NUM_TOKENS 10 +#define GLINK_QOS_DEF_NUM_PRIORITY 1 +#define GLINK_QOS_DEF_MTU 2048 + +/** + * struct glink_qos_priority_bin - Packet Scheduler's priority bucket + * @max_rate_kBps: Maximum rate supported by the priority bucket. + * @power_state: Transport power state for this priority bin. + * @tx_ready: List of channels ready for tx in the priority bucket. + * @active_ch_cnt: Active channels of this priority. + */ +struct glink_qos_priority_bin { + unsigned long max_rate_kBps; + uint32_t power_state; + struct list_head tx_ready; + uint32_t active_ch_cnt; +}; + +/** + * struct glink_core_xprt_ctx - transport representation structure + * @xprt_state_lhb0: controls read/write access to transport state + * @list_node: used to chain this transport in a global + * transport list + * @name: name of this transport + * @edge: what this transport connects to + * @id: the id to use for channel migration + * @versions: array of transport versions this implementation + * supports + * @versions_entries: number of entries in @versions + * @local_version_idx: local version index into @versions this + * transport is currently running + * @remote_version_idx: remote version index into @versions this + * transport is currently running + * @l_features: Features negotiated by the local side + * @capabilities: Capabilities of underlying transport + * @ops: transport defined implementation of common + * operations + * @local_state: value from local_channel_state_e representing + * the local state of this transport + * @remote_neg_completed: is the version negotiation with the remote end + * completed + * @xprt_ctx_lock_lhb1 lock to protect @next_lcid and @channels + * @next_lcid: logical channel identifier to assign to the next + * created channel + * @max_cid: maximum number of channel identifiers supported + * @max_iid: maximum number of intent identifiers supported + * @tx_work: work item to process @tx_ready + * @tx_wq: workqueue to run @tx_work + * @channels: list of all existing channels on this transport + * @mtu: MTU supported by this transport. + * @token_count: Number of tokens to be assigned per assignment. + * @curr_qos_rate_kBps: Aggregate of currently supported QoS requests. + * @threshold_rate_kBps: Maximum Rate allocated for QoS traffic. + * @num_priority: Number of priority buckets in the transport. + * @tx_ready_lock_lhb2: lock to protect @tx_ready + * @active_high_prio: Highest priority of active channels. + * @prio_bin: Pointer to priority buckets. + * @pm_qos_req: power management QoS request for TX path + * @qos_req_active: a vote is active with the PM QoS system + * @tx_path_activity: transmit activity has occurred + * @pm_qos_work: removes PM QoS vote due to inactivity + * @xprt_dbgfs_lock_lhb3: debugfs channel structure lock + * @log_ctx: IPC logging context for this transport. 
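+ *
+ * Note: a transport that leaves @num_priority at the default
+ * GLINK_QOS_DEF_NUM_PRIORITY (1) serves every channel from the single
+ * best-effort bucket and fails QoS requests with -EOPNOTSUPP (see
+ * glink_qos_check_feasibility()); only multi-priority transports spread
+ * traffic across @prio_bin, capped by @threshold_rate_kBps.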
+ */ +struct glink_core_xprt_ctx { + struct rwref_lock xprt_state_lhb0; + struct list_head list_node; + char name[GLINK_NAME_SIZE]; + char edge[GLINK_NAME_SIZE]; + uint16_t id; + const struct glink_core_version *versions; + size_t versions_entries; + uint32_t local_version_idx; + uint32_t remote_version_idx; + uint32_t l_features; + uint32_t capabilities; + struct glink_transport_if *ops; + enum transport_state_e local_state; + bool remote_neg_completed; + + spinlock_t xprt_ctx_lock_lhb1; + struct list_head channels; + uint32_t next_lcid; + struct list_head free_lcid_list; + + uint32_t max_cid; + uint32_t max_iid; + struct work_struct tx_work; + struct workqueue_struct *tx_wq; + + size_t mtu; + uint32_t token_count; + unsigned long curr_qos_rate_kBps; + unsigned long threshold_rate_kBps; + uint32_t num_priority; + spinlock_t tx_ready_lock_lhb2; + uint32_t active_high_prio; + struct glink_qos_priority_bin *prio_bin; + + struct pm_qos_request pm_qos_req; + bool qos_req_active; + bool tx_path_activity; + struct delayed_work pm_qos_work; + + struct mutex xprt_dbgfs_lock_lhb3; + void *log_ctx; +}; + +/** + * Channel Context + * @xprt_state_lhb0: controls read/write access to channel state + * @port_list_node: channel list node used by transport "channels" list + * @tx_ready_list_node: channels that have data ready to transmit + * @name: name of the channel + * + * @user_priv: user opaque data type passed into glink_open() + * @notify_rx: RX notification function + * @notify_tx_done: TX-done notification function (remote side is done) + * @notify_state: Channel state (connected / disconnected) notifications + * @notify_rx_intent_req: Request from remote side for an intent + * @notify_rxv: RX notification function (for io buffer chain) + * @notify_rx_sigs: RX signal change notification + * @notify_rx_abort: Channel close RX Intent aborted + * @notify_tx_abort: Channel close TX aborted + * @notify_rx_tracer_pkt: Receive notification for tracer packet + * @notify_remote_rx_intent: Receive notification for remote-queued RX intent + * + * @transport_ptr: Transport this channel uses + * @lcid: Local channel ID + * @rcid: Remote channel ID + * @local_open_state: Local channel state + * @remote_opened: Remote channel state (opened or closed) + * @int_req_ack: Remote side intent request ACK state + * @int_req_ack_complete: Intent tracking completion - received remote ACK + * @int_req_complete: Intent tracking completion - received intent + * @rx_intent_req_timeout_jiffies: Timeout for requesting an RX intent, in + * jiffies; if set to 0, timeout is infinite + * + * @local_rx_intent_lst_lock_lhc1: RX intent list lock + * @local_rx_intent_list: Active RX Intents queued by client + * @local_rx_intent_ntfy_list: Client notified, waiting for rx_done() + * @local_rx_intent_free_list: Available intent container structure + * + * @rmt_rx_intent_lst_lock_lhc2: Remote RX intent list lock + * @rmt_rx_intent_list: Remote RX intent list + * + * @max_used_liid: Maximum Local Intent ID used + * @dummy_riid: Dummy remote intent ID + * + * @tx_lists_lock_lhc3: TX list lock + * @tx_active: Ready to transmit + * + * @tx_pending_rmt_done_lock_lhc4: Remote-done list lock + * @tx_pending_remote_done: Transmitted, waiting for remote done + * @lsigs: Local signals + * @rsigs: Remote signals + * @pending_delete: waiting for channel to be deleted + * @no_migrate: The local client does not want to + * migrate transports + * @local_xprt_req: The transport the local side requested + * @local_xprt_resp: The response to 
@local_xprt_req + * @remote_xprt_req: The transport the remote side requested + * @remote_xprt_resp: The response to @remote_xprt_req + * @curr_priority: Channel's current priority. + * @initial_priority: Channel's initial priority. + * @token_count: Tokens for consumption by packet. + * @txd_len: Transmitted data size in the current + * token assignment cycle. + * @token_start_time: Time at which tokens are assigned. + * @req_rate_kBps: Current QoS request by the channel. + * @tx_intent_cnt: Intent count to transmit soon in future. + * @tx_cnt: Packets to be picked by tx scheduler. + */ +struct channel_ctx { + struct rwref_lock ch_state_lhc0; + struct list_head port_list_node; + struct list_head tx_ready_list_node; + char name[GLINK_NAME_SIZE]; + + /* user info */ + void *user_priv; + int (*notify_rx)(void *handle, const void *data, size_t size); + void (*notify_tx_done)(void *handle, const void *priv, + const void *pkt_priv, const void *ptr); + void (*notify_state)(void *handle, const void *priv, unsigned event); + bool (*notify_rx_intent_req)(void *handle, const void *priv, + size_t req_size); + void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv, + void *iovec, size_t size, + void * (*vbuf_provider)(void *iovec, size_t offset, + size_t *size), + void * (*pbuf_provider)(void *iovec, size_t offset, + size_t *size)); + void (*notify_rx_sigs)(void *handle, const void *priv, + uint32_t old_sigs, uint32_t new_sigs); + void (*notify_rx_abort)(void *handle, const void *priv, + const void *pkt_priv); + void (*notify_tx_abort)(void *handle, const void *priv, + const void *pkt_priv); + void (*notify_rx_tracer_pkt)(void *handle, const void *priv, + const void *pkt_priv, const void *ptr, size_t size); + void (*notify_remote_rx_intent)(void *handle, const void *priv, + size_t size); + + /* internal port state */ + struct glink_core_xprt_ctx *transport_ptr; + uint32_t lcid; + uint32_t rcid; + enum local_channel_state_e local_open_state; + bool remote_opened; + bool int_req_ack; + struct completion int_req_ack_complete; + struct completion int_req_complete; + unsigned long rx_intent_req_timeout_jiffies; + + spinlock_t local_rx_intent_lst_lock_lhc1; + struct list_head local_rx_intent_list; + struct list_head local_rx_intent_ntfy_list; + struct list_head local_rx_intent_free_list; + + spinlock_t rmt_rx_intent_lst_lock_lhc2; + struct list_head rmt_rx_intent_list; + + uint32_t max_used_liid; + uint32_t dummy_riid; + + spinlock_t tx_lists_lock_lhc3; + struct list_head tx_active; + + spinlock_t tx_pending_rmt_done_lock_lhc4; + struct list_head tx_pending_remote_done; + + uint32_t lsigs; + uint32_t rsigs; + bool pending_delete; + + bool no_migrate; + uint16_t local_xprt_req; + uint16_t local_xprt_resp; + uint16_t remote_xprt_req; + uint16_t remote_xprt_resp; + + uint32_t curr_priority; + uint32_t initial_priority; + uint32_t token_count; + size_t txd_len; + unsigned long token_start_time; + unsigned long req_rate_kBps; + uint32_t tx_intent_cnt; + uint32_t tx_cnt; +}; + +static struct glink_core_if core_impl; +static void *log_ctx; +static unsigned glink_debug_mask = QCOM_GLINK_INFO; +module_param_named(debug_mask, glink_debug_mask, + uint, S_IRUGO | S_IWUSR | S_IWGRP); + +static unsigned glink_pm_qos; +module_param_named(pm_qos_enable, glink_pm_qos, + uint, S_IRUGO | S_IWUSR | S_IWGRP); + + +static LIST_HEAD(transport_list); + +/* + * Used while notifying the clients about link state events. 
Since the clients + * need to store the callback information temporarily and since all the + * existing accesses to transport list are in non-IRQ context, defining the + * transport_list_lock as a mutex. + */ +static DEFINE_MUTEX(transport_list_lock_lha0); + +struct link_state_notifier_info { + struct list_head list; + char transport[GLINK_NAME_SIZE]; + char edge[GLINK_NAME_SIZE]; + void (*glink_link_state_notif_cb)( + struct glink_link_state_cb_info *cb_info, void *priv); + void *priv; +}; +static LIST_HEAD(link_state_notifier_list); +static DEFINE_MUTEX(link_state_notifier_lock_lha1); + +static struct glink_core_xprt_ctx *find_open_transport(const char *edge, + const char *name, + bool initial_xprt, + uint16_t *best_id); + +static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt); + +static struct channel_ctx *xprt_lcid_to_ch_ctx_get( + struct glink_core_xprt_ctx *xprt_ctx, + uint32_t lcid); + +static struct channel_ctx *xprt_rcid_to_ch_ctx_get( + struct glink_core_xprt_ctx *xprt_ctx, + uint32_t rcid); + +static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr, + struct channel_ctx *ch_ptr, + struct glink_core_tx_pkt *tx_info); + +static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr, + struct channel_ctx *ch_ptr, + struct glink_core_tx_pkt *tx_info); + +static void tx_work_func(struct work_struct *work); + +static struct channel_ctx *ch_name_to_ch_ctx_create( + struct glink_core_xprt_ctx *xprt_ctx, + const char *name); + +static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size, + uint32_t riid); + +static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size, + uint32_t *riid_ptr, size_t *intent_size); + +static struct glink_core_rx_intent *ch_push_local_rx_intent( + struct channel_ctx *ctx, const void *pkt_priv, size_t size); + +static void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid); + +static struct glink_core_rx_intent *ch_get_local_rx_intent( + struct channel_ctx *ctx, uint32_t liid); + +static void ch_set_local_rx_intent_notified(struct channel_ctx *ctx, + struct glink_core_rx_intent *intent_ptr); + +static struct glink_core_rx_intent *ch_get_local_rx_intent_notified( + struct channel_ctx *ctx, const void *ptr); + +static void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx, + struct glink_core_rx_intent *liid_ptr, bool reuse); + +static struct glink_core_rx_intent *ch_get_free_local_rx_intent( + struct channel_ctx *ctx); + +static void ch_purge_intent_lists(struct channel_ctx *ctx); + +static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx, + struct channel_ctx *ctx, + uint32_t rcid); + +static bool ch_is_fully_opened(struct channel_ctx *ctx); +static bool ch_is_fully_closed(struct channel_ctx *ctx); + +struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(struct channel_ctx *ctx, + uint32_t riid); + +static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx, + struct glink_core_tx_pkt *tx_pkt); + +static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if + *if_ptr, uint32_t rcid, bool granted); + +static bool glink_core_remote_close_common(struct channel_ctx *ctx); + +static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr, + enum glink_link_state link_state); + +static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr); +static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr); +static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr); +static void 
glink_pm_qos_cancel_worker(struct work_struct *work); +static bool ch_update_local_state(struct channel_ctx *ctx, + enum local_channel_state_e lstate); +static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate); +static void glink_core_deinit_xprt_qos_cfg( + struct glink_core_xprt_ctx *xprt_ptr); + +#define glink_prio_to_power_state(xprt_ctx, priority) \ + ((xprt_ctx)->prio_bin[priority].power_state) + +#define GLINK_GET_CH_TX_STATE(ctx) \ + ((ctx)->tx_intent_cnt || (ctx)->tx_cnt) + +/** + * glink_ssr() - Clean up locally for SSR by simulating remote close + * @subsystem: The name of the subsystem being restarted + * + * Call into the transport using the ssr(if_ptr) function to allow it to + * clean up any necessary structures, then simulate a remote close from + * subsystem for all channels on that edge. + * + * Return: Standard error codes. + */ +int glink_ssr(const char *subsystem) +{ + int ret = 0; + bool transport_found = false; + struct glink_core_xprt_ctx *xprt_ctx = NULL; + struct channel_ctx *ch_ctx, *temp_ch_ctx; + uint32_t i; + unsigned long flags; + + mutex_lock(&transport_list_lock_lha0); + list_for_each_entry(xprt_ctx, &transport_list, list_node) { + if (!strcmp(subsystem, xprt_ctx->edge) && + xprt_is_fully_opened(xprt_ctx)) { + GLINK_INFO_XPRT(xprt_ctx, "%s: SSR\n", __func__); + spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb2, + flags); + for (i = 0; i < xprt_ctx->num_priority; i++) + list_for_each_entry_safe(ch_ctx, temp_ch_ctx, + &xprt_ctx->prio_bin[i].tx_ready, + tx_ready_list_node) + list_del_init( + &ch_ctx->tx_ready_list_node); + spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb2, + flags); + + xprt_ctx->ops->ssr(xprt_ctx->ops); + transport_found = true; + } + } + mutex_unlock(&transport_list_lock_lha0); + + if (!transport_found) + ret = -ENODEV; + + return ret; +} +EXPORT_SYMBOL(glink_ssr); + +/** + * glink_core_ch_close_ack_common() - handles the common operations during + * close ack. + * @ctx: Pointer to channel instance. + * + * Return: True if the channel is fully closed after the state change, + * false otherwise. + */ +static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx) +{ + bool is_fully_closed; + + if (ctx == NULL) + return false; + is_fully_closed = ch_update_local_state(ctx, GLINK_CHANNEL_CLOSED); + GLINK_INFO_PERF_CH(ctx, + "%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n", + __func__); + + if (ctx->notify_state) { + ctx->notify_state(ctx, ctx->user_priv, + GLINK_LOCAL_DISCONNECTED); + ch_purge_intent_lists(ctx); + GLINK_INFO_PERF_CH(ctx, + "%s: notify state: GLINK_LOCAL_DISCONNECTED\n", + __func__); + } + + return is_fully_closed; +} + +/** + * glink_core_remote_close_common() - Handles the common operations during + * a remote close. + * @ctx: Pointer to channel instance. + * + * Return: True if the channel is fully closed after the state change, + * false otherwise. 
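+ *
+ * Unlike glink_core_ch_close_ack_common() above, this path also wakes
+ * any waiters blocked on an intent request (int_req_ack_complete and
+ * int_req_complete) before purging the intent lists.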
+ */
+static bool glink_core_remote_close_common(struct channel_ctx *ctx)
+{
+	bool is_fully_closed;
+
+	if (ctx == NULL)
+		return false;
+	is_fully_closed = ch_update_rmt_state(ctx, false);
+	ctx->rcid = 0;
+
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
+		ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+		if (ctx->notify_state)
+			ctx->notify_state(ctx, ctx->user_priv,
+				GLINK_REMOTE_DISCONNECTED);
+		GLINK_INFO_CH(ctx,
+				"%s: %s: GLINK_REMOTE_DISCONNECTED\n",
+				__func__, "notify state");
+	}
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		GLINK_INFO_CH(ctx,
+			"%s: %s, %s\n", __func__,
+			"Did not send GLINK_REMOTE_DISCONNECTED",
+			"local state is already CLOSED");
+
+	ctx->int_req_ack = false;
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+	ch_purge_intent_lists(ctx);
+
+	return is_fully_closed;
+}
+
+/**
+ * glink_qos_calc_rate_kBps() - Calculate the transmit rate in kBps
+ * @pkt_size: Worst case packet size per transmission.
+ * @interval_us: Packet transmit interval in us.
+ *
+ * This function is used to calculate the transmission rate of
+ * a channel in kBps.
+ *
+ * Return: Transmission rate in kBps.
+ */
+static unsigned long glink_qos_calc_rate_kBps(size_t pkt_size,
+				unsigned long interval_us)
+{
+	unsigned long rate_kBps, rem;
+
+	rate_kBps = pkt_size * USEC_PER_SEC;
+	rem = do_div(rate_kBps, (interval_us * 1024));
+	return rate_kBps;
+}
+
+/**
+ * glink_qos_check_feasibility() - Feasibility test on a QoS Request
+ * @xprt_ctx: Transport in which the QoS request is made.
+ * @req_rate_kBps: QoS Request.
+ *
+ * This function is used to perform the schedulability test on a QoS request
+ * over a specific transport.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_check_feasibility(struct glink_core_xprt_ctx *xprt_ctx,
+				unsigned long req_rate_kBps)
+{
+	unsigned long new_rate_kBps;
+
+	if (xprt_ctx->num_priority == GLINK_QOS_DEF_NUM_PRIORITY)
+		return -EOPNOTSUPP;
+
+	new_rate_kBps = xprt_ctx->curr_qos_rate_kBps + req_rate_kBps;
+	if (new_rate_kBps > xprt_ctx->threshold_rate_kBps) {
+		GLINK_ERR_XPRT(xprt_ctx,
+			"New_rate(%lu + %lu) > threshold_rate(%lu)\n",
+			xprt_ctx->curr_qos_rate_kBps, req_rate_kBps,
+			xprt_ctx->threshold_rate_kBps);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/**
+ * glink_qos_update_ch_prio() - Update the channel priority
+ * @ctx: Channel context whose priority is updated.
+ * @new_priority: New priority of the channel.
+ *
+ * This function is called to update the channel priority during QoS request,
+ * QoS Cancel or Priority evaluation by packet scheduler. This function must
+ * be called with transport's tx_ready_lock_lhb2 lock and channel's
+ * tx_lists_lock_lhc3 locked.
+ */
+static void glink_qos_update_ch_prio(struct channel_ctx *ctx,
+				uint32_t new_priority)
+{
+	uint32_t old_priority;
+
+	if (unlikely(!ctx))
+		return;
+
+	old_priority = ctx->curr_priority;
+	if (!list_empty(&ctx->tx_ready_list_node)) {
+		ctx->transport_ptr->prio_bin[old_priority].active_ch_cnt--;
+		list_move(&ctx->tx_ready_list_node,
+			&ctx->transport_ptr->prio_bin[new_priority].tx_ready);
+		ctx->transport_ptr->prio_bin[new_priority].active_ch_cnt++;
+	}
+	ctx->curr_priority = new_priority;
+}
+
+/**
+ * glink_qos_assign_priority() - Assign priority to a channel
+ * @ctx: Channel for which the priority has to be assigned.
+ * @req_rate_kBps: QoS request by the channel.
+ *
+ * This function is used to assign a priority to the channel depending on its
+ * QoS Request.
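+ *
+ * Worked example (bin rates are illustrative, not taken from this
+ * patch): with num_priority = 4 and prio_bin[0..3].max_rate_kBps =
+ * {0, 100, 1000, 10000}, a request of 2048 bytes every 10000 us rates
+ * as 2048 * USEC_PER_SEC / (10000 * 1024) = 200 kBps, so the search
+ * walks down from bin 3 and settles on bin 2, the lowest bin whose
+ * predecessor (100 kBps) can no longer cover the request.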
+ * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_assign_priority(struct channel_ctx *ctx, + unsigned long req_rate_kBps) +{ + int ret; + uint32_t i; + unsigned long flags; + + spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags); + if (ctx->req_rate_kBps) { + spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, + flags); + GLINK_ERR_CH(ctx, "%s: QoS Request already exists\n", __func__); + return -EINVAL; + } + + ret = glink_qos_check_feasibility(ctx->transport_ptr, req_rate_kBps); + if (ret < 0) { + spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, + flags); + return ret; + } + + spin_lock(&ctx->tx_lists_lock_lhc3); + i = ctx->transport_ptr->num_priority - 1; + while (i > 0 && + ctx->transport_ptr->prio_bin[i-1].max_rate_kBps >= req_rate_kBps) + i--; + + ctx->initial_priority = i; + glink_qos_update_ch_prio(ctx, i); + ctx->req_rate_kBps = req_rate_kBps; + if (i > 0) { + ctx->transport_ptr->curr_qos_rate_kBps += req_rate_kBps; + ctx->token_count = ctx->transport_ptr->token_count; + ctx->txd_len = 0; + ctx->token_start_time = arch_counter_get_cntpct(); + } + spin_unlock(&ctx->tx_lists_lock_lhc3); + spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags); + return 0; +} + +/** + * glink_qos_reset_priority() - Reset the channel priority + * @ctx: Channel for which the priority is reset. + * + * This function is used to reset the channel priority when the QoS request + * is cancelled by the channel. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_reset_priority(struct channel_ctx *ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags); + spin_lock(&ctx->tx_lists_lock_lhc3); + if (ctx->initial_priority > 0) { + ctx->initial_priority = 0; + glink_qos_update_ch_prio(ctx, 0); + ctx->transport_ptr->curr_qos_rate_kBps -= ctx->req_rate_kBps; + ctx->txd_len = 0; + ctx->req_rate_kBps = 0; + } + spin_unlock(&ctx->tx_lists_lock_lhc3); + spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags); + return 0; +} + +/** + * glink_qos_ch_vote_xprt() - Vote the transport that channel is active + * @ctx: Channel context which is active. + * + * This function is called to vote for the transport either when the channel + * is transmitting or when it shows an intention to transmit sooner. This + * function must be called with transport's tx_ready_lock_lhb2 lock and + * channel's tx_lists_lock_lhc3 locked. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_ch_vote_xprt(struct channel_ctx *ctx) +{ + uint32_t prio; + + if (unlikely(!ctx || !ctx->transport_ptr)) + return -EINVAL; + + prio = ctx->curr_priority; + ctx->transport_ptr->prio_bin[prio].active_ch_cnt++; + + if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt == 1 && + ctx->transport_ptr->active_high_prio < prio) { + /* + * One active channel in this priority and this is the + * highest active priority bucket + */ + ctx->transport_ptr->active_high_prio = prio; + return ctx->transport_ptr->ops->power_vote( + ctx->transport_ptr->ops, + glink_prio_to_power_state(ctx->transport_ptr, + prio)); + } + return 0; +} + +/** + * glink_qos_ch_unvote_xprt() - Unvote the transport when channel is inactive + * @ctx: Channel context which is inactive. + * + * This function is called to unvote for the transport either when all the + * packets queued by the channel are transmitted by the scheduler. 
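+ * On the final unvote of the highest active bin, it walks down
+ * prio_bin[] to the next bin that still has active channels, or fully
+ * unvotes the transport power state when none remain.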
This + * function must be called with transport's tx_ready_lock_lhb2 lock and + * channel's tx_lists_lock_lhc3 locked. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_ch_unvote_xprt(struct channel_ctx *ctx) +{ + uint32_t prio; + + if (unlikely(!ctx || !ctx->transport_ptr)) + return -EINVAL; + + prio = ctx->curr_priority; + ctx->transport_ptr->prio_bin[prio].active_ch_cnt--; + + if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt || + ctx->transport_ptr->active_high_prio > prio) + return 0; + + /* + * No active channel in this priority and this is the + * highest active priority bucket + */ + while (prio > 0) { + prio--; + if (!ctx->transport_ptr->prio_bin[prio].active_ch_cnt) + continue; + + ctx->transport_ptr->active_high_prio = prio; + return ctx->transport_ptr->ops->power_vote( + ctx->transport_ptr->ops, + glink_prio_to_power_state(ctx->transport_ptr, + prio)); + } + return ctx->transport_ptr->ops->power_unvote(ctx->transport_ptr->ops); +} + +/** + * glink_qos_add_ch_tx_intent() - Add the channel's intention to transmit soon + * @ctx: Channel context which is going to be active. + * + * This function is called to update the channel state when it is intending to + * transmit sooner. This function must be called with transport's + * tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 locked. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_add_ch_tx_intent(struct channel_ctx *ctx) +{ + bool active_tx; + + if (unlikely(!ctx)) + return -EINVAL; + + active_tx = GLINK_GET_CH_TX_STATE(ctx); + ctx->tx_intent_cnt++; + if (!active_tx) + glink_qos_ch_vote_xprt(ctx); + return 0; +} + +/** + * glink_qos_do_ch_tx() - Update the channel's state that it is transmitting + * @ctx: Channel context which is transmitting. + * + * This function is called to update the channel state when it is queueing a + * packet to transmit. This function must be called with transport's + * tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 locked. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_do_ch_tx(struct channel_ctx *ctx) +{ + bool active_tx; + + if (unlikely(!ctx)) + return -EINVAL; + + active_tx = GLINK_GET_CH_TX_STATE(ctx); + ctx->tx_cnt++; + if (ctx->tx_intent_cnt) + ctx->tx_intent_cnt--; + if (!active_tx) + glink_qos_ch_vote_xprt(ctx); + return 0; +} + +/** + * glink_qos_done_ch_tx() - Update the channel's state when transmission is done + * @ctx: Channel context for which all packets are transmitted. + * + * This function is called to update the channel state when all packets in its + * transmit queue are successfully transmitted. This function must be called + * with transport's tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 + * locked. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_qos_done_ch_tx(struct channel_ctx *ctx) +{ + bool active_tx; + + if (unlikely(!ctx)) + return -EINVAL; + + WARN_ON(ctx->tx_cnt == 0); + ctx->tx_cnt = 0; + active_tx = GLINK_GET_CH_TX_STATE(ctx); + if (!active_tx) + glink_qos_ch_unvote_xprt(ctx); + return 0; +} + +/** + * tx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers + * @iovec: Pointer to the beginning of the linear buffer. + * @offset: Offset into the buffer whose address is needed. + * @size: Pointer to hold the length of the contiguous buffer space. + * + * This function is used when a linear buffer is transmitted. 
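+ *
+ * Illustrative consumer loop (the local variable names are assumptions):
+ *
+ *	size_t off = 0, n;
+ *	void *buf;
+ *
+ *	while ((buf = tx_linear_vbuf_provider(iovec, off, &n)) != NULL) {
+ *		...consume n bytes at buf...
+ *		off += n;
+ *	}
+ *
+ * For a linear buffer the first call already returns the whole
+ * remaining span, so the loop completes in a single pass.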
+ * + * Return: Address of the buffer which is at offset "offset" from the beginning + * of the buffer. + */ +static void *tx_linear_vbuf_provider(void *iovec, size_t offset, size_t *size) +{ + struct glink_core_tx_pkt *tx_info = (struct glink_core_tx_pkt *)iovec; + + if (unlikely(!iovec || !size)) + return NULL; + + if (offset >= tx_info->size) + return NULL; + + if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, tx_info->data, offset))) + return NULL; + + *size = tx_info->size - offset; + + return (void *)tx_info->data + offset; +} + +/** + * linearize_vector() - Linearize the vector buffer + * @iovec: Pointer to the vector buffer. + * @size: Size of data in the vector buffer. + * vbuf_provider: Virtual address-space Buffer Provider for the vector. + * pbuf_provider: Physical address-space Buffer Provider for the vector. + * + * This function is used to linearize the vector buffer provided by the + * transport when the client has registered to receive only the vector + * buffer. + * + * Return: address of the linear buffer on success, NULL on failure. + */ +static void *linearize_vector(void *iovec, size_t size, + void * (*vbuf_provider)(void *iovec, size_t offset, size_t *buf_size), + void * (*pbuf_provider)(void *iovec, size_t offset, size_t *buf_size)) +{ + void *bounce_buf; + void *pdata; + void *vdata; + size_t data_size; + size_t offset = 0; + + bounce_buf = kmalloc(size, GFP_KERNEL); + if (!bounce_buf) + return ERR_PTR(-ENOMEM); + + do { + if (vbuf_provider) { + vdata = vbuf_provider(iovec, offset, &data_size); + } else { + pdata = pbuf_provider(iovec, offset, &data_size); + vdata = phys_to_virt((unsigned long)pdata); + } + + if (!vdata) + break; + + if (OVERFLOW_ADD_UNSIGNED(size_t, data_size, offset)) { + GLINK_ERR("%s: overflow data_size %zu + offset %zu\n", + __func__, data_size, offset); + goto err; + } + + memcpy(bounce_buf + offset, vdata, data_size); + offset += data_size; + } while (offset < size); + + if (offset != size) { + GLINK_ERR("%s: Error size_copied %zu != total_size %zu\n", + __func__, offset, size); + goto err; + } + return bounce_buf; + +err: + kfree(bounce_buf); + return NULL; +} + +/** + * xprt_lcid_to_ch_ctx_get() - lookup a channel by local id + * @xprt_ctx: Transport to search for a matching channel. + * @lcid: Local channel identifier corresponding to the desired channel. + * + * If the channel is found, the reference count is incremented to ensure the + * lifetime of the channel context. The caller must call rwref_put() when done. + * + * Return: The channel corresponding to @lcid or NULL if a matching channel + * is not found. + */ +static struct channel_ctx *xprt_lcid_to_ch_ctx_get( + struct glink_core_xprt_ctx *xprt_ctx, + uint32_t lcid) +{ + struct channel_ctx *entry; + unsigned long flags; + + spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags); + list_for_each_entry(entry, &xprt_ctx->channels, port_list_node) + if (entry->lcid == lcid) { + rwref_get(&entry->ch_state_lhc0); + spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, + flags); + return entry; + } + spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags); + + return NULL; +} + +/** + * xprt_rcid_to_ch_ctx_get() - lookup a channel by remote id + * @xprt_ctx: Transport to search for a matching channel. + * @rcid: Remote channel identifier corresponding to the desired channel. + * + * If the channel is found, the reference count is incremented to ensure the + * lifetime of the channel context. The caller must call rwref_put() when done. 
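+ *
+ * Typical usage (sketch):
+ *
+ *	ctx = xprt_rcid_to_ch_ctx_get(xprt_ctx, rcid);
+ *	if (!ctx)
+ *		return;
+ *	...operate on the channel...
+ *	rwref_put(&ctx->ch_state_lhc0);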
+ *
+ * Return: The channel corresponding to @rcid or NULL if a matching channel
+ * is not found.
+ */
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t rcid)
+{
+	struct channel_ctx *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+		if (entry->rcid == rcid) {
+			rwref_get(&entry->ch_state_lhc0);
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			return entry;
+		}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+	return NULL;
+}
+
+/**
+ * ch_check_duplicate_riid() - Checks for duplicate riid
+ * @ctx:	Local channel context
+ * @riid:	Remote intent ID
+ *
+ * This function checks whether @riid is already present in the channel's
+ * remote RX intent list.
+ *
+ * Return: True if @riid is found, false otherwise.
+ */
+bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_for_each_entry(intent, &ctx->rmt_rx_intent_list, list) {
+		if (riid == intent->id) {
+			spin_unlock_irqrestore(
+				&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	return false;
+}
+
+/**
+ * ch_pop_remote_rx_intent() - Finds a matching RX intent
+ * @ctx:	Local channel context
+ * @size:	Size of Intent
+ * @riid_ptr:	Pointer to return value of remote intent ID
+ * @intent_size: Pointer to return the size of the matching intent
+ *
+ * This function searches for an RX intent that is >= to the requested size.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+	uint32_t *riid_ptr, size_t *intent_size)
+{
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent_tmp;
+	unsigned long flags;
+
+	if (GLINK_MAX_PKT_SIZE < size) {
+		GLINK_ERR_CH(ctx, "%s: R[]:%zu Invalid size.\n", __func__,
+				size);
+		return -EINVAL;
+	}
+
+	if (riid_ptr == NULL)
+		return -EINVAL;
+
+	*riid_ptr = 0;
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		*riid_ptr = ++ctx->dummy_riid;
+		spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2,
+					flags);
+		return 0;
+	}
+	list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
+			list) {
+		if (intent->intent_size >= size) {
+			list_del(&intent->list);
+			GLINK_DBG_CH(ctx,
+					"%s: R[%u]:%zu Removed remote intent\n",
+					__func__,
+					intent->id,
+					intent->intent_size);
+			*riid_ptr = intent->id;
+			*intent_size = intent->intent_size;
+			kfree(intent);
+			spin_unlock_irqrestore(
+				&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	return -EAGAIN;
+}
+
+/**
+ * ch_push_remote_rx_intent() - Registers a remote RX intent
+ * @ctx:	Local channel context
+ * @size:	Size of Intent
+ * @riid:	Remote intent ID
+ *
+ * This function adds a remote RX intent to the remote RX intent list.
+ */
+void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+		uint32_t riid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	gfp_t gfp_flag;
+
+	if (GLINK_MAX_PKT_SIZE < size) {
+		GLINK_ERR_CH(ctx, "%s: R[%u]:%zu Invalid size.\n", __func__,
+				riid, size);
+		return;
+	}
+
+	if (ch_check_duplicate_riid(ctx, riid)) {
+		GLINK_ERR_CH(ctx, "%s: R[%d]:%zu Duplicate RIID found\n",
+				__func__, riid, size);
+		return;
+	}
+
+	gfp_flag = (ctx->transport_ptr->capabilities & GCAP_AUTO_QUEUE_RX_INT) ?
+							GFP_ATOMIC : GFP_KERNEL;
+	intent = kzalloc(sizeof(struct glink_core_rx_intent), gfp_flag);
+	if (!intent) {
+		GLINK_ERR_CH(ctx,
+			"%s: R[%u]:%zu Memory allocation for intent failed\n",
+			__func__, riid, size);
+		return;
+	}
+	intent->id = riid;
+	intent->intent_size = size;
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
+
+	complete_all(&ctx->int_req_complete);
+	if (ctx->notify_remote_rx_intent)
+		ctx->notify_remote_rx_intent(ctx, ctx->user_priv, size);
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+
+	GLINK_DBG_CH(ctx, "%s: R[%u]:%zu Pushed remote intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+}
+
+/**
+ * ch_push_local_rx_intent() - Create an rx_intent
+ * @ctx:	Local channel context
+ * @pkt_priv:	Opaque private pointer provided by client to be returned later
+ * @size:	Size of intent
+ *
+ * This function creates a local intent and adds it to the local
+ * intent list.
+ *
+ * Return: Pointer to the intent on success, NULL on failure.
+ */
+struct glink_core_rx_intent *ch_push_local_rx_intent(struct channel_ctx *ctx,
+		const void *pkt_priv, size_t size)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	int ret;
+
+	if (GLINK_MAX_PKT_SIZE < size) {
+		GLINK_ERR_CH(ctx,
+			"%s: L[]:%zu Invalid size\n", __func__, size);
+		return NULL;
+	}
+
+	intent = ch_get_free_local_rx_intent(ctx);
+	if (!intent) {
+		if (ctx->max_used_liid >= ctx->transport_ptr->max_iid) {
+			GLINK_ERR_CH(ctx,
+				"%s: All intents are in USE max_iid[%d]",
+				__func__, ctx->transport_ptr->max_iid);
+			return NULL;
+		}
+
+		intent = kzalloc(sizeof(struct glink_core_rx_intent),
+				GFP_KERNEL);
+		if (!intent) {
+			GLINK_ERR_CH(ctx,
+			"%s: Memory Allocation for local rx_intent failed",
+				__func__);
+			return NULL;
+		}
+		intent->id = ++ctx->max_used_liid;
+	}
+
+	/* transport is responsible for allocating/reserving for the intent */
+	ret = ctx->transport_ptr->ops->allocate_rx_intent(
+			ctx->transport_ptr->ops, size, intent);
+	if (ret < 0) {
+		/* intent data allocation failure */
+		GLINK_ERR_CH(ctx, "%s: unable to allocate intent sz[%zu] %d",
+				__func__, size, ret);
+		spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+		list_add_tail(&intent->list,
+				&ctx->local_rx_intent_free_list);
+		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+				flags);
+		return NULL;
+	}
+
+	intent->pkt_priv = pkt_priv;
+	intent->intent_size = size;
+	intent->write_offset = 0;
+	intent->pkt_size = 0;
+	intent->bounce_buf = NULL;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+	return intent;
+}
+
+/**
+ * ch_remove_local_rx_intent() - Find and remove RX Intent from list
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, the intent
+ * is deleted from the list.
+ */
+void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid)
+{
+	struct glink_core_rx_intent *intent, *tmp_intent;
+	unsigned long flags;
+
+	if (ctx->transport_ptr->max_iid < liid) {
+		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+				liid);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+			list) {
+		if (liid == intent->id) {
+			list_del(&intent->list);
+			list_add_tail(&intent->list,
+					&ctx->local_rx_intent_free_list);
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			GLINK_DBG_CH(ctx,
+			"%s: L[%u]:%zu moved intent to Free/unused list\n",
+				__func__,
+				intent->id,
+				intent->intent_size);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid);
+}
+
+/**
+ * ch_get_dummy_rx_intent() - Get a dummy rx_intent
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel and
+ * returns either a matching intent or allocates a dummy one if no matching
+ * intents can be found.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_dummy_rx_intent(struct channel_ctx *ctx,
+		uint32_t liid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	if (!list_empty(&ctx->local_rx_intent_list)) {
+		intent = list_first_entry(&ctx->local_rx_intent_list,
+					  struct glink_core_rx_intent, list);
+		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+		return intent;
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	intent = ch_get_free_local_rx_intent(ctx);
+	if (!intent) {
+		intent = kzalloc(sizeof(struct glink_core_rx_intent),
+				GFP_ATOMIC);
+		if (!intent) {
+			GLINK_ERR_CH(ctx,
+			"%s: Memory Allocation for local rx_intent failed",
+				__func__);
+			return NULL;
+		}
+		intent->id = ++ctx->max_used_liid;
+	}
+	intent->intent_size = 0;
+	intent->write_offset = 0;
+	intent->pkt_size = 0;
+	intent->bounce_buf = NULL;
+	intent->pkt_priv = NULL;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+	return intent;
+}
+
+/**
+ * ch_get_local_rx_intent() - Search for an rx_intent
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, a pointer to
+ * the intent is returned.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent(struct channel_ctx *ctx,
+		uint32_t liid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	if (ctx->transport_ptr->max_iid < liid) {
+		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+				liid);
+		return NULL;
+	}
+
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+		return ch_get_dummy_rx_intent(ctx, liid);
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (liid == intent->id) {
+			spin_unlock_irqrestore(
+				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			return intent;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid);
+	return NULL;
+}
+
+/**
+ * ch_set_local_rx_intent_notified() - Add an rx intent to the local intent
+ *					notified list
+ * @ctx:	Local channel context
+ * @intent_ptr:	Pointer to the local intent
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from the local_rx_intent list and adds it to the local_rx_intent_notified
+ * list.
+ */
+void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+		struct glink_core_rx_intent *intent_ptr)
+{
+	struct glink_core_rx_intent *tmp_intent, *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+			list) {
+		if (intent == intent_ptr) {
+			list_del(&intent->list);
+			list_add_tail(&intent->list,
+					&ctx->local_rx_intent_ntfy_list);
+			GLINK_DBG_CH(ctx,
+				"%s: L[%u]:%zu Moved intent %s",
+				__func__,
+				intent_ptr->id,
+				intent_ptr->intent_size,
+				"from local to notify list\n");
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			intent_ptr->id);
+}
+
+/**
+ * ch_get_local_rx_intent_notified() - Find rx intent in local notified list
+ * @ctx:	Local channel context
+ * @ptr:	Pointer to the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent.
+ *
+ * Return: Pointer to the intent if intent is found else NULL.
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+	struct channel_ctx *ctx, const void *ptr)
+{
+	struct glink_core_rx_intent *ptr_intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(ptr_intent, &ctx->local_rx_intent_ntfy_list,
+			list) {
+		if (ptr_intent->data == ptr || ptr_intent->iovec == ptr ||
+		    ptr_intent->bounce_buf == ptr) {
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return ptr_intent;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: Local intent not found\n", __func__);
+	return NULL;
+}
+
+/**
+ * ch_remove_local_rx_intent_notified() - Remove an rx intent from the local
+ *					intent notified list
+ * @ctx:	Local channel context
+ * @liid_ptr:	Pointer to the rx intent
+ * @reuse:	Reuse the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from the local_rx_intent_notified list and adds it to the
+ * local_rx_intent_free list.
+ */
+void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+	struct glink_core_rx_intent *liid_ptr, bool reuse)
+{
+	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_ntfy_list, list) {
+		if (ptr_intent == liid_ptr) {
+			list_del(&ptr_intent->list);
+			GLINK_DBG_CH(ctx,
+				"%s: L[%u]:%zu Removed intent from notify list\n",
+				__func__,
+				ptr_intent->id,
+				ptr_intent->intent_size);
+			kfree(ptr_intent->bounce_buf);
+			ptr_intent->bounce_buf = NULL;
+			ptr_intent->write_offset = 0;
+			ptr_intent->pkt_size = 0;
+			if (reuse)
+				list_add_tail(&ptr_intent->list,
+					&ctx->local_rx_intent_list);
+			else
+				list_add_tail(&ptr_intent->list,
+					&ctx->local_rx_intent_free_list);
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid_ptr->id);
+}
+
+/**
+ * ch_get_free_local_rx_intent() - Return an rx intent from the local intent
+ *					free list
+ * @ctx:	Local channel context
+ *
+ * This function parses the local_rx_intent_free list for a specific channel
+ * and checks for a free unused intent.
+ *
+ * Return: Pointer to the free intent if one is found, else NULL.
+ */
+struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+	struct channel_ctx *ctx)
+{
+	struct glink_core_rx_intent *ptr_intent = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	if (!list_empty(&ctx->local_rx_intent_free_list)) {
+		ptr_intent = list_first_entry(&ctx->local_rx_intent_free_list,
+						struct glink_core_rx_intent,
+						list);
+		list_del(&ptr_intent->list);
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	return ptr_intent;
+}
+
+/**
+ * ch_purge_intent_lists() - Remove all intents for a channel
+ *
+ * @ctx:	Local channel context
+ *
+ * This function parses the local intent lists for a specific channel and
+ * removes and frees all intents.
+ */
+void ch_purge_intent_lists(struct channel_ctx *ctx)
+{
+	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+	struct glink_core_tx_pkt *tx_info, *tx_info_temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	list_for_each_entry_safe(tx_info, tx_info_temp, &ctx->tx_active,
+			list_node) {
+		ctx->notify_tx_abort(ctx, ctx->user_priv,
+				tx_info->pkt_priv);
+		rwref_put(&tx_info->pkt_ref);
+	}
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_list, list) {
+		ctx->notify_rx_abort(ctx, ctx->user_priv,
+				ptr_intent->pkt_priv);
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+
+	if (!list_empty(&ctx->local_rx_intent_ntfy_list))
+		/*
+		 * The client is still processing an rx_notify() call and has
+		 * not yet called glink_rx_done() to return the pointer to us.
+		 * glink_rx_done() will do the appropriate cleanup when this
+		 * call occurs, but log a message here just for internal state
+		 * tracking.
+		 */
+		GLINK_INFO_CH(ctx, "%s: waiting on glink_rx_done()\n",
+				__func__);
+
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_free_list, list) {
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+	ctx->max_used_liid = 0;
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->rmt_rx_intent_list, list) {
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+}
+
+/**
+ * ch_get_tx_pending_remote_done() - Look up a packet that is waiting for
+ *				     the remote-done notification.
+ * @ctx:	Pointer to the channel context
+ * @riid:	riid of transmit packet
+ *
+ * This function searches the tx_pending_remote_done list for a packet that
+ * matches @riid.
+ *
+ * The tx_lists_lock_lhc3 lock needs to be held while calling this function.
+ *
+ * Return: Pointer to the tx packet
+ */
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(
+	struct channel_ctx *ctx, uint32_t riid)
+{
+	struct glink_core_tx_pkt *tx_pkt;
+	unsigned long flags;
+
+	if (!ctx) {
+		GLINK_ERR("%s: Invalid context pointer", __func__);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry(tx_pkt, &ctx->tx_pending_remote_done, list_done) {
+		if (tx_pkt->riid == riid) {
+			if (tx_pkt->size_remaining) {
+				GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+						__func__, riid);
+				tx_pkt = NULL;
+			}
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return tx_pkt;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
+			__func__, riid);
+	return NULL;
+}
+
+/**
+ * ch_remove_tx_pending_remote_done() - Removes a packet transmit context for a
+ *			packet that is waiting for the remote-done notification
+ * @ctx:	Pointer to the channel context
+ * @tx_pkt:	Pointer to the transmit packet
+ *
+ * This function parses through tx_pending_remote_done and removes a
+ * packet that matches with the tx_pkt.
+ */
+void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+	struct glink_core_tx_pkt *tx_pkt)
+{
+	struct glink_core_tx_pkt *local_tx_pkt, *tmp_tx_pkt;
+	unsigned long flags;
+
+	if (!ctx || !tx_pkt) {
+		GLINK_ERR("%s: Invalid input", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+			&ctx->tx_pending_remote_done, list_done) {
+		if (tx_pkt == local_tx_pkt) {
+			list_del_init(&tx_pkt->list_done);
+			GLINK_DBG_CH(ctx,
+				"%s: R[%u] Removed Tx packet for intent\n",
+				__func__,
+				tx_pkt->riid);
+			rwref_put(&tx_pkt->pkt_ref);
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
+			tx_pkt->riid);
+}
+
+/**
+ * glink_add_free_lcid_list() - Add the lcid of a channel that is about to be
+ *				deleted to the free lcid list
+ * @ctx:	Pointer to channel context.
+ */
+static void glink_add_free_lcid_list(struct channel_ctx *ctx)
+{
+	struct channel_lcid *free_lcid;
+	unsigned long flags;
+
+	free_lcid = kzalloc(sizeof(*free_lcid), GFP_KERNEL);
+	if (!free_lcid) {
+		GLINK_ERR(
+			"%s: allocation failed on xprt:edge [%s:%s] for lcid [%d]\n",
+			__func__, ctx->transport_ptr->name,
+			ctx->transport_ptr->edge, ctx->lcid);
+		return;
+	}
+	free_lcid->lcid = ctx->lcid;
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_add_tail(&free_lcid->list_node,
+			&ctx->transport_ptr->free_lcid_list);
+	spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+			flags);
+}
+
+/**
+ * glink_ch_ctx_release() - Free the channel context
+ * @ch_st_lock:	handle to the rwref_lock associated with the channel
+ *
+ * This should only be called when the reference count associated with the
+ * channel goes to zero.
+ */
+static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock)
+{
+	struct channel_ctx *ctx = container_of(ch_st_lock, struct channel_ctx,
+						ch_state_lhc0);
+	ctx->transport_ptr = NULL;
+	kfree(ctx);
+	GLINK_INFO("%s: freed the channel ctx in pid [%d]\n", __func__,
+			current->pid);
+}
+
+/**
+ * ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if
+ *				it is not found.
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @name:	Name of the desired channel.
+ *
+ * Return: The channel corresponding to @name, NULL if a matching channel was
+ * not found AND a new channel could not be created.
+ */
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					const char *name)
+{
+	struct channel_ctx *entry;
+	struct channel_ctx *ctx;
+	struct channel_ctx *temp;
+	unsigned long flags;
+	struct channel_lcid *flcid;
+
+	ctx = kzalloc(sizeof(struct channel_ctx), GFP_KERNEL);
+	if (!ctx) {
+		GLINK_ERR_XPRT(xprt_ctx,
+			"%s: Failed to allocate ctx, checking if one exists\n",
+			__func__);
+		goto check_ctx;
+	}
+
+	ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+	strlcpy(ctx->name, name, GLINK_NAME_SIZE);
+	rwref_lock_init(&ctx->ch_state_lhc0, glink_ch_ctx_release);
+	INIT_LIST_HEAD(&ctx->tx_ready_list_node);
+	init_completion(&ctx->int_req_ack_complete);
+	init_completion(&ctx->int_req_complete);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_list);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_ntfy_list);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_free_list);
+	spin_lock_init(&ctx->local_rx_intent_lst_lock_lhc1);
+	INIT_LIST_HEAD(&ctx->rmt_rx_intent_list);
+	spin_lock_init(&ctx->rmt_rx_intent_lst_lock_lhc2);
+	INIT_LIST_HEAD(&ctx->tx_active);
+	spin_lock_init(&ctx->tx_pending_rmt_done_lock_lhc4);
+	INIT_LIST_HEAD(&ctx->tx_pending_remote_done);
+	spin_lock_init(&ctx->tx_lists_lock_lhc3);
+
+check_ctx:
+	rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+	if (xprt_ctx->local_state != GLINK_XPRT_OPENED) {
+		kfree(ctx);
+		rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+		return NULL;
+	}
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
+			port_list_node)
+		if (!strcmp(entry->name, name) && !entry->pending_delete) {
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			kfree(ctx);
+			rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+			return entry;
+		}
+
+	if (ctx) {
+		if (list_empty(&xprt_ctx->free_lcid_list)) {
+			if (xprt_ctx->next_lcid > xprt_ctx->max_cid) {
+				/* no more channels available */
+				GLINK_ERR_XPRT(xprt_ctx,
+					"%s: unable to exceed %u channels\n",
+					__func__, xprt_ctx->max_cid);
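+				/* drop the speculatively allocated ctx */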
+				spin_unlock_irqrestore(
+						&xprt_ctx->xprt_ctx_lock_lhb1,
+						flags);
+				kfree(ctx);
+				rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+				return NULL;
+			} else {
+				ctx->lcid = xprt_ctx->next_lcid++;
+			}
+		} else {
+			flcid = list_first_entry(&xprt_ctx->free_lcid_list,
+						struct channel_lcid, list_node);
+			ctx->lcid = flcid->lcid;
+			list_del(&flcid->list_node);
+			kfree(flcid);
+		}
+
+		list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
+
+		GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
+			"%s: local:GLINK_CHANNEL_CLOSED\n",
+			__func__);
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+	mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb3);
+	if (ctx != NULL)
+		glink_debugfs_add_channel(ctx, xprt_ctx);
+	mutex_unlock(&xprt_ctx->xprt_dbgfs_lock_lhb3);
+	return ctx;
+}
+
+/**
+ * ch_add_rcid() - add a remote channel identifier to an existing channel
+ * @xprt_ctx:	Transport the channel resides on.
+ * @ctx:	Channel receiving the identifier.
+ * @rcid:	The remote channel identifier.
+ */
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+			struct channel_ctx *ctx,
+			uint32_t rcid)
+{
+	ctx->rcid = rcid;
+}
+
+/**
+ * ch_update_local_state() - Update the local channel state
+ * @ctx:	Pointer to channel context.
+ * @lstate:	Local channel state.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ *	false otherwise.
+ */
+static bool ch_update_local_state(struct channel_ctx *ctx,
+					enum local_channel_state_e lstate)
+{
+	bool is_fully_closed;
+
+	rwref_write_get(&ctx->ch_state_lhc0);
+	ctx->local_open_state = lstate;
+	is_fully_closed = ch_is_fully_closed(ctx);
+	rwref_write_put(&ctx->ch_state_lhc0);
+
+	return is_fully_closed;
+}
+
+/**
+ * ch_update_rmt_state() - Update the remote channel state
+ * @ctx:	Pointer to channel context.
+ * @rstate:	Remote channel state.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ *	false otherwise.
+ */
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate)
+{
+	bool is_fully_closed;
+
+	rwref_write_get(&ctx->ch_state_lhc0);
+	ctx->remote_opened = rstate;
+	is_fully_closed = ch_is_fully_closed(ctx);
+	rwref_write_put(&ctx->ch_state_lhc0);
+
+	return is_fully_closed;
+}
+
+/*
+ * ch_is_fully_opened() - Verify if a channel is open
+ * @ctx:	Pointer to channel context
+ *
+ * Return: True if open, else false
+ */
+static bool ch_is_fully_opened(struct channel_ctx *ctx)
+{
+	if (ctx->remote_opened && ctx->local_open_state == GLINK_CHANNEL_OPENED)
+		return true;
+
+	return false;
+}
+
+/*
+ * ch_is_fully_closed() - Verify if a channel is closed on both sides
+ * @ctx:	Pointer to channel context
+ *
+ * Return: True if fully closed, else false
+ */
+static bool ch_is_fully_closed(struct channel_ctx *ctx)
+{
+	if (!ctx->remote_opened &&
+		ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		return true;
+
+	return false;
+}
+
+/**
+ * find_open_transport() - find a specific open transport
+ * @edge:	Edge the transport is on.
+ * @name:	Name of the transport (or NULL if no preference)
+ * @initial_xprt:	The specified transport is the start for migration
+ * @best_id:	The best transport found for this connection
+ *
+ * Find an open transport corresponding to the specified @name and @edge. @edge
+ * is expected to be valid. @name is expected to be NULL (unspecified) or
+ * valid. If @name is not specified, then the best transport found on the
+ * specified edge will be returned.
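+ *
+ * A lower transport id indicates a more preferred transport. For example,
+ * if an edge hosts two fully opened transports with ids 1 and 2 and @name
+ * is NULL, the id-1 transport is returned (illustrative ids only).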
+ *
+ * Return: Transport with the specified name on the specified edge, if open.
+ *	NULL if the transport exists, but is not fully open. ERR_PTR(-ENODEV)
+ *	if no such transport exists.
+ */
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+						       const char *name,
+						       bool initial_xprt,
+						       uint16_t *best_id)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct glink_core_xprt_ctx *best_xprt = NULL;
+	struct glink_core_xprt_ctx *ret;
+	bool first = true;
+
+	ret = (struct glink_core_xprt_ctx *)ERR_PTR(-ENODEV);
+	*best_id = USHRT_MAX;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node) {
+		if (strcmp(edge, xprt->edge))
+			continue;
+		if (first) {
+			first = false;
+			ret = NULL;
+		}
+		if (!xprt_is_fully_opened(xprt))
+			continue;
+
+		if (xprt->id < *best_id) {
+			*best_id = xprt->id;
+			best_xprt = xprt;
+		}
+
+		/*
+		 * Braces are required in this instance because the else will
+		 * attach to the wrong if otherwise.
+		 */
+		if (name) {
+			if (!strcmp(name, xprt->name))
+				ret = xprt;
+		} else {
+			ret = best_xprt;
+		}
+	}
+
+	mutex_unlock(&transport_list_lock_lha0);
+
+	if (IS_ERR_OR_NULL(ret))
+		return ret;
+	if (!initial_xprt)
+		*best_id = ret->id;
+
+	return ret;
+}
+
+/**
+ * xprt_is_fully_opened() - check the open status of a transport
+ * @xprt:	Transport being checked.
+ *
+ * Return: True if the transport is fully opened, false otherwise.
+ */
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt)
+{
+	if (xprt->remote_neg_completed &&
+			xprt->local_state == GLINK_XPRT_OPENED)
+		return true;
+
+	return false;
+}
+
+/**
+ * glink_dummy_notify_rx_intent_req() - Dummy RX Request
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @req_size:	Requested size (ignored)
+ *
+ * Dummy RX intent request if client does not implement the optional callback
+ * function.
+ *
+ * Return: False
+ */
+static bool glink_dummy_notify_rx_intent_req(void *handle, const void *priv,
+					     size_t req_size)
+{
+	return false;
+}
+
+/**
+ * glink_dummy_notify_rx_sigs() - Dummy signal callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @old_sigs:	Old signal state (ignored)
+ * @new_sigs:	New signal state (ignored)
+ *
+ * Dummy signal callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_sigs(void *handle, const void *priv,
+				       uint32_t old_sigs, uint32_t new_sigs)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_rx_abort() - Dummy rx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy rx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_abort(void *handle, const void *priv,
+					const void *pkt_priv)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_tx_abort() - Dummy tx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy tx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_tx_abort(void *handle, const void *priv,
+					const void *pkt_priv)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_poll() - a dummy poll() for transports that don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The channel to poll.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_reuse_rx_intent() - a dummy reuse_rx_intent() for transports that
+ *			     don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @intent:	The intent to reuse.
+ *
+ * Return: Success.
+ */
+static int dummy_reuse_rx_intent(struct glink_transport_if *if_ptr,
+				 struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_mask_rx_irq() - a dummy mask_rx_irq() for transports that don't define
+ *			 one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The local channel id for this channel.
+ * @mask:	True to mask the irq, false to unmask.
+ * @pstruct:	Platform defined structure with data necessary for masking.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+			     bool mask, void *pstruct)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_wait_link_down() - a dummy wait_link_down() for transports that don't
+ *			    define one
+ * @if_ptr:	The transport interface handle for this transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_wait_link_down(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_allocate_rx_intent() - a dummy RX intent allocation function that does
+ *				not allocate anything
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	Size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: Success.
+ */
+static int dummy_allocate_rx_intent(struct glink_transport_if *if_ptr,
+			size_t size, struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_tracer_pkt() - a dummy tracer packet tx cmd for transports
+ *			       that don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The channel in which the tracer packet is transmitted.
+ * @pctx:	Context of the packet to be transmitted.
+ *
+ * Return: 0.
+ */
+static int dummy_tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr,
+			uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	pctx->size_remaining = 0;
+	return 0;
+}
+
+/**
+ * dummy_deallocate_rx_intent() - a dummy rx intent deallocation function that
+ *				  does not deallocate anything
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: Success.
+ */
+static int dummy_deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_intent() - dummy local rx intent request
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size, uint32_t liid)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_done() - dummy rx done command
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void dummy_tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				       uint32_t lcid, uint32_t liid, bool reuse)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_tx() - dummy tx() that does not send anything
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written i.e. zero.
+ */
+static int dummy_tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+		    struct glink_core_tx_pkt *pctx)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_rx_intent_req() - dummy rx intent request function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				      uint32_t lcid, size_t size)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_remote_rx_intent_req_ack() - dummy rx intent request ack
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_remote_rx_intent_req_ack(
+					struct glink_transport_if *if_ptr,
+					uint32_t lcid, bool granted)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_set_sigs() - dummy signals ack transmit function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_set_sigs(struct glink_transport_if *if_ptr,
+				 uint32_t lcid, uint32_t sigs)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_close() - dummy channel close transmit function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_ch_close(struct glink_transport_if *if_ptr,
+				 uint32_t lcid)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_remote_close_ack() - dummy channel close ack sending
+ *					function
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+					     uint32_t rcid)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_get_power_vote_ramp_time() - Dummy power vote ramp time
+ * @if_ptr:	The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static unsigned long dummy_get_power_vote_ramp_time(
+		struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return (unsigned long)-EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_vote() - Dummy power vote operation
+ * @if_ptr:	The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_vote(struct glink_transport_if *if_ptr,
+			    uint32_t state)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_unvote() - Dummy power unvote operation
+ * @if_ptr:	The transport to transmit on.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_unvote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * notif_if_up_all_xprts() - Check and notify existing transport state if up
+ * @notif_info:	Data structure containing transport information to be notified.
+ *
+ * This function is called when the client registers a notifier to know about
+ * the state of a transport. This function matches the existing transports with
+ * the transport in the "notif_info" parameter. When a matching transport is
+ * found, the callback function in the "notif_info" parameter is called with
+ * the state of the matching transport.
+ *
+ * If an edge or transport is not defined, then all edges and/or transports
+ * will be matched and will receive up notifications.
+ */
+static void notif_if_up_all_xprts(
+		struct link_state_notifier_info *notif_info)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	struct glink_link_state_cb_info cb_info;
+
+	cb_info.link_state = GLINK_LINK_STATE_UP;
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt_ptr, &transport_list, list_node) {
+		if (strlen(notif_info->edge) &&
+		    strcmp(notif_info->edge, xprt_ptr->edge))
+			continue;
+
+		if (strlen(notif_info->transport) &&
+		    strcmp(notif_info->transport, xprt_ptr->name))
+			continue;
+
+		if (!xprt_is_fully_opened(xprt_ptr))
+			continue;
+
+		cb_info.transport = xprt_ptr->name;
+		cb_info.edge = xprt_ptr->edge;
+		notif_info->glink_link_state_notif_cb(&cb_info,
+						notif_info->priv);
+	}
+	mutex_unlock(&transport_list_lock_lha0);
+}
+
+/**
+ * check_link_notifier_and_notify() - Check and notify clients about link state
+ * @xprt_ptr:	Transport whose state is to be notified.
+ * @link_state:	State of the transport to be notified.
+ *
+ * This function is called when the state of the transport changes. This
+ * function matches the transport with the clients that have registered to
+ * be notified about the state changes. When a matching client notifier is
+ * found, the callback function in the client notifier is called with the
+ * new state of the transport.
+ */
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+					   enum glink_link_state link_state)
+{
+	struct link_state_notifier_info *notif_info;
+	struct glink_link_state_cb_info cb_info;
+
+	cb_info.link_state = link_state;
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_for_each_entry(notif_info, &link_state_notifier_list, list) {
+		if (strlen(notif_info->edge) &&
+		    strcmp(notif_info->edge, xprt_ptr->edge))
+			continue;
+
+		if (strlen(notif_info->transport) &&
+		    strcmp(notif_info->transport, xprt_ptr->name))
+			continue;
+
+		cb_info.transport = xprt_ptr->name;
+		cb_info.edge = xprt_ptr->edge;
+		notif_info->glink_link_state_notif_cb(&cb_info,
+						notif_info->priv);
+	}
+	mutex_unlock(&link_state_notifier_lock_lha1);
+}
+
+/**
+ * glink_open() - Open a GLINK channel.
+ *
+ * @cfg:	Open configuration structure (the structure is copied before
+ *		glink_open returns). All unused fields should be zero-filled.
+ *
+ * This should not be called from link state callback context by clients.
+ * It is recommended that clients invoke this function from their own
+ * thread.
+ *
+ * Return: Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
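+ *
+ * A minimal open sequence for a hypothetical client might look like this
+ * (illustrative sketch only; the "lpass" edge, "loopback" name and the
+ * my_notify_*() callbacks are examples, not part of this patch):
+ *
+ *	struct glink_open_config cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.transport = NULL;	(pick the best transport on the edge)
+ *	cfg.edge = "lpass";
+ *	cfg.name = "loopback";
+ *	cfg.notify_rx = my_notify_rx;
+ *	cfg.notify_tx_done = my_notify_tx_done;
+ *	cfg.notify_state = my_notify_state;
+ *	handle = glink_open(&cfg);
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return -ENODEV;
+ *	(wait for GLINK_CONNECTED via notify_state before calling glink_tx())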
+ */
+void *glink_open(const struct glink_open_config *cfg)
+{
+	struct channel_ctx *ctx = NULL;
+	struct glink_core_xprt_ctx *transport_ptr;
+	size_t len;
+	int ret;
+	uint16_t best_id;
+
+	if (!cfg->edge || !cfg->name) {
+		GLINK_ERR("%s: !cfg->edge || !cfg->name\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = strlen(cfg->edge);
+	if (len == 0 || len >= GLINK_NAME_SIZE) {
+		GLINK_ERR("%s: [EDGE] len == 0 || len >= GLINK_NAME_SIZE\n",
+				__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = strlen(cfg->name);
+	if (len == 0 || len >= GLINK_NAME_SIZE) {
+		GLINK_ERR("%s: [NAME] len == 0 || len >= GLINK_NAME_SIZE\n",
+				__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (cfg->transport) {
+		len = strlen(cfg->transport);
+		if (len == 0 || len >= GLINK_NAME_SIZE) {
+			GLINK_ERR("%s: [TRANSPORT] len == 0 || len >= GLINK_NAME_SIZE\n",
+					__func__);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* confirm required notification parameters */
+	if (!(cfg->notify_rx || cfg->notify_rxv) || !cfg->notify_tx_done
+		|| !cfg->notify_state
+		|| ((cfg->options & GLINK_OPT_RX_INTENT_NOTIF)
+			&& !cfg->notify_remote_rx_intent)) {
+		GLINK_ERR("%s: Incorrect notification parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* find transport */
+	transport_ptr = find_open_transport(cfg->edge, cfg->transport,
+				cfg->options & GLINK_OPT_INITIAL_XPORT,
+				&best_id);
+	if (IS_ERR_OR_NULL(transport_ptr)) {
+		GLINK_ERR("%s:%s %s: Error %d - unable to find transport\n",
+				cfg->transport, cfg->edge, __func__,
+				(unsigned)PTR_ERR(transport_ptr));
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * look for an existing port structure which can occur in
+	 * reopen and remote-open-first cases
+	 */
+	ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+	if (ctx == NULL) {
+		GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
+				cfg->transport, cfg->edge, __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* port already exists */
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
+		/* not ready to be re-opened */
+		GLINK_INFO_CH_XPRT(ctx, transport_ptr,
+			"%s: Channel not ready to be re-opened. State: %u\n",
+			__func__, ctx->local_open_state);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* initialize port structure */
+	ctx->user_priv = cfg->priv;
+	ctx->rx_intent_req_timeout_jiffies =
+			msecs_to_jiffies(cfg->rx_intent_req_timeout_ms);
+	ctx->notify_rx = cfg->notify_rx;
+	ctx->notify_tx_done = cfg->notify_tx_done;
+	ctx->notify_state = cfg->notify_state;
+	ctx->notify_rx_intent_req = cfg->notify_rx_intent_req;
+	ctx->notify_rxv = cfg->notify_rxv;
+	ctx->notify_rx_sigs = cfg->notify_rx_sigs;
+	ctx->notify_rx_abort = cfg->notify_rx_abort;
+	ctx->notify_tx_abort = cfg->notify_tx_abort;
+	ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
+	ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+
+	if (!ctx->notify_rx_intent_req)
+		ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
+	if (!ctx->notify_rx_sigs)
+		ctx->notify_rx_sigs = glink_dummy_notify_rx_sigs;
+	if (!ctx->notify_rx_abort)
+		ctx->notify_rx_abort = glink_dummy_notify_rx_abort;
+	if (!ctx->notify_tx_abort)
+		ctx->notify_tx_abort = glink_dummy_notify_tx_abort;
+
+	if (!ctx->rx_intent_req_timeout_jiffies)
+		ctx->rx_intent_req_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+	ctx->local_xprt_req = best_id;
+	ctx->no_migrate = cfg->transport &&
+				!(cfg->options & GLINK_OPT_INITIAL_XPORT);
+	ctx->transport_ptr = transport_ptr;
+	ctx->local_open_state = GLINK_CHANNEL_OPENING;
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
+		__func__);
+
+	/* start local-open sequence */
+	ret = ctx->transport_ptr->ops->tx_cmd_ch_open(ctx->transport_ptr->ops,
+			ctx->lcid, cfg->name, best_id);
+	if (ret) {
+		/* failure to send open command (transport failure) */
+		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+		GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
+				__func__, ret);
+		return ERR_PTR(ret);
+	}
+
+	GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
+			__func__, ctx);
+
+	return ctx;
+}
+EXPORT_SYMBOL(glink_open);
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (ctx == NULL)
+		return -EINVAL;
+
+	return ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_channel_id_for_handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (ctx == NULL)
+		return NULL;
+
+	return ctx->name;
+}
+EXPORT_SYMBOL(glink_get_channel_name_for_handle);
+
+/**
+ * glink_delete_ch_from_list() - delete the channel from the list
+ * @ctx:	Pointer to channel context.
+ * @add_flcid:	Boolean value to decide whether the lcid should be added or not.
+ *
+ * This function deletes the channel from the list along with the debugfs
+ * information associated with it. It also adds the channel lcid to the free
+ * lcid list, except when the channel is deleted as part of SSR or transport
+ * unregistration. It can only be called when the channel is fully closed.
+ */
+static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+			flags);
+	if (!list_empty(&ctx->port_list_node))
+		list_del_init(&ctx->port_list_node);
+	spin_unlock_irqrestore(
+			&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+			flags);
+	if (add_flcid)
+		glink_add_free_lcid_list(ctx);
+	mutex_lock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb3);
+	glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
+	mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb3);
+	rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle:	handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return: 0 on success; -EINVAL for invalid handle, -EBUSY if close is
+ * already in progress, standard Linux Error code otherwise.
+ */
+int glink_close(void *handle)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret = 0;
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+
+	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		return 0;
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
+		/* close already pending */
+		return -EBUSY;
+	}
+
+	/* Set the channel state before removing it from xprt's list(s) */
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:%u->GLINK_CHANNEL_CLOSING\n",
+		__func__, ctx->local_open_state);
+	ctx->local_open_state = GLINK_CHANNEL_CLOSING;
+
+	ctx->pending_delete = true;
+	ctx->int_req_ack = false;
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+	if (!list_empty(&ctx->tx_ready_list_node))
+		list_del_init(&ctx->tx_ready_list_node);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+
+	if (ctx->transport_ptr->local_state != GLINK_XPRT_DOWN) {
+		glink_qos_reset_priority(ctx);
+		ret = ctx->transport_ptr->ops->tx_cmd_ch_close(
+				ctx->transport_ptr->ops,
+				ctx->lcid);
+	} else if (!strcmp(ctx->transport_ptr->name, "dummy")) {
+		/*
+		 * This check will avoid any race condition when clients call
+		 * glink_close before the dummy xprt swapping happens in link
+		 * down scenario.
+		 */
+		ret = 0;
+		xprt_ctx = ctx->transport_ptr;
+		rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+		glink_core_ch_close_ack_common(ctx);
+		if (ch_is_fully_closed(ctx)) {
+			glink_delete_ch_from_list(ctx, false);
+			rwref_put(&xprt_ctx->xprt_state_lhb0);
+			if (list_empty(&xprt_ctx->channels))
+				/* For the xprt reference */
+				rwref_put(&xprt_ctx->xprt_state_lhb0);
+		} else {
+			GLINK_ERR_CH(ctx,
+			"channel not closed yet local state [%d] remote_state [%d]\n",
+				ctx->local_open_state, ctx->remote_opened);
+		}
+		rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_close);
+
+/**
+ * glink_tx_pkt_release() - Release a packet's transmit information
+ * @tx_pkt_ref:	Packet information which needs to be released.
+ *
+ * This function is called when all the references to a packet's transmit
+ * information are dropped.
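+ *
+ * It is installed as the release callback of the packet's reference lock,
+ * i.e. rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release) in
+ * glink_tx_common(), so the final rwref_put() on a packet frees it.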
+ */
+static void glink_tx_pkt_release(struct rwref_lock *tx_pkt_ref)
+{
+	struct glink_core_tx_pkt *tx_info = container_of(tx_pkt_ref,
+						struct glink_core_tx_pkt,
+						pkt_ref);
+
+	if (!list_empty(&tx_info->list_done))
+		list_del_init(&tx_info->list_done);
+	if (!list_empty(&tx_info->list_node))
+		list_del_init(&tx_info->list_node);
+	kfree(tx_info);
+}
+
+/**
+ * glink_tx_common() - Common TX implementation
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @iovec:	pointer to the vector (NULL for linear buffers)
+ * @size:	size of data
+ * @vbuf_provider: Virtual Address-space Buffer Provider for the tx buffer.
+ * @pbuf_provider: Physical Address-space Buffer Provider for the tx buffer.
+ * @tx_flags:	Flags to indicate transmit options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *	transmit operation (not fully opened); -EAGAIN if remote side
+ *	has not provided a receive intent that is big enough.
+ */
+static int glink_tx_common(void *handle, void *pkt_priv,
+	void *data, void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	uint32_t riid;
+	int ret = 0;
+	struct glink_core_tx_pkt *tx_info;
+	size_t intent_size;
+	bool is_atomic =
+		tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
+	unsigned long flags;
+
+	if (!size)
+		return -EINVAL;
+
+	if (!ctx)
+		return -EINVAL;
+
+	rwref_get(&ctx->ch_state_lhc0);
+	if (!(vbuf_provider || pbuf_provider)) {
+		rwref_put(&ctx->ch_state_lhc0);
+		return -EINVAL;
+	}
+
+	if (!ch_is_fully_opened(ctx)) {
+		rwref_put(&ctx->ch_state_lhc0);
+		return -EBUSY;
+	}
+
+	if (size > GLINK_MAX_PKT_SIZE) {
+		rwref_put(&ctx->ch_state_lhc0);
+		return -EINVAL;
+	}
+
+	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
+		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
+			rwref_put(&ctx->ch_state_lhc0);
+			return -EOPNOTSUPP;
+		}
+		tracer_pkt_log_event(data, GLINK_CORE_TX);
+	}
+
+	/* find matching rx intent (first-fit algorithm for now) */
+	if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size)) {
+		if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
+			/* no rx intent available */
+			GLINK_ERR_CH(ctx,
+				"%s: R[%u]:%zu Intent not present for lcid\n",
+				__func__, riid, size);
+			rwref_put(&ctx->ch_state_lhc0);
+			return -EAGAIN;
+		}
+		if (is_atomic && !(ctx->transport_ptr->capabilities &
+					GCAP_AUTO_QUEUE_RX_INT)) {
+			GLINK_ERR_CH(ctx,
+				"%s: Cannot request intent in atomic context\n",
+				__func__);
+			rwref_put(&ctx->ch_state_lhc0);
+			return -EINVAL;
+		}
+
+		/* request intent of correct size */
+		reinit_completion(&ctx->int_req_ack_complete);
+		ret = ctx->transport_ptr->ops->tx_cmd_rx_intent_req(
+				ctx->transport_ptr->ops, ctx->lcid, size);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
+					__func__, ret);
+			rwref_put(&ctx->ch_state_lhc0);
+			return ret;
+		}
+
+		while (ch_pop_remote_rx_intent(ctx, size, &riid,
+						&intent_size)) {
+			if (is_atomic) {
+				GLINK_ERR_CH(ctx,
+					"%s Intent of size %zu not ready\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -EAGAIN;
+			}
+
+			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
+			    || !ch_is_fully_opened(ctx)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Channel closed while waiting for intent\n",
+					__func__);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -EBUSY;
+			}
+
+			/* wait for the remote intent req ack */
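+			/*
+			 * Two-stage handshake: the remote first acks the
+			 * request (int_req_ack_complete, with int_req_ack
+			 * recording grant/denial), then queues the intent
+			 * itself and signals int_req_complete separately.
+			 */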
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_ack_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent request ack with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -ETIMEDOUT;
+			}
+
+			if (!ctx->int_req_ack) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent Request with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -EAGAIN;
+			}
+
+			/* wait for the rx_intent from remote side */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent request with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -ETIMEDOUT;
+			}
+
+			reinit_completion(&ctx->int_req_complete);
+		}
+	}
+
+	if (!is_atomic) {
+		spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2,
+				flags);
+		glink_pm_qos_vote(ctx->transport_ptr);
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+				flags);
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
+			__func__, riid, intent_size,
+			data ? data : iovec, size, current->pid);
+	tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+				is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!tx_info) {
+		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+		ch_push_remote_rx_intent(ctx, intent_size, riid);
+		rwref_put(&ctx->ch_state_lhc0);
+		return -ENOMEM;
+	}
+	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
+	INIT_LIST_HEAD(&tx_info->list_done);
+	INIT_LIST_HEAD(&tx_info->list_node);
+	tx_info->pkt_priv = pkt_priv;
+	tx_info->data = data;
+	tx_info->riid = riid;
+	tx_info->rcid = ctx->rcid;
+	tx_info->size = size;
+	tx_info->size_remaining = size;
+	tx_info->tracer_pkt = tx_flags & GLINK_TX_TRACER_PKT ? true : false;
+	tx_info->iovec = iovec ? iovec : (void *)tx_info;
+	tx_info->vprovider = vbuf_provider;
+	tx_info->pprovider = pbuf_provider;
+	tx_info->intent_size = intent_size;
+
+	/* schedule packet for transmit */
+	if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
+	    (ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		ret = xprt_single_threaded_tx(ctx->transport_ptr,
+					       ctx, tx_info);
+	else
+		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+
+	rwref_put(&ctx->ch_state_lhc0);
+	return ret;
+}
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *	transmit operation (not fully opened); -EAGAIN if remote side
+ *	has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+	     uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, data, NULL, size,
+			       tx_linear_vbuf_provider, NULL, tx_flags);
+}
+EXPORT_SYMBOL(glink_tx);
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data type that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
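+ *
+ * Illustrative receive flow (sketch): the client queues an intent, the
+ * remote transmits into it, the client's notify_rx() callback is invoked
+ * with the data pointer, and the client eventually returns the buffer:
+ *
+ *	ret = glink_queue_rx_intent(handle, pkt_priv, 1024);
+ *	...
+ *	(later, in the notify_rx() callback, once the data at ptr is consumed)
+ *	glink_rx_done(handle, ptr, false);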
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent_ptr;
+	int ret = 0;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		/* Can only queue rx intents if channel is fully opened */
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+				__func__);
+		return -EBUSY;
+	}
+
+	intent_ptr = ch_push_local_rx_intent(ctx, pkt_priv, size);
+	if (!intent_ptr) {
+		GLINK_ERR_CH(ctx,
+			"%s: Intent pointer allocation failed size[%zu]\n",
+			__func__, size);
+		return -ENOMEM;
+	}
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
+			intent_ptr->intent_size);
+
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+		return ret;
+
+	/* notify remote side of rx intent */
+	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
+		ctx->transport_ptr->ops, ctx->lcid, size, intent_ptr->id);
+	if (ret)
+		/* unable to transmit, dequeue intent */
+		ch_remove_local_rx_intent(ctx, intent_ptr->id);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_queue_rx_intent);
+
+/**
+ * glink_rx_intent_exists() - Check if an intent exists.
+ *
+ * @handle:	handle returned by glink_open()
+ * @size:	size of an intent to check or 0 for any intent
+ *
+ * Return: TRUE if an intent exists with greater than or equal to the size
+ *	else FALSE
+ */
+bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	if (!ctx || !ch_is_fully_opened(ctx))
+		return false;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (size <= intent->intent_size) {
+			spin_unlock_irqrestore(
+				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	return false;
+}
+EXPORT_SYMBOL(glink_rx_intent_exists);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle:	handle returned by glink_open()
+ * @ptr:	data pointer provided in the notify_rx() call
+ * @reuse:	if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *liid_ptr;
+	uint32_t id;
+	int ret = 0;
+
+	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
+
+	if (IS_ERR_OR_NULL(liid_ptr)) {
+		/* invalid pointer */
+		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		return -EINVAL;
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: L[%u]: data[%p]. TID %u\n",
+			__func__, liid_ptr->id, ptr, current->pid);
TID %u\n",
+		__func__, liid_ptr->id, ptr, current->pid);
+	id = liid_ptr->id;
+	if (reuse) {
+		ret = ctx->transport_ptr->ops->reuse_rx_intent(
+				ctx->transport_ptr->ops, liid_ptr);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Intent reuse err %d for %p\n",
+					__func__, ret, ptr);
+			ret = -ENOBUFS;
+			reuse = false;
+			ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+		}
+	} else {
+		ctx->transport_ptr->ops->deallocate_rx_intent(
+				ctx->transport_ptr->ops, liid_ptr);
+	}
+	ch_remove_local_rx_intent_notified(ctx, liid_ptr, reuse);
+	/* send rx done */
+	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
+			ctx->lcid, id, reuse);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_rx_done);
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ *	notify_tx_done notification
+ * @iovec: pointer to the vector (must remain valid until notify_tx_done
+ *	notification)
+ * @size: size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *	in virtual address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *	in physical address space
+ * @tx_flags: Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *	transmit operation (not fully opened); -EAGAIN if remote side has
+ *	not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+	void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, NULL, iovec, size,
+			vbuf_provider, pbuf_provider, tx_flags);
+}
+EXPORT_SYMBOL(glink_txv);
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle: handle returned by glink_open()
+ * @sigs: modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	ctx->lsigs = sigs;
+
+	ret = ctx->transport_ptr->ops->tx_cmd_set_sigs(ctx->transport_ptr->ops,
+			ctx->lcid, ctx->lsigs);
+	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_sigs_set);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle: handle returned by glink_open()
+ * @sigs: Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx || !sigs)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->lsigs;
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_local_get);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle: handle returned by glink_open()
+ * @sigs: Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void
*handle, uint32_t *sigs) +{ + struct channel_ctx *ctx = (struct channel_ctx *)handle; + + if (!ctx || !sigs) + return -EINVAL; + + if (!ch_is_fully_opened(ctx)) { + GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n", + __func__); + return -EBUSY; + } + + *sigs = ctx->rsigs; + return 0; +} +EXPORT_SYMBOL(glink_sigs_remote_get); + +/** + * glink_register_link_state_cb() - Register for link state notification + * @link_info: Data structure containing the link identification and callback. + * @priv: Private information to be passed with the callback. + * + * This function is used to register a notifier to receive the updates about a + * link's/transport's state. This notifier needs to be registered first before + * an attempt to open a channel. + * + * Return: a reference to the notifier handle. + */ +void *glink_register_link_state_cb(struct glink_link_info *link_info, + void *priv) +{ + struct link_state_notifier_info *notif_info; + + if (!link_info || !link_info->glink_link_state_notif_cb) + return ERR_PTR(-EINVAL); + + notif_info = kzalloc(sizeof(*notif_info), GFP_KERNEL); + if (!notif_info) { + GLINK_ERR("%s: Error allocating link state notifier info\n", + __func__); + return ERR_PTR(-ENOMEM); + } + if (link_info->transport) + strlcpy(notif_info->transport, link_info->transport, + GLINK_NAME_SIZE); + + if (link_info->edge) + strlcpy(notif_info->edge, link_info->edge, GLINK_NAME_SIZE); + notif_info->priv = priv; + notif_info->glink_link_state_notif_cb = + link_info->glink_link_state_notif_cb; + + mutex_lock(&link_state_notifier_lock_lha1); + list_add_tail(¬if_info->list, &link_state_notifier_list); + mutex_unlock(&link_state_notifier_lock_lha1); + + notif_if_up_all_xprts(notif_info); + return notif_info; +} +EXPORT_SYMBOL(glink_register_link_state_cb); + +/** + * glink_unregister_link_state_cb() - Unregister the link state notification + * notif_handle: Handle to be unregistered. + * + * This function is used to unregister a notifier to stop receiving the updates + * about a link's/ transport's state. + */ +void glink_unregister_link_state_cb(void *notif_handle) +{ + struct link_state_notifier_info *notif_info, *tmp_notif_info; + + if (IS_ERR_OR_NULL(notif_handle)) + return; + + mutex_lock(&link_state_notifier_lock_lha1); + list_for_each_entry_safe(notif_info, tmp_notif_info, + &link_state_notifier_list, list) { + if (notif_info == notif_handle) { + list_del(¬if_info->list); + mutex_unlock(&link_state_notifier_lock_lha1); + kfree(notif_info); + return; + } + } + mutex_unlock(&link_state_notifier_lock_lha1); + return; +} +EXPORT_SYMBOL(glink_unregister_link_state_cb); + +/** + * glink_qos_latency() - Register the latency QoS requirement + * @handle: Channel handle in which the latency is required. + * @latency_us: Latency requirement in units of micro-seconds. + * @pkt_size: Worst case packet size for which the latency is required. + * + * This function is used to register the latency requirement for a channel + * and ensures that the latency requirement for this channel is met without + * impacting the existing latency requirements of other channels. 
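+ *
+ * As a worked illustration (assuming glink_qos_calc_rate_kBps() simply
+ * divides packet size by transmit time), a worst-case packet of 1024 bytes
+ * with a latency budget of 500 us maps to a requested rate of roughly
+ * 1024 / 500 ~= 2 bytes/us, i.e. about 2048 kBps, which
+ * glink_qos_assign_priority() then matches against the transport's
+ * priority bins.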
+ * + * Return: 0 if QoS request is achievable, standard Linux error codes on error + */ +int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size) +{ + struct channel_ctx *ctx = (struct channel_ctx *)handle; + int ret; + unsigned long req_rate_kBps; + + if (!ctx || !latency_us || !pkt_size) + return -EINVAL; + + if (!ch_is_fully_opened(ctx)) { + GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n", + __func__); + return -EBUSY; + } + + req_rate_kBps = glink_qos_calc_rate_kBps(pkt_size, latency_us); + + ret = glink_qos_assign_priority(ctx, req_rate_kBps); + if (ret < 0) + GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n", + __func__, latency_us, pkt_size); + + return ret; +} +EXPORT_SYMBOL(glink_qos_latency); + +/** + * glink_qos_cancel() - Cancel or unregister the QoS request + * @handle: Channel handle for which the QoS request is cancelled. + * + * This function is used to cancel/unregister the QoS requests for a channel. + * + * Return: 0 on success, standard Linux error codes on failure + */ +int glink_qos_cancel(void *handle) +{ + struct channel_ctx *ctx = (struct channel_ctx *)handle; + int ret; + + if (!ctx) + return -EINVAL; + + if (!ch_is_fully_opened(ctx)) { + GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n", + __func__); + return -EBUSY; + } + + ret = glink_qos_reset_priority(ctx); + return ret; +} +EXPORT_SYMBOL(glink_qos_cancel); + +/** + * glink_qos_start() - Start of the transmission requiring QoS + * @handle: Channel handle in which the transmit activity is performed. + * + * This function is called by the clients to indicate G-Link regarding the + * start of the transmission which requires a certain QoS. The clients + * must account for the QoS ramp time to ensure meeting the QoS. + * + * Return: 0 on success, standard Linux error codes on failure + */ +int glink_qos_start(void *handle) +{ + struct channel_ctx *ctx = (struct channel_ctx *)handle; + int ret; + unsigned long flags; + + if (!ctx) + return -EINVAL; + + if (!ch_is_fully_opened(ctx)) { + GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n", + __func__); + return -EBUSY; + } + + spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags); + spin_lock(&ctx->tx_lists_lock_lhc3); + ret = glink_qos_add_ch_tx_intent(ctx); + spin_unlock(&ctx->tx_lists_lock_lhc3); + spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags); + return ret; +} +EXPORT_SYMBOL(glink_qos_start); + +/** + * glink_qos_get_ramp_time() - Get the QoS ramp time + * @handle: Channel handle for which the QoS ramp time is required. + * @pkt_size: Worst case packet size. + * + * This function is called by the clients to obtain the ramp time required + * to meet the QoS requirements. + * + * Return: QoS ramp time is returned in units of micro-seconds on success, + * standard Linux error codes cast to unsigned long on error. + */ +unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size) +{ + struct channel_ctx *ctx = (struct channel_ctx *)handle; + + if (!ctx) + return (unsigned long)-EINVAL; + + if (!ch_is_fully_opened(ctx)) { + GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n", + __func__); + return (unsigned long)-EBUSY; + } + + return ctx->transport_ptr->ops->get_power_vote_ramp_time( + ctx->transport_ptr->ops, + glink_prio_to_power_state(ctx->transport_ptr, + ctx->initial_priority)); +} +EXPORT_SYMBOL(glink_qos_get_ramp_time); + +/** + * glink_rpm_rx_poll() - Poll and receive any available events + * @handle: Channel handle in which this operation is performed. 
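+ *
+ * A hypothetical polled-mode sequence (illustrative only, error handling
+ * omitted) pairs this call with glink_rpm_mask_rx_interrupt():
+ *
+ *	glink_rpm_mask_rx_interrupt(handle, true, NULL);
+ *	while (glink_rpm_rx_poll(handle) > 0)
+ *		;	(drain events; packets still surface via notify_rx)
+ *	glink_rpm_mask_rx_interrupt(handle, false, NULL);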
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a return value > 0 is returned indicating that some events
+ * were processed, clients should only use the notification functions passed
+ * into glink_open() to determine if an entire packet has been received since
+ * some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	if (!ctx->transport_ptr ||
+	    !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->poll(ctx->transport_ptr->ops,
+			ctx->lcid);
+}
+EXPORT_SYMBOL(glink_rpm_rx_poll);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle: Channel handle in which this operation is performed.
+ * @mask: Flag to mask or unmask the interrupt.
+ * @pstruct: Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * "mask" set to true indicates masking the interrupt and when set to false
+ * indicates unmasking the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	if (!ctx->transport_ptr ||
+	    !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->mask_rx_irq(ctx->transport_ptr->ops,
+			ctx->lcid, mask, pstruct);
+
+}
+EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
+
+/**
+ * glink_wait_link_down() - Get status of link
+ * @handle: Channel handle in which this operation is performed
+ *
+ * This function will query the transport for its status, to allow clients to
+ * proceed in cleanup operations.
+ */
+int glink_wait_link_down(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+	if (!ctx->transport_ptr)
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
+}
+EXPORT_SYMBOL(glink_wait_link_down);
+
+/**
+ * glink_xprt_ctx_release - Free the transport context
+ * @xprt_st_lock: handle to the rwref_lock associated with the transport
+ *
+ * This should only be called when the reference count associated with the
+ * transport goes to zero.
+ */
+void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_dbgfs xprt_rm_dbgfs;
+	struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+			struct glink_core_xprt_ctx, xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s] context\n", __func__,
+			xprt_ctx->name,
+			xprt_ctx->edge);
+	xprt_rm_dbgfs.curr_name = xprt_ctx->name;
+	xprt_rm_dbgfs.par_name = "xprt";
+	glink_debugfs_remove_recur(&xprt_rm_dbgfs);
+	GLINK_INFO("%s: xprt debugfs removed\n", __func__);
+	destroy_workqueue(xprt_ctx->tx_wq);
+	glink_core_deinit_xprt_qos_cfg(xprt_ctx);
+	kfree(xprt_ctx);
+	xprt_ctx = NULL;
+}
+
+/**
+ * glink_dummy_xprt_ctx_release - free the dummy transport context
+ * @xprt_st_lock: Handle to the rwref_lock associated with the transport.
+ * + * The release function is called when all the channels on this dummy + * transport are closed and the reference count goes to zero. + */ +static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock) +{ + struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock, + struct glink_core_xprt_ctx, xprt_state_lhb0); + GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__, + xprt_ctx->name, + xprt_ctx->edge); + kfree(xprt_ctx); +} + +/** + * glink_xprt_name_to_id() - convert transport name to id + * @name: Name of the transport. + * @id: Assigned id. + * + * Return: 0 on success or standard Linux error code. + */ +int glink_xprt_name_to_id(const char *name, uint16_t *id) +{ + if (!strcmp(name, "smem")) { + *id = SMEM_XPRT_ID; + return 0; + } + if (!strcmp(name, "mailbox")) { + *id = SMEM_XPRT_ID; + return 0; + } + if (!strcmp(name, "smd_trans")) { + *id = SMD_TRANS_XPRT_ID; + return 0; + } + if (!strcmp(name, "lloop")) { + *id = LLOOP_XPRT_ID; + return 0; + } + if (!strcmp(name, "mock")) { + *id = MOCK_XPRT_ID; + return 0; + } + if (!strcmp(name, "mock_low")) { + *id = MOCK_XPRT_LOW_ID; + return 0; + } + if (!strcmp(name, "mock_high")) { + *id = MOCK_XPRT_HIGH_ID; + return 0; + } + return -ENODEV; +} +EXPORT_SYMBOL(glink_xprt_name_to_id); + +/** + * of_get_glink_core_qos_cfg() - Parse the qos related dt entries + * @phandle: The handle to the qos related node in DT. + * @cfg: The transport configuration to be filled. + * + * Return: 0 on Success, standard Linux error otherwise. + */ +int of_get_glink_core_qos_cfg(struct device_node *phandle, + struct glink_core_transport_cfg *cfg) +{ + int rc, i; + char *key; + uint32_t num_flows; + uint32_t *arr32; + + if (!phandle) { + GLINK_ERR("%s: phandle is NULL\n", __func__); + return -EINVAL; + } + + key = "qcom,mtu-size"; + rc = of_property_read_u32(phandle, key, (uint32_t *)&cfg->mtu); + if (rc) { + GLINK_ERR("%s: missing key %s\n", __func__, key); + return -ENODEV; + } + + key = "qcom,tput-stats-cycle"; + rc = of_property_read_u32(phandle, key, &cfg->token_count); + if (rc) { + GLINK_ERR("%s: missing key %s\n", __func__, key); + rc = -ENODEV; + goto error; + } + + key = "qcom,flow-info"; + if (!of_find_property(phandle, key, &num_flows)) { + GLINK_ERR("%s: missing key %s\n", __func__, key); + rc = -ENODEV; + goto error; + } + + num_flows /= sizeof(uint32_t); + if (num_flows % 2) { + GLINK_ERR("%s: Invalid flow info length\n", __func__); + rc = -EINVAL; + goto error; + } + + num_flows /= 2; + cfg->num_flows = num_flows; + + cfg->flow_info = kmalloc_array(num_flows, sizeof(*(cfg->flow_info)), + GFP_KERNEL); + if (!cfg->flow_info) { + GLINK_ERR("%s: Memory allocation for flow info failed\n", + __func__); + rc = -ENOMEM; + goto error; + } + arr32 = kmalloc_array(num_flows * 2, sizeof(uint32_t), GFP_KERNEL); + if (!arr32) { + GLINK_ERR("%s: Memory allocation for temporary array failed\n", + __func__); + rc = -ENOMEM; + goto temp_mem_alloc_fail; + } + + of_property_read_u32_array(phandle, key, arr32, num_flows * 2); + + for (i = 0; i < num_flows; i++) { + cfg->flow_info[i].mtu_tx_time_us = arr32[2 * i]; + cfg->flow_info[i].power_state = arr32[2 * i + 1]; + } + + kfree(arr32); + of_node_put(phandle); + return 0; + +temp_mem_alloc_fail: + kfree(cfg->flow_info); +error: + cfg->mtu = 0; + cfg->token_count = 0; + cfg->num_flows = 0; + cfg->flow_info = NULL; + return rc; +} +EXPORT_SYMBOL(of_get_glink_core_qos_cfg); + +/** + * glink_core_init_xprt_qos_cfg() - Initialize a transport's QoS configuration + * @xprt_ptr: Transport to 
be initialized with QoS configuration. + * @cfg: Data structure containing QoS configuration. + * + * This function is used during the transport registration to initialize it + * with QoS configuration. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_core_init_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr, + struct glink_core_transport_cfg *cfg) +{ + int i; + + xprt_ptr->mtu = cfg->mtu ? cfg->mtu : GLINK_QOS_DEF_MTU; + xprt_ptr->num_priority = cfg->num_flows ? cfg->num_flows : + GLINK_QOS_DEF_NUM_PRIORITY; + xprt_ptr->token_count = cfg->token_count ? cfg->token_count : + GLINK_QOS_DEF_NUM_TOKENS; + + xprt_ptr->prio_bin = kzalloc(xprt_ptr->num_priority * + sizeof(struct glink_qos_priority_bin), + GFP_KERNEL); + if (!xprt_ptr->prio_bin) { + GLINK_ERR("%s: unable to allocate priority bins\n", __func__); + return -ENOMEM; + } + for (i = 1; i < xprt_ptr->num_priority; i++) { + xprt_ptr->prio_bin[i].max_rate_kBps = + glink_qos_calc_rate_kBps(xprt_ptr->mtu, + cfg->flow_info[i].mtu_tx_time_us); + xprt_ptr->prio_bin[i].power_state = + cfg->flow_info[i].power_state; + INIT_LIST_HEAD(&xprt_ptr->prio_bin[i].tx_ready); + } + xprt_ptr->prio_bin[0].max_rate_kBps = 0; + if (cfg->flow_info) + xprt_ptr->prio_bin[0].power_state = + cfg->flow_info[0].power_state; + INIT_LIST_HEAD(&xprt_ptr->prio_bin[0].tx_ready); + xprt_ptr->threshold_rate_kBps = + xprt_ptr->prio_bin[xprt_ptr->num_priority - 1].max_rate_kBps; + + return 0; +} + +/** + * glink_core_deinit_xprt_qos_cfg() - Reset a transport's QoS configuration + * @xprt_ptr: Transport to be deinitialized. + * + * This function is used during the time of transport unregistration to + * de-initialize the QoS configuration from a transport. + */ +static void glink_core_deinit_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr) +{ + kfree(xprt_ptr->prio_bin); + xprt_ptr->prio_bin = NULL; + xprt_ptr->mtu = 0; + xprt_ptr->num_priority = 0; + xprt_ptr->token_count = 0; + xprt_ptr->threshold_rate_kBps = 0; +} + +/** + * glink_core_register_transport() - register a new transport + * @if_ptr: The interface to the transport. + * @cfg: Description and configuration of the transport. + * + * Return: 0 on success, EINVAL for invalid input. 
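+ *
+ * The QoS fields of @cfg (mtu, token_count, flow_info) are typically
+ * populated from devicetree via of_get_glink_core_qos_cfg(). A hypothetical
+ * node could carry, for example:
+ *
+ *	qcom,mtu-size = <8192>;
+ *	qcom,tput-stats-cycle = <10>;
+ *	qcom,flow-info = <2000 0>, <400 1>;
+ *
+ * where each qcom,flow-info pair is <mtu_tx_time_us power_state> and thus
+ * describes one priority bin.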
+ */ +int glink_core_register_transport(struct glink_transport_if *if_ptr, + struct glink_core_transport_cfg *cfg) +{ + struct glink_core_xprt_ctx *xprt_ptr; + size_t len; + uint16_t id; + int ret; + char log_name[GLINK_NAME_SIZE*2+2] = {0}; + + if (!if_ptr || !cfg || !cfg->name || !cfg->edge) + return -EINVAL; + + len = strlen(cfg->name); + if (len == 0 || len >= GLINK_NAME_SIZE) + return -EINVAL; + + len = strlen(cfg->edge); + if (len == 0 || len >= GLINK_NAME_SIZE) + return -EINVAL; + + if (cfg->versions_entries < 1) + return -EINVAL; + + ret = glink_xprt_name_to_id(cfg->name, &id); + if (ret) + return ret; + + xprt_ptr = kzalloc(sizeof(struct glink_core_xprt_ctx), GFP_KERNEL); + if (xprt_ptr == NULL) + return -ENOMEM; + + xprt_ptr->id = id; + rwref_lock_init(&xprt_ptr->xprt_state_lhb0, + glink_xprt_ctx_release); + strlcpy(xprt_ptr->name, cfg->name, GLINK_NAME_SIZE); + strlcpy(xprt_ptr->edge, cfg->edge, GLINK_NAME_SIZE); + xprt_ptr->versions = cfg->versions; + xprt_ptr->versions_entries = cfg->versions_entries; + xprt_ptr->local_version_idx = cfg->versions_entries - 1; + xprt_ptr->remote_version_idx = cfg->versions_entries - 1; + xprt_ptr->l_features = + cfg->versions[cfg->versions_entries - 1].features; + if (!if_ptr->poll) + if_ptr->poll = dummy_poll; + if (!if_ptr->mask_rx_irq) + if_ptr->mask_rx_irq = dummy_mask_rx_irq; + if (!if_ptr->reuse_rx_intent) + if_ptr->reuse_rx_intent = dummy_reuse_rx_intent; + if (!if_ptr->wait_link_down) + if_ptr->wait_link_down = dummy_wait_link_down; + if (!if_ptr->tx_cmd_tracer_pkt) + if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt; + if (!if_ptr->get_power_vote_ramp_time) + if_ptr->get_power_vote_ramp_time = + dummy_get_power_vote_ramp_time; + if (!if_ptr->power_vote) + if_ptr->power_vote = dummy_power_vote; + if (!if_ptr->power_unvote) + if_ptr->power_unvote = dummy_power_unvote; + xprt_ptr->capabilities = 0; + xprt_ptr->ops = if_ptr; + spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1); + xprt_ptr->next_lcid = 1; /* 0 reserved for default unconfigured */ + INIT_LIST_HEAD(&xprt_ptr->free_lcid_list); + xprt_ptr->max_cid = cfg->max_cid; + xprt_ptr->max_iid = cfg->max_iid; + xprt_ptr->local_state = GLINK_XPRT_DOWN; + xprt_ptr->remote_neg_completed = false; + INIT_LIST_HEAD(&xprt_ptr->channels); + ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg); + if (ret < 0) { + kfree(xprt_ptr); + return ret; + } + spin_lock_init(&xprt_ptr->tx_ready_lock_lhb2); + mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb3); + INIT_WORK(&xprt_ptr->tx_work, tx_work_func); + xprt_ptr->tx_wq = create_singlethread_workqueue("glink_tx"); + if (IS_ERR_OR_NULL(xprt_ptr->tx_wq)) { + GLINK_ERR("%s: unable to allocate workqueue\n", __func__); + glink_core_deinit_xprt_qos_cfg(xprt_ptr); + kfree(xprt_ptr); + return -ENOMEM; + } + INIT_DELAYED_WORK(&xprt_ptr->pm_qos_work, glink_pm_qos_cancel_worker); + pm_qos_add_request(&xprt_ptr->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + if_ptr->glink_core_priv = xprt_ptr; + if_ptr->glink_core_if_ptr = &core_impl; + + mutex_lock(&transport_list_lock_lha0); + list_add_tail(&xprt_ptr->list_node, &transport_list); + mutex_unlock(&transport_list_lock_lha0); + glink_debugfs_add_xprt(xprt_ptr); + snprintf(log_name, sizeof(log_name), "%s_%s", + xprt_ptr->edge, xprt_ptr->name); + xprt_ptr->log_ctx = ipc_log_context_create(NUM_LOG_PAGES, log_name, 0); + if (!xprt_ptr->log_ctx) + GLINK_ERR("%s: unable to create log context for [%s:%s]\n", + __func__, xprt_ptr->edge, xprt_ptr->name); + + return 0; +} +EXPORT_SYMBOL(glink_core_register_transport); + 
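+/*
+ * Example (illustrative sketch only, not part of the API): a minimal
+ * transport registration. The version table, feature mask and ops instance
+ * below are hypothetical; a real transport fills in the complete
+ * glink_transport_if before calling glink_core_register_transport().
+ *
+ *	static struct glink_core_version my_versions[] = {
+ *		{ .version = 1, .features = 0, .negotiate_features = my_nf },
+ *	};
+ *	static struct glink_transport_if my_if = { ... };
+ *
+ *	struct glink_core_transport_cfg cfg = {
+ *		.name = "smem",
+ *		.edge = "mpss",
+ *		.versions = my_versions,
+ *		.versions_entries = ARRAY_SIZE(my_versions),
+ *		.max_cid = 0xFF,
+ *		.max_iid = 0xFF,
+ *	};
+ *	ret = glink_core_register_transport(&my_if, &cfg);
+ */
+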
+/** + * glink_core_unregister_transport() - unregister a transport + * + * @if_ptr: The interface to the transport. + */ +void glink_core_unregister_transport(struct glink_transport_if *if_ptr) +{ + struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv; + + GLINK_DBG_XPRT(xprt_ptr, "%s: destroying transport\n", __func__); + if (xprt_ptr->local_state != GLINK_XPRT_DOWN) { + GLINK_ERR_XPRT(xprt_ptr, + "%s: link_down should have been called before this\n", + __func__); + return; + } + + mutex_lock(&transport_list_lock_lha0); + list_del(&xprt_ptr->list_node); + mutex_unlock(&transport_list_lock_lha0); + flush_delayed_work(&xprt_ptr->pm_qos_work); + pm_qos_remove_request(&xprt_ptr->pm_qos_req); + ipc_log_context_destroy(xprt_ptr->log_ctx); + xprt_ptr->log_ctx = NULL; + rwref_put(&xprt_ptr->xprt_state_lhb0); +} +EXPORT_SYMBOL(glink_core_unregister_transport); + +/** + * glink_core_link_up() - transport link-up notification + * + * @if_ptr: pointer to transport interface + */ +static void glink_core_link_up(struct glink_transport_if *if_ptr) +{ + struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv; + + /* start local negotiation */ + xprt_ptr->local_state = GLINK_XPRT_NEGOTIATING; + xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1; + xprt_ptr->l_features = + xprt_ptr->versions[xprt_ptr->local_version_idx].features; + if_ptr->tx_cmd_version(if_ptr, + xprt_ptr->versions[xprt_ptr->local_version_idx].version, + xprt_ptr->versions[xprt_ptr->local_version_idx].features); + +} + +/** + * glink_core_link_down() - transport link-down notification + * + * @if_ptr: pointer to transport interface + */ +static void glink_core_link_down(struct glink_transport_if *if_ptr) +{ + struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv; + + rwref_write_get(&xprt_ptr->xprt_state_lhb0); + xprt_ptr->next_lcid = 1; + xprt_ptr->local_state = GLINK_XPRT_DOWN; + xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1; + xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1; + xprt_ptr->l_features = + xprt_ptr->versions[xprt_ptr->local_version_idx].features; + xprt_ptr->remote_neg_completed = false; + rwref_write_put(&xprt_ptr->xprt_state_lhb0); + GLINK_DBG_XPRT(xprt_ptr, + "%s: Flushing work from tx_wq. Thread: %u\n", __func__, + current->pid); + flush_workqueue(xprt_ptr->tx_wq); + glink_core_channel_cleanup(xprt_ptr); + check_link_notifier_and_notify(xprt_ptr, GLINK_LINK_STATE_DOWN); +} + +/** + * glink_create_dummy_xprt_ctx() - create a dummy transport that replaces all + * the transport interface functions with a dummy + * @orig_xprt_ctx: Pointer to the original transport context. + * + * The dummy transport is used only when it is swapped with the actual transport + * pointer in ssr/unregister case. + * + * Return: Pointer to dummy transport context. 
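+ *
+ * Lifecycle sketch: glink_core_channel_cleanup() moves channels that are
+ * still open onto this dummy transport and runs the remote-close path on
+ * them; once the last channel reference is dropped, the context is freed
+ * through glink_dummy_xprt_ctx_release().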
+ */ +static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx( + struct glink_core_xprt_ctx *orig_xprt_ctx) +{ + + struct glink_core_xprt_ctx *xprt_ptr; + struct glink_transport_if *if_ptr; + + xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL); + if (!xprt_ptr) + return ERR_PTR(-ENOMEM); + if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL); + if (!if_ptr) { + kfree(xprt_ptr); + return ERR_PTR(-ENOMEM); + } + rwref_lock_init(&xprt_ptr->xprt_state_lhb0, + glink_dummy_xprt_ctx_release); + + strlcpy(xprt_ptr->name, "dummy", GLINK_NAME_SIZE); + strlcpy(xprt_ptr->edge, orig_xprt_ctx->edge, GLINK_NAME_SIZE); + if_ptr->poll = dummy_poll; + if_ptr->mask_rx_irq = dummy_mask_rx_irq; + if_ptr->reuse_rx_intent = dummy_reuse_rx_intent; + if_ptr->wait_link_down = dummy_wait_link_down; + if_ptr->allocate_rx_intent = dummy_allocate_rx_intent; + if_ptr->deallocate_rx_intent = dummy_deallocate_rx_intent; + if_ptr->tx_cmd_local_rx_intent = dummy_tx_cmd_local_rx_intent; + if_ptr->tx_cmd_local_rx_done = dummy_tx_cmd_local_rx_done; + if_ptr->tx = dummy_tx; + if_ptr->tx_cmd_rx_intent_req = dummy_tx_cmd_rx_intent_req; + if_ptr->tx_cmd_remote_rx_intent_req_ack = + dummy_tx_cmd_remote_rx_intent_req_ack; + if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs; + if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close; + if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack; + + xprt_ptr->ops = if_ptr; + xprt_ptr->log_ctx = log_ctx; + spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1); + INIT_LIST_HEAD(&xprt_ptr->free_lcid_list); + xprt_ptr->local_state = GLINK_XPRT_DOWN; + xprt_ptr->remote_neg_completed = false; + INIT_LIST_HEAD(&xprt_ptr->channels); + spin_lock_init(&xprt_ptr->tx_ready_lock_lhb2); + mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb3); + return xprt_ptr; +} + +/** + * glink_core_channel_cleanup() - cleanup all channels for the transport + * + * @xprt_ptr: pointer to transport context + * + * This function should be called either from link_down or ssr + */ +static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr) +{ + unsigned long flags, d_flags; + struct channel_ctx *ctx, *tmp_ctx; + struct channel_lcid *temp_lcid, *temp_lcid1; + struct glink_core_xprt_ctx *dummy_xprt_ctx; + + dummy_xprt_ctx = glink_create_dummy_xprt_ctx(xprt_ptr); + if (IS_ERR_OR_NULL(dummy_xprt_ctx)) { + GLINK_ERR("%s: Dummy Transport creation failed\n", __func__); + return; + } + + rwref_get(&dummy_xprt_ctx->xprt_state_lhb0); + spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags); + list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels, + port_list_node) { + rwref_get(&ctx->ch_state_lhc0); + if (ctx->local_open_state == GLINK_CHANNEL_OPENED || + ctx->local_open_state == GLINK_CHANNEL_OPENING) { + rwref_get(&dummy_xprt_ctx->xprt_state_lhb0); + spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, + d_flags); + list_move_tail(&ctx->port_list_node, + &dummy_xprt_ctx->channels); + spin_unlock_irqrestore( + &dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); + ctx->transport_ptr = dummy_xprt_ctx; + } else { + /* local state is in either CLOSED or CLOSING */ + spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, + flags); + glink_core_remote_close_common(ctx); + if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) + glink_core_ch_close_ack_common(ctx); + /* Channel should be fully closed now. 
Delete here */ + if (ch_is_fully_closed(ctx)) + glink_delete_ch_from_list(ctx, false); + spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags); + } + rwref_put(&ctx->ch_state_lhc0); + } + list_for_each_entry_safe(temp_lcid, temp_lcid1, + &xprt_ptr->free_lcid_list, list_node) { + list_del(&temp_lcid->list_node); + kfree(&temp_lcid->list_node); + } + spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags); + + spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); + list_for_each_entry_safe(ctx, tmp_ctx, &dummy_xprt_ctx->channels, + port_list_node) { + rwref_get(&ctx->ch_state_lhc0); + spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, + d_flags); + glink_core_remote_close_common(ctx); + spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, + d_flags); + rwref_put(&ctx->ch_state_lhc0); + } + spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); + rwref_put(&dummy_xprt_ctx->xprt_state_lhb0); +} +/** + * glink_core_rx_cmd_version() - receive version/features from remote system + * + * @if_ptr: pointer to transport interface + * @r_version: remote version + * @r_features: remote features + * + * This function is called in response to a remote-initiated version/feature + * negotiation sequence. + */ +static void glink_core_rx_cmd_version(struct glink_transport_if *if_ptr, + uint32_t r_version, uint32_t r_features) +{ + struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv; + const struct glink_core_version *versions = xprt_ptr->versions; + bool neg_complete = false; + uint32_t l_version; + + if (xprt_is_fully_opened(xprt_ptr)) { + GLINK_ERR_XPRT(xprt_ptr, + "%s: Negotiation already complete\n", __func__); + return; + } + + l_version = versions[xprt_ptr->remote_version_idx].version; + + GLINK_INFO_XPRT(xprt_ptr, + "%s: [local]%x:%08x [remote]%x:%08x\n", __func__, + l_version, xprt_ptr->l_features, r_version, r_features); + + if (l_version > r_version) { + /* Find matching version */ + while (true) { + uint32_t rver_idx; + + if (xprt_ptr->remote_version_idx == 0) { + /* version negotiation failed */ + GLINK_ERR_XPRT(xprt_ptr, + "%s: Transport negotiation failed\n", + __func__); + l_version = 0; + xprt_ptr->l_features = 0; + break; + } + --xprt_ptr->remote_version_idx; + rver_idx = xprt_ptr->remote_version_idx; + + if (versions[rver_idx].version <= r_version) { + /* found a potential match */ + l_version = versions[rver_idx].version; + xprt_ptr->l_features = + versions[rver_idx].features; + break; + } + } + } + + if (l_version == r_version) { + GLINK_INFO_XPRT(xprt_ptr, + "%s: Remote and local version are matched %x:%08x\n", + __func__, r_version, r_features); + if (xprt_ptr->l_features != r_features) { + uint32_t rver_idx = xprt_ptr->remote_version_idx; + + xprt_ptr->l_features = versions[rver_idx] + .negotiate_features(if_ptr, + &xprt_ptr->versions[rver_idx], + r_features); + GLINK_INFO_XPRT(xprt_ptr, + "%s: negotiate features %x:%08x\n", + __func__, l_version, xprt_ptr->l_features); + } + neg_complete = true; + } + if_ptr->tx_cmd_version_ack(if_ptr, l_version, xprt_ptr->l_features); + + if (neg_complete) { + GLINK_INFO_XPRT(xprt_ptr, + "%s: Remote negotiation complete %x:%08x\n", __func__, + l_version, xprt_ptr->l_features); + + if (xprt_ptr->local_state == GLINK_XPRT_OPENED) { + xprt_ptr->capabilities = if_ptr->set_version(if_ptr, + l_version, + xprt_ptr->l_features); + } + if_ptr->glink_core_priv->remote_neg_completed = true; + if (xprt_is_fully_opened(xprt_ptr)) + check_link_notifier_and_notify(xprt_ptr, + GLINK_LINK_STATE_UP); + } +} + +/** 
+ * glink_core_rx_cmd_version_ack() - receive negotiation ack from remote system
+ *
+ * @if_ptr: pointer to transport interface
+ * @r_version: remote version response
+ * @r_features: remote features response
+ *
+ * This function is called in response to a local-initiated version/feature
+ * negotiation sequence and is the counter-offer from the remote side based
+ * upon the initial version and feature set requested.
+ */
+static void glink_core_rx_cmd_version_ack(struct glink_transport_if *if_ptr,
+	uint32_t r_version, uint32_t r_features)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+	const struct glink_core_version *versions = xprt_ptr->versions;
+	uint32_t l_version;
+	bool neg_complete = false;
+
+	if (xprt_is_fully_opened(xprt_ptr)) {
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: Negotiation already complete\n", __func__);
+		return;
+	}
+
+	l_version = versions[xprt_ptr->local_version_idx].version;
+
+	GLINK_INFO_XPRT(xprt_ptr,
+		"%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+		l_version, xprt_ptr->l_features, r_version, r_features);
+
+	if (l_version > r_version) {
+		/* find matching version */
+		while (true) {
+			uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+			if (xprt_ptr->local_version_idx == 0) {
+				/* version negotiation failed */
+				xprt_ptr->local_state = GLINK_XPRT_FAILED;
+				GLINK_ERR_XPRT(xprt_ptr,
+					"%s: Transport negotiation failed\n",
+					__func__);
+				l_version = 0;
+				xprt_ptr->l_features = 0;
+				break;
+			}
+			--xprt_ptr->local_version_idx;
+			lver_idx = xprt_ptr->local_version_idx;
+
+			if (versions[lver_idx].version <= r_version) {
+				/* found a potential match */
+				l_version = versions[lver_idx].version;
+				xprt_ptr->l_features =
+					versions[lver_idx].features;
+				break;
+			}
+		}
+	} else if (l_version == r_version) {
+		if (xprt_ptr->l_features != r_features) {
+			/* version matches, negotiate features */
+			uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+			xprt_ptr->l_features = versions[lver_idx]
+						.negotiate_features(if_ptr,
+						&versions[lver_idx],
+						r_features);
+			GLINK_INFO_XPRT(xprt_ptr,
+				"%s: negotiation features %x:%08x\n",
+				__func__, l_version, xprt_ptr->l_features);
+		} else {
+			neg_complete = true;
+		}
+	} else {
+		/*
+		 * r_version > l_version
+		 *
+		 * Remote responded with a version greater than what we
+		 * requested which is invalid and is treated as failure of the
+		 * negotiation algorithm.
+		 */
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: [local]%x:%08x [remote]%x:%08x neg failure\n",
+			__func__, l_version, xprt_ptr->l_features, r_version,
+			r_features);
+		xprt_ptr->local_state = GLINK_XPRT_FAILED;
+		l_version = 0;
+		xprt_ptr->l_features = 0;
+	}
+
+	if (neg_complete) {
+		/* negotiation complete */
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Local negotiation complete %x:%08x\n",
+			__func__, l_version, xprt_ptr->l_features);
+
+		if (xprt_ptr->remote_neg_completed) {
+			xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+					l_version,
+					xprt_ptr->l_features);
+		}
+
+		xprt_ptr->local_state = GLINK_XPRT_OPENED;
+		if (xprt_is_fully_opened(xprt_ptr))
+			check_link_notifier_and_notify(xprt_ptr,
+					GLINK_LINK_STATE_UP);
+	} else {
+		if_ptr->tx_cmd_version(if_ptr, l_version, xprt_ptr->l_features);
+	}
+}
+
+/**
+ * find_l_ctx_get() - find a local channel context based on a remote one
+ * @r_ctx: The remote channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding local ctx or NULL if not found.
+ */
+static struct channel_ctx *find_l_ctx_get(struct channel_ctx *r_ctx)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct channel_ctx *ctx;
+	unsigned long flags;
+	struct channel_ctx *l_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(r_ctx->transport_ptr->edge, xprt->edge)) {
+			rwref_write_get(&xprt->xprt_state_lhb0);
+			if (xprt->local_state != GLINK_XPRT_OPENED) {
+				rwref_write_put(&xprt->xprt_state_lhb0);
+				continue;
+			}
+			spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+			list_for_each_entry(ctx, &xprt->channels,
+						port_list_node)
+				if (!strcmp(ctx->name, r_ctx->name) &&
+						ctx->local_xprt_req &&
+						ctx->local_xprt_resp) {
+					l_ctx = ctx;
+					rwref_get(&l_ctx->ch_state_lhc0);
+				}
+			spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+						flags);
+			rwref_write_put(&xprt->xprt_state_lhb0);
+		}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	return l_ctx;
+}
+
+/**
+ * find_r_ctx_get() - find a remote channel context based on a local one
+ * @l_ctx: The local channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding remote ctx or NULL if not found.
+ */
+static struct channel_ctx *find_r_ctx_get(struct channel_ctx *l_ctx)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct channel_ctx *ctx;
+	unsigned long flags;
+	struct channel_ctx *r_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) {
+			rwref_write_get(&xprt->xprt_state_lhb0);
+			if (xprt->local_state != GLINK_XPRT_OPENED) {
+				rwref_write_put(&xprt->xprt_state_lhb0);
+				continue;
+			}
+			spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+			list_for_each_entry(ctx, &xprt->channels,
+						port_list_node)
+				if (!strcmp(ctx->name, l_ctx->name) &&
+						ctx->remote_xprt_req &&
+						ctx->remote_xprt_resp) {
+					r_ctx = ctx;
+					rwref_get(&r_ctx->ch_state_lhc0);
+				}
+			spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+						flags);
+			rwref_write_put(&xprt->xprt_state_lhb0);
+		}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	return r_ctx;
+}
+
+/**
+ * will_migrate() - will a channel migrate to a different transport
+ * @l_ctx: The local channel to migrate.
+ * @r_ctx: The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration will occur.
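+ *
+ * Worked example (hypothetical ids): if the local side requested transport
+ * id 2, the remote side requested id 1, and the channel currently sits on
+ * the transport with id 3, then new_xprt = max(2, 1) = 2 != 3, so the
+ * channel will migrate. If the channel were already on id 2, no migration
+ * would occur.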
+ */ +static bool will_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx) +{ + uint16_t new_xprt; + bool migrate = false; + + if (!r_ctx) + r_ctx = find_r_ctx_get(l_ctx); + else + rwref_get(&r_ctx->ch_state_lhc0); + if (!r_ctx) + return migrate; + + if (!l_ctx) + l_ctx = find_l_ctx_get(r_ctx); + else + rwref_get(&l_ctx->ch_state_lhc0); + if (!l_ctx) + goto exit; + + if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req && + l_ctx->local_xprt_req == l_ctx->transport_ptr->id) + goto exit; + if (l_ctx->no_migrate) + goto exit; + + if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id) + l_ctx->local_xprt_req = r_ctx->transport_ptr->id; + + new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req); + + if (new_xprt == l_ctx->transport_ptr->id) + goto exit; + + migrate = true; +exit: + if (l_ctx) + rwref_put(&l_ctx->ch_state_lhc0); + if (r_ctx) + rwref_put(&r_ctx->ch_state_lhc0); + + return migrate; +} + +/** + * ch_migrate() - migrate a channel to a different transport + * @l_ctx: The local channel to migrate. + * @r_ctx: The remote channel to migrate. + * + * One of the channel contexts can be NULL if not known, but at least one ctx + * must be provided. + * + * Return: Bool indicating if migration occurred. + */ +static bool ch_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx) +{ + uint16_t new_xprt; + struct glink_core_xprt_ctx *xprt; + unsigned long flags; + struct channel_lcid *flcid; + uint16_t best_xprt = USHRT_MAX; + struct channel_ctx *ctx_clone; + bool migrated = false; + + if (!r_ctx) + r_ctx = find_r_ctx_get(l_ctx); + else + rwref_get(&r_ctx->ch_state_lhc0); + if (!r_ctx) + return migrated; + + if (!l_ctx) + l_ctx = find_l_ctx_get(r_ctx); + else + rwref_get(&l_ctx->ch_state_lhc0); + if (!l_ctx) { + rwref_put(&r_ctx->ch_state_lhc0); + return migrated; + } + + if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req && + l_ctx->local_xprt_req == l_ctx->transport_ptr->id) + goto exit; + if (l_ctx->no_migrate) + goto exit; + + if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id) + l_ctx->local_xprt_req = r_ctx->transport_ptr->id; + + new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req); + + if (new_xprt == l_ctx->transport_ptr->id) + goto exit; + + ctx_clone = kmalloc(sizeof(*ctx_clone), GFP_KERNEL); + if (!ctx_clone) + goto exit; + + mutex_lock(&transport_list_lock_lha0); + list_for_each_entry(xprt, &transport_list, list_node) + if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) + if (xprt->id == new_xprt) + break; + mutex_unlock(&transport_list_lock_lha0); + + spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags); + list_del_init(&l_ctx->port_list_node); + spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, + flags); + + memcpy(ctx_clone, l_ctx, sizeof(*ctx_clone)); + ctx_clone->local_xprt_req = 0; + ctx_clone->local_xprt_resp = 0; + ctx_clone->remote_xprt_req = 0; + ctx_clone->remote_xprt_resp = 0; + ctx_clone->notify_state = NULL; + ctx_clone->local_open_state = GLINK_CHANNEL_CLOSING; + rwref_lock_init(&ctx_clone->ch_state_lhc0, glink_ch_ctx_release); + init_completion(&ctx_clone->int_req_ack_complete); + init_completion(&ctx_clone->int_req_complete); + spin_lock_init(&ctx_clone->local_rx_intent_lst_lock_lhc1); + spin_lock_init(&ctx_clone->rmt_rx_intent_lst_lock_lhc2); + INIT_LIST_HEAD(&ctx_clone->tx_ready_list_node); + INIT_LIST_HEAD(&ctx_clone->local_rx_intent_list); + INIT_LIST_HEAD(&ctx_clone->local_rx_intent_ntfy_list); + INIT_LIST_HEAD(&ctx_clone->local_rx_intent_free_list); + 
INIT_LIST_HEAD(&ctx_clone->rmt_rx_intent_list); + INIT_LIST_HEAD(&ctx_clone->tx_active); + spin_lock_init(&ctx_clone->tx_pending_rmt_done_lock_lhc4); + INIT_LIST_HEAD(&ctx_clone->tx_pending_remote_done); + spin_lock_init(&ctx_clone->tx_lists_lock_lhc3); + spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags); + list_add_tail(&ctx_clone->port_list_node, + &l_ctx->transport_ptr->channels); + spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, + flags); + + l_ctx->transport_ptr->ops->tx_cmd_ch_close(l_ctx->transport_ptr->ops, + l_ctx->lcid); + + l_ctx->transport_ptr = xprt; + l_ctx->local_xprt_req = 0; + l_ctx->local_xprt_resp = 0; + if (new_xprt != r_ctx->transport_ptr->id) { + r_ctx->local_xprt_req = 0; + r_ctx->local_xprt_resp = 0; + r_ctx->remote_xprt_req = 0; + r_ctx->remote_xprt_resp = 0; + + l_ctx->remote_xprt_req = 0; + l_ctx->remote_xprt_resp = 0; + l_ctx->remote_opened = false; + + rwref_write_get(&xprt->xprt_state_lhb0); + spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags); + if (list_empty(&xprt->free_lcid_list)) { + l_ctx->lcid = xprt->next_lcid++; + } else { + flcid = list_first_entry(&xprt->free_lcid_list, + struct channel_lcid, list_node); + l_ctx->lcid = flcid->lcid; + list_del(&flcid->list_node); + kfree(flcid); + } + list_add_tail(&l_ctx->port_list_node, &xprt->channels); + spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags); + rwref_write_put(&xprt->xprt_state_lhb0); + } else { + l_ctx->lcid = r_ctx->lcid; + l_ctx->rcid = r_ctx->rcid; + l_ctx->remote_opened = r_ctx->remote_opened; + l_ctx->remote_xprt_req = r_ctx->remote_xprt_req; + l_ctx->remote_xprt_resp = r_ctx->remote_xprt_resp; + glink_delete_ch_from_list(r_ctx, false); + + spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags); + list_add_tail(&l_ctx->port_list_node, &xprt->channels); + spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags); + } + + + mutex_lock(&transport_list_lock_lha0); + list_for_each_entry(xprt, &transport_list, list_node) + if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) + if (xprt->id < best_xprt) + best_xprt = xprt->id; + mutex_unlock(&transport_list_lock_lha0); + l_ctx->local_open_state = GLINK_CHANNEL_OPENING; + l_ctx->local_xprt_req = best_xprt; + l_ctx->transport_ptr->ops->tx_cmd_ch_open(l_ctx->transport_ptr->ops, + l_ctx->lcid, l_ctx->name, best_xprt); + + migrated = true; +exit: + rwref_put(&l_ctx->ch_state_lhc0); + rwref_put(&r_ctx->ch_state_lhc0); + + return migrated; +} + +/** + * calculate_xprt_resp() - calculate the response to a remote xprt request + * @r_ctx: The channel the remote xprt request is for. + * + * Return: The calculated response. 
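+ *
+ * For instance (hypothetical ids): if the remote request arrived on the
+ * transport with id 2 asking for id 1, while a matching local channel has
+ * requested id 3 and migration is allowed, the response is max(3, 1) = 3.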
+ */ +static uint16_t calculate_xprt_resp(struct channel_ctx *r_ctx) +{ + struct channel_ctx *l_ctx; + + l_ctx = find_l_ctx_get(r_ctx); + if (!l_ctx) { + r_ctx->remote_xprt_resp = r_ctx->transport_ptr->id; + } else if (r_ctx->remote_xprt_req == r_ctx->transport_ptr->id) { + r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req; + } else { + if (!l_ctx->local_xprt_req) + r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req; + else if (l_ctx->no_migrate) + r_ctx->remote_xprt_resp = l_ctx->local_xprt_req; + else + r_ctx->remote_xprt_resp = max(l_ctx->local_xprt_req, + r_ctx->remote_xprt_req); + } + + if (l_ctx) + rwref_put(&l_ctx->ch_state_lhc0); + + return r_ctx->remote_xprt_resp; +} + +/** + * glink_core_rx_cmd_ch_remote_open() - Remote-initiated open command + * + * @if_ptr: Pointer to transport instance + * @rcid: Remote Channel ID + * @name: Channel name + * @req_xprt: Requested transport to migrate to + */ +static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr, + uint32_t rcid, const char *name, uint16_t req_xprt) +{ + struct channel_ctx *ctx; + uint16_t xprt_resp; + bool do_migrate; + + ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name); + if (ctx == NULL) { + GLINK_ERR_XPRT(if_ptr->glink_core_priv, + "%s: invalid rcid %u received, name '%s'\n", + __func__, rcid, name); + return; + } + + /* port already exists */ + if (ctx->remote_opened) { + GLINK_ERR_CH(ctx, + "%s: Duplicate remote open for rcid %u, name '%s'\n", + __func__, rcid, name); + return; + } + + ctx->remote_opened = true; + ch_add_rcid(if_ptr->glink_core_priv, ctx, rcid); + ctx->transport_ptr = if_ptr->glink_core_priv; + + ctx->remote_xprt_req = req_xprt; + xprt_resp = calculate_xprt_resp(ctx); + + do_migrate = will_migrate(NULL, ctx); + GLINK_INFO_CH(ctx, "%s: remote: CLOSED->OPENED ; xprt req:resp %u:%u\n", + __func__, req_xprt, xprt_resp); + + if_ptr->tx_cmd_ch_remote_open_ack(if_ptr, rcid, xprt_resp); + if (!do_migrate && ch_is_fully_opened(ctx)) + ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED); + + + if (do_migrate) + ch_migrate(NULL, ctx); +} + +/** + * glink_core_rx_cmd_ch_open_ack() - Receive ack to previously sent open request + * + * if_ptr: Pointer to transport instance + * lcid: Local Channel ID + * @xprt_resp: Response to the transport migration request + */ +static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr, + uint32_t lcid, uint16_t xprt_resp) +{ + struct channel_ctx *ctx; + + ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid); + if (!ctx) { + /* unknown LCID received - this shouldn't happen */ + GLINK_ERR_XPRT(if_ptr->glink_core_priv, + "%s: invalid lcid %u received\n", __func__, + (unsigned)lcid); + return; + } + + if (ctx->local_open_state != GLINK_CHANNEL_OPENING) { + GLINK_ERR_CH(ctx, + "%s: unexpected open ack receive for lcid. Current state: %u. 
Thread: %u\n", + __func__, ctx->local_open_state, current->pid); + rwref_put(&ctx->ch_state_lhc0); + return; + } + + ctx->local_xprt_resp = xprt_resp; + if (!ch_migrate(ctx, NULL)) { + ctx->local_open_state = GLINK_CHANNEL_OPENED; + GLINK_INFO_PERF_CH(ctx, + "%s: local:GLINK_CHANNEL_OPENING_WAIT->GLINK_CHANNEL_OPENED\n", + __func__); + + if (ch_is_fully_opened(ctx)) { + ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED); + GLINK_INFO_PERF_CH(ctx, + "%s: notify state: GLINK_CONNECTED\n", + __func__); + } + } + rwref_put(&ctx->ch_state_lhc0); +} + +/** + * glink_core_rx_cmd_ch_remote_close() - Receive remote close command + * + * if_ptr: Pointer to transport instance + * rcid: Remote Channel ID + */ +static void glink_core_rx_cmd_ch_remote_close( + struct glink_transport_if *if_ptr, uint32_t rcid) +{ + struct channel_ctx *ctx; + bool is_ch_fully_closed; + + ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid); + if (!ctx) { + /* unknown LCID received - this shouldn't happen */ + GLINK_ERR_XPRT(if_ptr->glink_core_priv, + "%s: invalid rcid %u received\n", __func__, + (unsigned)rcid); + return; + } + + if (!ctx->remote_opened) { + GLINK_ERR_CH(ctx, + "%s: unexpected remote close receive for rcid %u\n", + __func__, (unsigned)rcid); + rwref_put(&ctx->ch_state_lhc0); + return; + } + GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__); + + is_ch_fully_closed = glink_core_remote_close_common(ctx); + + ctx->pending_delete = true; + if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid); + + if (is_ch_fully_closed) { + glink_delete_ch_from_list(ctx, true); + flush_workqueue(ctx->transport_ptr->tx_wq); + } + rwref_put(&ctx->ch_state_lhc0); +} + +/** + * glink_core_rx_cmd_ch_close_ack() - Receive locally-request close ack + * + * if_ptr: Pointer to transport instance + * lcid: Local Channel ID + */ +static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr, + uint32_t lcid) +{ + struct channel_ctx *ctx; + bool is_ch_fully_closed; + + ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid); + if (!ctx) { + /* unknown LCID received - this shouldn't happen */ + GLINK_ERR_XPRT(if_ptr->glink_core_priv, + "%s: invalid lcid %u received\n", __func__, + (unsigned)lcid); + return; + } + + if (ctx->local_open_state != GLINK_CHANNEL_CLOSING) { + GLINK_ERR_CH(ctx, + "%s: unexpected close ack receive for lcid %u\n", + __func__, (unsigned)lcid); + rwref_put(&ctx->ch_state_lhc0); + return; + } + + is_ch_fully_closed = glink_core_ch_close_ack_common(ctx); + if (is_ch_fully_closed) { + glink_delete_ch_from_list(ctx, true); + flush_workqueue(ctx->transport_ptr->tx_wq); + } + rwref_put(&ctx->ch_state_lhc0); +} + +/** + * glink_core_remote_rx_intent_put() - Receive remove intent + * + * @if_ptr: Pointer to transport instance + * @rcid: Remote Channel ID + * @riid: Remote Intent ID + * @size: Size of the remote intent ID + */ +static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr, + uint32_t rcid, uint32_t riid, size_t size) +{ + struct channel_ctx *ctx; + + ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid); + if (!ctx) { + /* unknown rcid received - this shouldn't happen */ + GLINK_ERR_XPRT(if_ptr->glink_core_priv, + "%s: invalid rcid received %u\n", __func__, + (unsigned)rcid); + return; + } + + ch_push_remote_rx_intent(ctx, size, riid); + rwref_put(&ctx->ch_state_lhc0); +} + +/** + * glink_core_rx_cmd_remote_rx_intent_req() - Receive a request for rx_intent + * from remote side + * if_ptr: Pointer to the transport interface + * rcid: Remote channel 
ID
+ * size:	size of the intent
+ *
+ * The function searches for the local channel to which the request for an
+ * rx_intent has arrived and forwards the request to that channel through the
+ * notify_rx_intent_req callback registered by the local channel.
+ */
+static void glink_core_rx_cmd_remote_rx_intent_req(
+	struct glink_transport_if *if_ptr, uint32_t rcid, size_t size)
+{
+	struct channel_ctx *ctx;
+	bool cb_ret;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+	if (!ctx->notify_rx_intent_req) {
+		GLINK_ERR_CH(ctx,
+			"%s: Notify function not defined for local channel",
+			__func__);
+		rwref_put(&ctx->ch_state_lhc0);
+		return;
+	}
+
+	cb_ret = ctx->notify_rx_intent_req(ctx, ctx->user_priv, size);
+	if_ptr->tx_cmd_remote_rx_intent_req_ack(if_ptr, ctx->lcid, cb_ret);
+	rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_rx_intent_req_ack() - Receive ack from remote side for a
+ *					local rx_intent request
+ * if_ptr:	Pointer to the transport interface
+ * rcid:	Remote channel ID
+ * granted:	Whether the intent request was granted by the remote side
+ *
+ * This function receives the remote side's response to a locally initiated
+ * rx_intent request and completes the ack wait for the local channel.
+ */
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+					*if_ptr, uint32_t rcid, bool granted)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+	ctx->int_req_ack = granted;
+	complete_all(&ctx->int_req_ack_complete);
+	rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_get_pkt_ctx() - lookup RX intent structure
+ *
+ * if_ptr:	Pointer to the transport interface
+ * rcid:	Remote channel ID
+ * liid:	Local RX Intent ID
+ *
+ * Note that this function is designed to always be followed by a call to
+ * glink_core_rx_put_pkt_ctx() to complete an RX operation by the transport.
+ *
+ * Return: Pointer to RX intent structure (or NULL if none found)
+ */
+static struct glink_core_rx_intent *glink_core_rx_get_pkt_ctx(
+	struct glink_transport_if *if_ptr, uint32_t rcid, uint32_t liid)
+{
+	struct channel_ctx *ctx;
+	struct glink_core_rx_intent *intent_ptr;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return NULL;
+	}
+
+	/* match pending intent */
+	intent_ptr = ch_get_local_rx_intent(ctx, liid);
+	if (intent_ptr == NULL) {
+		GLINK_ERR_CH(ctx,
+			"%s: L[%u]: No matching rx intent\n",
+			__func__, liid);
+		rwref_put(&ctx->ch_state_lhc0);
+		return NULL;
+	}
+
+	rwref_put(&ctx->ch_state_lhc0);
+	return intent_ptr;
+}
+
+/**
+ * glink_core_rx_put_pkt_ctx() - complete the RX operation on an intent
+ *
+ * if_ptr:	Pointer to the transport interface
+ * rcid:	Remote channel ID
+ * intent_ptr:	Pointer to the RX intent
+ * complete:	Packet has been completely received
+ *
+ * Note that this function should always be preceded by a call to
+ * glink_core_rx_get_pkt_ctx().
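+ *
+ * A transport's fragment-RX path is therefore expected to look roughly like
+ * the following (illustrative pseudo-flow, not a verbatim transport
+ * implementation):
+ *
+ *	intent = glink_core_rx_get_pkt_ctx(if_ptr, rcid, liid);
+ *	copy the fragment into the intent and advance intent->write_offset;
+ *	glink_core_rx_put_pkt_ctx(if_ptr, rcid, intent,
+ *				  intent->write_offset == intent->pkt_size);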
+ */
+void glink_core_rx_put_pkt_ctx(struct glink_transport_if *if_ptr,
+	uint32_t rcid, struct glink_core_rx_intent *intent_ptr, bool complete)
+{
+	struct channel_ctx *ctx;
+
+	if (!complete) {
+		GLINK_DBG_XPRT(if_ptr->glink_core_priv,
+			"%s: rcid[%u] liid[%u] pkt_size[%zu] write_offset[%zu] Fragment received\n",
+				__func__, rcid, intent_ptr->id,
+				intent_ptr->pkt_size,
+				intent_ptr->write_offset);
+		return;
+	}
+
+	/* packet complete */
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned)rcid);
+		return;
+	}
+
+	if (unlikely(intent_ptr->tracer_pkt)) {
+		tracer_pkt_log_event(intent_ptr->data, GLINK_CORE_RX);
+		ch_set_local_rx_intent_notified(ctx, intent_ptr);
+		if (ctx->notify_rx_tracer_pkt)
+			ctx->notify_rx_tracer_pkt(ctx, ctx->user_priv,
+				intent_ptr->pkt_priv, intent_ptr->data,
+				intent_ptr->pkt_size);
+		rwref_put(&ctx->ch_state_lhc0);
+		return;
+	}
+
+	GLINK_PERF_CH(ctx, "%s: L[%u]: data[%p] size[%zu]\n",
+		__func__, intent_ptr->id,
+		intent_ptr->data ? intent_ptr->data : intent_ptr->iovec,
+		intent_ptr->write_offset);
+	if (!intent_ptr->data && !ctx->notify_rxv) {
+		/* Received a vector, but client can't handle a vector */
+		intent_ptr->bounce_buf = linearize_vector(intent_ptr->iovec,
+						intent_ptr->pkt_size,
+						intent_ptr->vprovider,
+						intent_ptr->pprovider);
+		if (IS_ERR_OR_NULL(intent_ptr->bounce_buf)) {
+			GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Error %ld linearizing vector\n", __func__,
+				PTR_ERR(intent_ptr->bounce_buf));
+			BUG();
+			rwref_put(&ctx->ch_state_lhc0);
+			return;
+		}
+	}
+
+	ch_set_local_rx_intent_notified(ctx, intent_ptr);
+	if (ctx->notify_rx && (intent_ptr->data || intent_ptr->bounce_buf)) {
+		ctx->notify_rx(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+			intent_ptr->data ?
+				intent_ptr->data : intent_ptr->bounce_buf,
+			intent_ptr->pkt_size);
+	} else if (ctx->notify_rxv) {
+		ctx->notify_rxv(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+			intent_ptr->iovec, intent_ptr->pkt_size,
+			intent_ptr->vprovider, intent_ptr->pprovider);
+	} else {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Unable to process rx data\n", __func__);
+		BUG();
+	}
+	rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_tx_done() - Receive Transmit Done Command
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @riid:	Remote intent ID
+ * @reuse:	Reuse the consumed intent
+ */
+void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
+	uint32_t rcid, uint32_t riid, bool reuse)
+{
+	struct channel_ctx *ctx;
+	struct glink_core_tx_pkt *tx_pkt;
+	unsigned long flags;
+	size_t intent_size;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				rcid);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	tx_pkt = ch_get_tx_pending_remote_done(ctx, riid);
+	if (IS_ERR_OR_NULL(tx_pkt)) {
+		/*
+		 * FUTURE - in the case of a zero-copy transport, this is a
+		 * fatal protocol failure since memory corruption could occur
+		 * in this case. Prevent this by adding code in glink_close()
+		 * to recall any buffers in flight / wait for them to be
+		 * returned.
+ */ + GLINK_ERR_CH(ctx, "%s: R[%u]: No matching tx\n", + __func__, + (unsigned)riid); + spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags); + rwref_put(&ctx->ch_state_lhc0); + return; + } + + /* notify client */ + ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv, + tx_pkt->data ? tx_pkt->data : tx_pkt->iovec); + intent_size = tx_pkt->intent_size; + ch_remove_tx_pending_remote_done(ctx, tx_pkt); + spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags); + + if (reuse) + ch_push_remote_rx_intent(ctx, intent_size, riid); + rwref_put(&ctx->ch_state_lhc0); +} + +/** + * xprt_schedule_tx() - Schedules packet for transmit. + * @xprt_ptr: Transport to send packet on. + * @ch_ptr: Channel to send packet on. + * @tx_info: Packet to transmit. + */ +static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr, + struct channel_ctx *ch_ptr, + struct glink_core_tx_pkt *tx_info) +{ + unsigned long flags; + + if (unlikely(xprt_ptr->local_state == GLINK_XPRT_DOWN)) { + GLINK_ERR_CH(ch_ptr, "%s: Error XPRT is down\n", __func__); + kfree(tx_info); + return; + } + + spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags); + if (unlikely(!ch_is_fully_opened(ch_ptr))) { + spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags); + GLINK_ERR_CH(ch_ptr, "%s: Channel closed before tx\n", + __func__); + kfree(tx_info); + return; + } + if (list_empty(&ch_ptr->tx_ready_list_node)) + list_add_tail(&ch_ptr->tx_ready_list_node, + &xprt_ptr->prio_bin[ch_ptr->curr_priority].tx_ready); + + spin_lock(&ch_ptr->tx_lists_lock_lhc3); + list_add_tail(&tx_info->list_node, &ch_ptr->tx_active); + glink_qos_do_ch_tx(ch_ptr); + if (unlikely(tx_info->tracer_pkt)) + tracer_pkt_log_event((void *)(tx_info->data), + GLINK_QUEUE_TO_SCHEDULER); + + spin_unlock(&ch_ptr->tx_lists_lock_lhc3); + spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags); + + queue_work(xprt_ptr->tx_wq, &xprt_ptr->tx_work); +} + +/** + * xprt_single_threaded_tx() - Transmit in the context of sender. + * @xprt_ptr: Transport to send packet on. + * @ch_ptr: Channel to send packet on. + * @tx_info: Packet to transmit. + */ +static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr, + struct channel_ctx *ch_ptr, + struct glink_core_tx_pkt *tx_info) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags); + do { + ret = xprt_ptr->ops->tx(ch_ptr->transport_ptr->ops, + ch_ptr->lcid, tx_info); + } while (ret == -EAGAIN); + if (ret < 0 || tx_info->size_remaining) { + GLINK_ERR_CH(ch_ptr, "%s: Error %d writing data\n", + __func__, ret); + kfree(tx_info); + } else { + list_add_tail(&tx_info->list_done, + &ch_ptr->tx_pending_remote_done); + ret = 0; + } + spin_unlock_irqrestore(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags); + return ret; +} + +/** + * glink_scheduler_eval_prio() - Evaluate the channel priority + * @ctx: Channel whose priority is evaluated. + * @xprt_ctx: Transport in which the channel is part of. + * + * This function is called by the packet scheduler to measure the traffic + * rate observed in the channel and compare it against the traffic rate + * requested by the channel. The comparison result is used to evaluate the + * priority of the channel. 
+ */ +static void glink_scheduler_eval_prio(struct channel_ctx *ctx, + struct glink_core_xprt_ctx *xprt_ctx) +{ + unsigned long token_end_time; + unsigned long token_consume_time, rem; + unsigned long obs_rate_kBps; + + if (ctx->initial_priority == 0) + return; + + if (ctx->token_count) + return; + + token_end_time = arch_counter_get_cntpct(); + + token_consume_time = NSEC_PER_SEC; + rem = do_div(token_consume_time, arch_timer_get_rate()); + token_consume_time = (token_end_time - ctx->token_start_time) * + token_consume_time; + rem = do_div(token_consume_time, 1000); + obs_rate_kBps = glink_qos_calc_rate_kBps(ctx->txd_len, + token_consume_time); + if (obs_rate_kBps > ctx->req_rate_kBps) { + GLINK_INFO_CH(ctx, "%s: Obs. Rate (%lu) > Req. Rate (%lu)\n", + __func__, obs_rate_kBps, ctx->req_rate_kBps); + glink_qos_update_ch_prio(ctx, 0); + } else { + glink_qos_update_ch_prio(ctx, ctx->initial_priority); + } + + ctx->token_count = xprt_ctx->token_count; + ctx->txd_len = 0; + ctx->token_start_time = arch_counter_get_cntpct(); +} + +/** + * glink_scheduler_tx() - Transmit operation by the scheduler + * @ctx: Channel which is scheduled for transmission. + * @xprt_ctx: Transport context in which the transmission is performed. + * + * This function is called by the scheduler after scheduling a channel for + * transmission over the transport. + * + * Return: return value as returned by the transport on success, + * standard Linux error codes on failure. + */ +static int glink_scheduler_tx(struct channel_ctx *ctx, + struct glink_core_xprt_ctx *xprt_ctx) +{ + unsigned long flags; + struct glink_core_tx_pkt *tx_info; + size_t txd_len = 0; + size_t tx_len = 0; + uint32_t num_pkts = 0; + int ret; + + spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags); + while (txd_len < xprt_ctx->mtu && + !list_empty(&ctx->tx_active)) { + tx_info = list_first_entry(&ctx->tx_active, + struct glink_core_tx_pkt, list_node); + rwref_get(&tx_info->pkt_ref); + + spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4); + if (list_empty(&tx_info->list_done)) + list_add(&tx_info->list_done, + &ctx->tx_pending_remote_done); + spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4); + spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags); + + if (unlikely(tx_info->tracer_pkt)) { + tracer_pkt_log_event((void *)(tx_info->data), + GLINK_SCHEDULER_TX); + ret = xprt_ctx->ops->tx_cmd_tracer_pkt(xprt_ctx->ops, + ctx->lcid, tx_info); + } else { + tx_len = tx_info->size_remaining < + (xprt_ctx->mtu - txd_len) ? + tx_info->size_remaining : + (xprt_ctx->mtu - txd_len); + tx_info->tx_len = tx_len; + ret = xprt_ctx->ops->tx(xprt_ctx->ops, + ctx->lcid, tx_info); + } + spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags); + if (ret == -EAGAIN) { + /* + * transport unable to send at the moment and will call + * tx_resume() when it can send again. + */ + rwref_put(&tx_info->pkt_ref); + break; + } else if (ret < 0) { + /* + * General failure code that indicates that the + * transport is unable to recover. In this case, the + * communication failure will be detected at a higher + * level and a subsystem restart of the affected system + * will be triggered. + */ + GLINK_ERR_XPRT(xprt_ctx, + "%s: unrecoverable xprt failure %d\n", + __func__, ret); + rwref_put(&tx_info->pkt_ref); + break; + } else if (!ret && tx_info->size_remaining) { + /* + * Transport unable to send any data on this channel. + * Break out of the loop so that the scheduler can + * continue with the next channel. 
+ */ + break; + } else { + txd_len += tx_len; + } + + if (!tx_info->size_remaining) { + num_pkts++; + list_del_init(&tx_info->list_node); + rwref_put(&tx_info->pkt_ref); + } + } + + ctx->txd_len += txd_len; + if (txd_len) { + if (num_pkts >= ctx->token_count) + ctx->token_count = 0; + else if (num_pkts) + ctx->token_count -= num_pkts; + else + ctx->token_count--; + } + spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags); + + return ret; +} + +/** + * tx_work_func() - Transmit worker + * @work: Linux work structure + */ +static void tx_work_func(struct work_struct *work) +{ + struct glink_core_xprt_ctx *xprt_ptr = + container_of(work, struct glink_core_xprt_ctx, tx_work); + struct channel_ctx *ch_ptr; + uint32_t prio; + uint32_t tx_ready_head_prio = 0; + struct channel_ctx *tx_ready_head = NULL; + bool transmitted_successfully = true; + unsigned long flags; + int ret = 0; + + GLINK_PERF("%s: worker starting\n", __func__); + + while (1) { + prio = xprt_ptr->num_priority - 1; + spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags); + while (list_empty(&xprt_ptr->prio_bin[prio].tx_ready)) { + if (prio == 0) { + spin_unlock_irqrestore( + &xprt_ptr->tx_ready_lock_lhb2, flags); + return; + } + prio--; + } + glink_pm_qos_vote(xprt_ptr); + ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready, + struct channel_ctx, tx_ready_list_node); + spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags); + + if (tx_ready_head == NULL || tx_ready_head_prio < prio) { + tx_ready_head = ch_ptr; + tx_ready_head_prio = prio; + } + + if (ch_ptr == tx_ready_head && !transmitted_successfully) { + GLINK_ERR_XPRT(xprt_ptr, + "%s: Unable to send data on this transport.\n", + __func__); + break; + } + transmitted_successfully = false; + + ret = glink_scheduler_tx(ch_ptr, xprt_ptr); + if (ret == -EAGAIN) { + /* + * transport unable to send at the moment and will call + * tx_resume() when it can send again. + */ + break; + } else if (ret < 0) { + /* + * General failure code that indicates that the + * transport is unable to recover. In this case, the + * communication failure will be detected at a higher + * level and a subsystem restart of the affected system + * will be triggered. + */ + GLINK_ERR_XPRT(xprt_ptr, + "%s: unrecoverable xprt failure %d\n", + __func__, ret); + break; + } else if (!ret) { + /* + * Transport unable to send any data on this channel, + * but didn't return an error. Move to the next channel + * and continue. + */ + spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags); + list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready); + spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, + flags); + continue; + } + + spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags); + spin_lock(&ch_ptr->tx_lists_lock_lhc3); + + glink_scheduler_eval_prio(ch_ptr, xprt_ptr); + if (list_empty(&ch_ptr->tx_active)) { + list_del_init(&ch_ptr->tx_ready_list_node); + glink_qos_done_ch_tx(ch_ptr); + } + + spin_unlock(&ch_ptr->tx_lists_lock_lhc3); + spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags); + + tx_ready_head = NULL; + transmitted_successfully = true; + } + glink_pm_qos_unvote(xprt_ptr); + GLINK_PERF("%s: worker exiting\n", __func__); +} + +static void glink_core_tx_resume(struct glink_transport_if *if_ptr) +{ + queue_work(if_ptr->glink_core_priv->tx_wq, + &if_ptr->glink_core_priv->tx_work); +} + +/** + * glink_pm_qos_vote() - Add Power Management QoS Vote + * @xprt_ptr: Transport for power vote + * + * Note - must be called with tx_ready_lock_lhb2 locked. 
+ */ +static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr) +{ + if (glink_pm_qos && !xprt_ptr->qos_req_active) { + GLINK_PERF("%s: qos vote %u us\n", __func__, glink_pm_qos); + pm_qos_update_request(&xprt_ptr->pm_qos_req, glink_pm_qos); + xprt_ptr->qos_req_active = true; + } + xprt_ptr->tx_path_activity = true; +} + +/** + * glink_pm_qos_unvote() - Schedule Power Management QoS Vote Removal + * @xprt_ptr: Transport for power vote removal + * + * Note - must be called with tx_ready_lock_lhb2 locked. + */ +static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr) +{ + xprt_ptr->tx_path_activity = false; + if (xprt_ptr->qos_req_active) { + GLINK_PERF("%s: qos unvote\n", __func__); + schedule_delayed_work(&xprt_ptr->pm_qos_work, + msecs_to_jiffies(GLINK_PM_QOS_HOLDOFF_MS)); + } +} + +/** + * glink_pm_qos_cancel_worker() - Remove Power Management QoS Vote + * @work: Delayed work structure + * + * Removes PM QoS vote if no additional transmit activity has occurred between + * the unvote and when this worker runs. + */ +static void glink_pm_qos_cancel_worker(struct work_struct *work) +{ + struct glink_core_xprt_ctx *xprt_ptr; + unsigned long flags; + + xprt_ptr = container_of(to_delayed_work(work), + struct glink_core_xprt_ctx, pm_qos_work); + + spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags); + if (!xprt_ptr->tx_path_activity) { + /* no more tx activity */ + GLINK_PERF("%s: qos off\n", __func__); + pm_qos_update_request(&xprt_ptr->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + xprt_ptr->qos_req_active = false; + } + xprt_ptr->tx_path_activity = false; + spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags); +} + +/** + * glink_core_rx_cmd_remote_sigs() - Receive remote channel signal command + * + * if_ptr: Pointer to transport instance + * rcid: Remote Channel ID + * sigs: Remote channel signal state + */ +static void glink_core_rx_cmd_remote_sigs(struct glink_transport_if *if_ptr, + uint32_t rcid, uint32_t sigs) +{ + struct channel_ctx *ctx; + uint32_t old_sigs; + + ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid); + if (!ctx) { + /* unknown RCID received - this shouldn't happen */ + GLINK_ERR_XPRT(if_ptr->glink_core_priv, + "%s: invalid rcid %u received\n", __func__, + (unsigned)rcid); + return; + } + + if (!ch_is_fully_opened(ctx)) { + GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n", + __func__); + rwref_put(&ctx->ch_state_lhc0); + return; + } + + old_sigs = ctx->rsigs; + ctx->rsigs = sigs; + if (ctx->notify_rx_sigs) { + ctx->notify_rx_sigs(ctx, ctx->user_priv, old_sigs, ctx->rsigs); + GLINK_INFO_CH(ctx, "%s: notify rx sigs old:0x%x new:0x%x\n", + __func__, old_sigs, ctx->rsigs); + } + rwref_put(&ctx->ch_state_lhc0); +} + +static struct glink_core_if core_impl = { + .link_up = glink_core_link_up, + .link_down = glink_core_link_down, + .rx_cmd_version = glink_core_rx_cmd_version, + .rx_cmd_version_ack = glink_core_rx_cmd_version_ack, + .rx_cmd_ch_remote_open = glink_core_rx_cmd_ch_remote_open, + .rx_cmd_ch_open_ack = glink_core_rx_cmd_ch_open_ack, + .rx_cmd_ch_remote_close = glink_core_rx_cmd_ch_remote_close, + .rx_cmd_ch_close_ack = glink_core_rx_cmd_ch_close_ack, + .rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx, + .rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx, + .rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put, + .rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req, + .rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack, + .rx_cmd_tx_done = glink_core_rx_cmd_tx_done, + .tx_resume = glink_core_tx_resume, + .rx_cmd_remote_sigs =
glink_core_rx_cmd_remote_sigs, +}; + +/** + * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator + * @xprt_i: pointer to the transport context iterator. + * + * This function acquires the transport context lock which must then be + * released by glink_xprt_ctx_iterator_end() + */ +void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i) +{ + if (xprt_i == NULL) + return; + + mutex_lock(&transport_list_lock_lha0); + xprt_i->xprt_list = &transport_list; + xprt_i->i_curr = list_entry(&transport_list, + struct glink_core_xprt_ctx, list_node); +} +EXPORT_SYMBOL(glink_xprt_ctx_iterator_init); + +/** + * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration + * @xprt_i: pointer to the transport context iterator. + */ +void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i) +{ + if (xprt_i == NULL) + return; + + xprt_i->xprt_list = NULL; + xprt_i->i_curr = NULL; + mutex_unlock(&transport_list_lock_lha0); +} +EXPORT_SYMBOL(glink_xprt_ctx_iterator_end); + +/** + * glink_xprt_ctx_iterator_next() - iterates element by element in transport context list + * @xprt_i: pointer to the transport context iterator. + * + * Return: pointer to the next transport context, or NULL at the end of the list + */ +struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next( + struct xprt_ctx_iterator *xprt_i) +{ + struct glink_core_xprt_ctx *xprt_ctx = NULL; + + if (xprt_i == NULL) + return xprt_ctx; + + if (list_empty(xprt_i->xprt_list)) + return xprt_ctx; + + list_for_each_entry_continue(xprt_i->i_curr, + xprt_i->xprt_list, list_node) { + xprt_ctx = xprt_i->i_curr; + break; + } + return xprt_ctx; +} +EXPORT_SYMBOL(glink_xprt_ctx_iterator_next); + +/** + * glink_get_xprt_name() - get the transport name + * @xprt_ctx: pointer to the transport context. + * + * Return: name of the transport + */ +char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx) +{ + if (xprt_ctx == NULL) + return NULL; + + return xprt_ctx->name; +} +EXPORT_SYMBOL(glink_get_xprt_name); + +/** + * glink_get_xprt_edge_name() - get the name of the remote processor/edge + * of the transport + * @xprt_ctx: pointer to the transport context. + * + * Return: Name of the remote processor/edge + */ +char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx) +{ + if (xprt_ctx == NULL) + return NULL; + return xprt_ctx->edge; +} +EXPORT_SYMBOL(glink_get_xprt_edge_name); + +/** + * glink_get_xprt_state() - get the state of the transport + * @xprt_ctx: pointer to the transport context. + * + * Return: Name of the transport state, NULL in case of invalid input + */ +const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx) +{ + if (xprt_ctx == NULL) + return NULL; + + return glink_get_xprt_state_string(xprt_ctx->local_state); +} +EXPORT_SYMBOL(glink_get_xprt_state); + +/** + * glink_get_xprt_version_features() - get the version and feature set + * of local transport in glink + * @xprt_ctx: pointer to the transport context. + * + * Return: pointer to the glink_core_version + */ +const struct glink_core_version *glink_get_xprt_version_features( + struct glink_core_xprt_ctx *xprt_ctx) +{ + const struct glink_core_version *ver = NULL; + + if (xprt_ctx == NULL) + return ver; + + ver = &xprt_ctx->versions[xprt_ctx->local_version_idx]; + return ver; +} +EXPORT_SYMBOL(glink_get_xprt_version_features);
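Taken together, the accessors above give in-kernel diagnostics a safe way to walk the registered transports. A minimal sketch using only the functions defined in this file (the pr_info() output format is illustrative):

/* Walk all registered transports and log their name, edge and state. */
static void example_dump_xprts(void)
{
	struct xprt_ctx_iterator xprt_iter;
	struct glink_core_xprt_ctx *xprt_ctx;

	/* holds transport_list_lock_lha0 until _end() is called */
	glink_xprt_ctx_iterator_init(&xprt_iter);
	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
	while (xprt_ctx != NULL) {
		pr_info("xprt %s on edge %s is %s\n",
			glink_get_xprt_name(xprt_ctx),
			glink_get_xprt_edge_name(xprt_ctx),
			glink_get_xprt_state(xprt_ctx));
		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
	}
	glink_xprt_ctx_iterator_end(&xprt_iter);
}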
+ +/** + * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator + * @ch_iter: pointer to the channel context iterator. + * @xprt: pointer to the transport context that holds the channel list + * + * This function acquires the channel context lock which must then be + * released by glink_ch_ctx_iterator_end() + */ +void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter, + struct glink_core_xprt_ctx *xprt) +{ + unsigned long flags; + + if (ch_iter == NULL || xprt == NULL) + return; + + spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags); + ch_iter->ch_list = &(xprt->channels); + ch_iter->i_curr = list_entry(&(xprt->channels), + struct channel_ctx, port_list_node); + ch_iter->ch_list_flags = flags; +} +EXPORT_SYMBOL(glink_ch_ctx_iterator_init); + +/** + * glink_ch_ctx_iterator_end() - Ends the channel context list iteration + * @ch_iter: pointer to the channel context iterator. + * @xprt: pointer to the transport context that holds the channel list. + */ +void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter, + struct glink_core_xprt_ctx *xprt) +{ + if (ch_iter == NULL || xprt == NULL) + return; + + spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, + ch_iter->ch_list_flags); + ch_iter->ch_list = NULL; + ch_iter->i_curr = NULL; +} +EXPORT_SYMBOL(glink_ch_ctx_iterator_end); + +/** + * glink_ch_ctx_iterator_next() - iterates element by element in channel context list + * @c_i: pointer to the channel context iterator. + * + * Return: pointer to the next channel context, or NULL at the end of the list + */ +struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *c_i) +{ + struct channel_ctx *ch_ctx = NULL; + + if (c_i == NULL) + return ch_ctx; + + if (list_empty(c_i->ch_list)) + return ch_ctx; + + list_for_each_entry_continue(c_i->i_curr, + c_i->ch_list, port_list_node) { + ch_ctx = c_i->i_curr; + break; + } + return ch_ctx; +} +EXPORT_SYMBOL(glink_ch_ctx_iterator_next); + +/** + * glink_get_ch_name() - get the channel name + * @ch_ctx: pointer to the channel context. + * + * Return: name of the channel, NULL in case of invalid input + */ +char *glink_get_ch_name(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return NULL; + + return ch_ctx->name; +} +EXPORT_SYMBOL(glink_get_ch_name); + +/** + * glink_get_ch_edge_name() - get the edge on which the channel is created + * @ch_ctx: pointer to the channel context. + * + * Return: name of the edge, NULL in case of invalid input + */ +char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return NULL; + + return ch_ctx->transport_ptr->edge; +} +EXPORT_SYMBOL(glink_get_ch_edge_name); + +/** + * glink_get_ch_lcid() - get the local channel ID + * @ch_ctx: pointer to the channel context. + * + * Return: local channel id, -EINVAL in case of invalid input + */ +int glink_get_ch_lcid(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return -EINVAL; + + return ch_ctx->lcid; +} +EXPORT_SYMBOL(glink_get_ch_lcid); + +/** + * glink_get_ch_rcid() - get the remote channel ID + * @ch_ctx: pointer to the channel context. + * + * Return: remote channel id, -EINVAL in case of invalid input + */ +int glink_get_ch_rcid(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return -EINVAL; + + return ch_ctx->rcid; +} +EXPORT_SYMBOL(glink_get_ch_rcid); + +/** + * glink_get_ch_lstate() - get the local channel state + * @ch_ctx: pointer to the channel context.
+ * + * Return: Name of the local channel state, NULL in case of invalid input + */ +const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return NULL; + + return glink_get_ch_state_string(ch_ctx->local_open_state); +} +EXPORT_SYMBOL(glink_get_ch_lstate); + +/** + * glink_get_ch_rstate() - get the remote channel state + * @ch_ctx: pointer to the channel context. + * + * Return: true if remote side is opened, false otherwise + */ +bool glink_get_ch_rstate(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return false; + + return ch_ctx->remote_opened; +} +EXPORT_SYMBOL(glink_get_ch_rstate); + +/** + * glink_get_ch_xprt_name() - get the name of the transport to which + * the channel belongs + * @ch_ctx: pointer to the channel context. + * + * Return: name of the transport, NULL in case of invalid input + */ +char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return NULL; + + return ch_ctx->transport_ptr->name; +} +EXPORT_SYMBOL(glink_get_ch_xprt_name); + +/** + * glink_get_ch_tx_pkt_count() - get the total number of packets sent + * through this channel + * @ch_ctx: pointer to the channel context. + * + * Return: number of packets transmitted, -EINVAL in case of invalid input + */ +int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return -EINVAL; + + /* FUTURE: packet stats not yet implemented */ + + return -ENOSYS; +} +EXPORT_SYMBOL(glink_get_ch_tx_pkt_count); + +/** + * glink_get_ch_rx_pkt_count() - get the total number of packets + * received at this channel + * @ch_ctx: pointer to the channel context. + * + * Return: number of packets received, -EINVAL in case of invalid input + */ +int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx) +{ + if (ch_ctx == NULL) + return -EINVAL; + + /* FUTURE: packet stats not yet implemented */ + + return -ENOSYS; +} +EXPORT_SYMBOL(glink_get_ch_rx_pkt_count); + +/** + * glink_get_ch_lintents_queued() - get the total number of intents queued + * at local side + * @ch_ctx: pointer to the channel context. + * + * Return: number of intents queued, -EINVAL in case of invalid input + */ +int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx) +{ + struct glink_core_rx_intent *intent; + int ilrx_count = 0; + + if (ch_ctx == NULL) + return -EINVAL; + + list_for_each_entry(intent, &ch_ctx->local_rx_intent_list, list) + ilrx_count++; + + return ilrx_count; +} +EXPORT_SYMBOL(glink_get_ch_lintents_queued); + +/** + * glink_get_ch_rintents_queued() - get the total number of intents queued + * from remote side + * @ch_ctx: pointer to the channel context. + * + * Return: number of intents queued, -EINVAL in case of invalid input + */ +int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx) +{ + struct glink_core_rx_intent *intent; + int irrx_count = 0; + + if (ch_ctx == NULL) + return -EINVAL; + + list_for_each_entry(intent, &ch_ctx->rmt_rx_intent_list, list) + irrx_count++; + + return irrx_count; +} +EXPORT_SYMBOL(glink_get_ch_rintents_queued); + +/** + * glink_get_ch_intent_info() - get the intent details of a channel + * @ch_ctx: pointer to the channel context. + * @ch_ctx_i: pointer to a structure that will contain intent details + * + * This function is used to get all the channel intent details including locks.
+ */ +void glink_get_ch_intent_info(struct channel_ctx *ch_ctx, + struct glink_ch_intent_info *ch_ctx_i) +{ + if (ch_ctx == NULL || ch_ctx_i == NULL) + return; + + ch_ctx_i->li_lst_lock = &ch_ctx->local_rx_intent_lst_lock_lhc1; + ch_ctx_i->li_avail_list = &ch_ctx->local_rx_intent_list; + ch_ctx_i->li_used_list = &ch_ctx->local_rx_intent_ntfy_list; + ch_ctx_i->ri_lst_lock = &ch_ctx->rmt_rx_intent_lst_lock_lhc2; + ch_ctx_i->ri_list = &ch_ctx->rmt_rx_intent_list; +} +EXPORT_SYMBOL(glink_get_ch_intent_info); + +/** + * glink_get_debug_mask() - Return debug mask attribute + * + * Return: debug mask attribute + */ +unsigned glink_get_debug_mask(void) +{ + return glink_debug_mask; +} +EXPORT_SYMBOL(glink_get_debug_mask); + +/** + * glink_get_log_ctx() - Return log context for other GLINK modules. + * + * Return: Log context or NULL if none. + */ +void *glink_get_log_ctx(void) +{ + return log_ctx; +} +EXPORT_SYMBOL(glink_get_log_ctx); + +/** + * glink_get_xprt_log_ctx() - Return log context for GLINK xprts. + * + * Return: Log context or NULL if none. + */ +void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt) +{ + if (xprt) + return xprt->log_ctx; + else + return NULL; +} +EXPORT_SYMBOL(glink_get_xprt_log_ctx); + +static int glink_init(void) +{ + log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0); + if (!log_ctx) + GLINK_ERR("%s: unable to create log context\n", __func__); + glink_debugfs_init(); + + return 0; +} +arch_initcall(glink_init); + +MODULE_DESCRIPTION("MSM Generic Link (G-Link) Transport"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h new file mode 100644 index 00000000000000..93c59d9c4aa1fc --- /dev/null +++ b/drivers/soc/qcom/glink_core_if.h @@ -0,0 +1,213 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef _SOC_QCOM_GLINK_CORE_IF_H_ +#define _SOC_QCOM_GLINK_CORE_IF_H_ + +#include <linux/of.h> +#include <linux/types.h> +#include "glink_private.h" + +/* Local Channel state */ +enum local_channel_state_e { + GLINK_CHANNEL_CLOSED = 0, + GLINK_CHANNEL_OPENING, + GLINK_CHANNEL_OPENED, + GLINK_CHANNEL_CLOSING, +}; + +/* Transport Negotiation State */ +enum transport_state_e { + GLINK_XPRT_DOWN, + GLINK_XPRT_NEGOTIATING, + GLINK_XPRT_OPENED, + GLINK_XPRT_FAILED, +}; + +struct channel_ctx; +struct glink_core_xprt_ctx; +struct glink_transport_if; +struct glink_core_version; + +/** + * struct glink_core_version - Individual version element + * + * version: supported version + * features: all supported features for version + * negotiate_features: callback used to negotiate the feature set for this + * version with the remote side + */ +struct glink_core_version { + uint32_t version; + uint32_t features; + + uint32_t (*negotiate_features)(struct glink_transport_if *if_ptr, + const struct glink_core_version *version_ptr, + uint32_t features); +}; + +/** + * struct glink_core_rx_intent - RX intent + * + * data: pointer to the data (may be NULL for zero-copy) + * id: remote or local intent ID + * pkt_size: total size of packet + * write_offset: next write offset (initially 0) + * intent_size: size of the original intent (do not modify) + * tracer_pkt: Flag to indicate if the data is a tracer packet + * iovec: Pointer to vector buffer if the transport passes a vector buffer + * vprovider: Virtual address-space buffer provider for a vector buffer + * pprovider: Physical address-space buffer provider for a vector buffer + * pkt_priv: G-Link core owned packet-private data + * list: G-Link core owned list node + * bounce_buf: Pointer to the temporary/internal bounce buffer + */ +struct glink_core_rx_intent { + void *data; + uint32_t id; + size_t pkt_size; + size_t write_offset; + size_t intent_size; + bool tracer_pkt; + void *iovec; + void * (*vprovider)(void *iovec, size_t offset, size_t *size); + void * (*pprovider)(void *iovec, size_t offset, size_t *size); + + /* G-Link-Core-owned elements - please ignore */ + struct list_head list; + const void *pkt_priv; + void *bounce_buf; +};
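A consumer of a vector intent walks the vector in contiguous chunks through the intent's buffer provider until pkt_size bytes have been seen; this is essentially what the core's bounce-buffer linearization does internally. A minimal sketch of such a walk (the flat destination buffer and the memcpy policy are illustrative only):

/* Copy a possibly fragmented vector RX buffer into a flat buffer
 * using the intent's virtual address-space buffer provider.
 */
static void example_copy_vector(struct glink_core_rx_intent *intent, void *dst)
{
	size_t offset = 0;
	size_t chunk_size;
	void *chunk;

	while (offset < intent->pkt_size) {
		chunk = intent->vprovider(intent->iovec, offset, &chunk_size);
		if (!chunk)
			break;	/* no data at this offset */
		if (chunk_size > intent->pkt_size - offset)
			chunk_size = intent->pkt_size - offset;
		memcpy(dst + offset, chunk, chunk_size);
		offset += chunk_size;
	}
}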
+/** + * struct glink_core_flow_info - Flow specific Information + * @mtu_tx_time_us: Time to transmit an MTU in microseconds. + * @power_state: Power state associated with the traffic flow. + */ +struct glink_core_flow_info { + unsigned long mtu_tx_time_us; + uint32_t power_state; +}; + +/** + * struct glink_core_transport_cfg - configuration of a new transport + * @name: Name of the transport. + * @edge: Subsystem the transport connects to. + * @versions: Array of transport versions supported. + * @versions_entries: Number of entries in @versions. + * @max_cid: Maximum number of channel identifiers supported. + * @max_iid: Maximum number of intent identifiers supported. + * @mtu: MTU supported by this transport. + * @num_flows: Number of traffic flows/priority buckets. + * @flow_info: Information about each flow/priority. + * @token_count: Number of tokens per assignment. + */ +struct glink_core_transport_cfg { + const char *name; + const char *edge; + const struct glink_core_version *versions; + size_t versions_entries; + uint32_t max_cid; + uint32_t max_iid; + + size_t mtu; + uint32_t num_flows; + struct glink_core_flow_info *flow_info; + uint32_t token_count; +}; + +struct glink_core_if { + /* Negotiation */ + void (*link_up)(struct glink_transport_if *if_ptr); + void (*link_down)(struct glink_transport_if *if_ptr); + void (*rx_cmd_version)(struct glink_transport_if *if_ptr, + uint32_t version, + uint32_t features); + void (*rx_cmd_version_ack)(struct glink_transport_if *if_ptr, + uint32_t version, + uint32_t features); + + /* channel management */ + void (*rx_cmd_ch_remote_open)(struct glink_transport_if *if_ptr, + uint32_t rcid, const char *name, uint16_t req_xprt); + void (*rx_cmd_ch_open_ack)(struct glink_transport_if *if_ptr, + uint32_t lcid, uint16_t xprt_resp); + void (*rx_cmd_ch_remote_close)(struct glink_transport_if *if_ptr, + uint32_t rcid); + void (*rx_cmd_ch_close_ack)(struct glink_transport_if *if_ptr, + uint32_t lcid); + + /* channel data */ + struct glink_core_rx_intent * (*rx_get_pkt_ctx)( + struct glink_transport_if *if_ptr, + uint32_t rcid, uint32_t liid); + void (*rx_put_pkt_ctx)(struct glink_transport_if *if_ptr, uint32_t rcid, + struct glink_core_rx_intent *intent_ptr, bool complete); + void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr, + uint32_t rcid, uint32_t riid, size_t size); + void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid, + uint32_t riid, bool reuse); + void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr, + uint32_t rcid, size_t size); + void (*rx_cmd_rx_intent_req_ack)(struct glink_transport_if *if_ptr, + uint32_t rcid, bool granted); + void (*rx_cmd_remote_sigs)(struct glink_transport_if *if_ptr, + uint32_t rcid, uint32_t sigs); + + /* channel scheduling */ + void (*tx_resume)(struct glink_transport_if *if_ptr); +}; + +int glink_core_register_transport(struct glink_transport_if *if_ptr, + struct glink_core_transport_cfg *cfg); + +void glink_core_unregister_transport(struct glink_transport_if *if_ptr); + +/** + * of_get_glink_core_qos_cfg() - Parse the qos related dt entries + * @phandle: The handle to the qos related node in DT. + * @cfg: The transport configuration to be filled. + * + * Return: 0 on Success, standard Linux error otherwise. + */ +int of_get_glink_core_qos_cfg(struct device_node *phandle, + struct glink_core_transport_cfg *cfg);
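To make the registration flow concrete, here is a minimal sketch of a transport filling in the configuration and registering itself with the core. The glink_transport_if instance comes from the transport interface header, which is not part of this hunk, so the surrounding driver code is an assumption:

/* Grant only the intersection of the remote request and our features. */
static uint32_t example_negotiate_features(struct glink_transport_if *if_ptr,
		const struct glink_core_version *version_ptr,
		uint32_t features)
{
	return features & version_ptr->features;
}

static const struct glink_core_version example_versions[] = {
	{
		.version = 1,
		.features = 0,
		.negotiate_features = example_negotiate_features,
	},
};

/* Hypothetical transport probe path. */
static int example_xprt_register(struct glink_transport_if *if_ptr)
{
	struct glink_core_transport_cfg cfg = {
		.name = "example_xprt",
		.edge = "lpass",
		.versions = example_versions,
		.versions_entries = ARRAY_SIZE(example_versions),
		.max_cid = 255,
		.max_iid = 255,
	};

	return glink_core_register_transport(if_ptr, &cfg);
}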
+ +/** + * rx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers + * @iovec: Pointer to the beginning of the linear buffer. + * @offset: Offset into the buffer whose address is needed. + * @size: Pointer to hold the length of the contiguous buffer space. + * + * This function is used when a linear buffer is received while the client has + * registered to receive vector buffers. + * + * Return: Address of the buffer which is at offset "offset" from the beginning + * of the buffer. + */ +static inline void *rx_linear_vbuf_provider(void *iovec, size_t offset, + size_t *size) +{ + struct glink_core_rx_intent *rx_info = + (struct glink_core_rx_intent *)iovec; + + if (unlikely(!iovec || !size)) + return NULL; + + if (unlikely(offset >= rx_info->pkt_size)) + return NULL; + + if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, rx_info->data, offset))) + return NULL; + + *size = rx_info->pkt_size - offset; + return rx_info->data + offset; +} + +#endif /* _SOC_QCOM_GLINK_CORE_IF_H_ */ diff --git a/drivers/soc/qcom/glink_debugfs.c b/drivers/soc/qcom/glink_debugfs.c new file mode 100644 index 00000000000000..8e65e4ac9b8ea8 --- /dev/null +++ b/drivers/soc/qcom/glink_debugfs.c @@ -0,0 +1,783 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/ipc_logging.h> +#include <linux/list.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include "glink_private.h" +#include "glink_core_if.h" + + +static const char * const ss_string[] = { + [GLINK_DBGFS_MPSS] = "mpss", + [GLINK_DBGFS_APSS] = "apss", + [GLINK_DBGFS_LPASS] = "lpass", + [GLINK_DBGFS_DSPS] = "dsps", + [GLINK_DBGFS_RPM] = "rpm", + [GLINK_DBGFS_WCNSS] = "wcnss", + [GLINK_DBGFS_LLOOP] = "lloop", + [GLINK_DBGFS_MOCK] = "mock" +}; + +static const char * const xprt_string[] = { + [GLINK_DBGFS_SMEM] = "smem", + [GLINK_DBGFS_SMD] = "smd", + [GLINK_DBGFS_XLLOOP] = "lloop", + [GLINK_DBGFS_XMOCK] = "mock", + [GLINK_DBGFS_XMOCK_LOW] = "mock_low", + [GLINK_DBGFS_XMOCK_HIGH] = "mock_high" +}; + +static const char * const ch_st_string[] = { + [GLINK_CHANNEL_CLOSED] = "CLOSED", + [GLINK_CHANNEL_OPENING] = "OPENING", + [GLINK_CHANNEL_OPENED] = "OPENED", + [GLINK_CHANNEL_CLOSING] = "CLOSING", +}; + +static const char * const xprt_st_string[] = { + [GLINK_XPRT_DOWN] = "DOWN", + [GLINK_XPRT_NEGOTIATING] = "NEGOT", + [GLINK_XPRT_OPENED] = "OPENED", + [GLINK_XPRT_FAILED] = "FAILED" +}; + +#if defined(CONFIG_DEBUG_FS) +#define GLINK_DBGFS_NAME_SIZE (2 * GLINK_NAME_SIZE + 1) + +struct glink_dbgfs_dent { + struct list_head list_node; + char par_name[GLINK_DBGFS_NAME_SIZE]; + char self_name[GLINK_DBGFS_NAME_SIZE]; + struct dentry *parent; + struct dentry *self; + spinlock_t file_list_lock_lhb0; + struct list_head file_list; +}; + +static struct dentry *dent; +static LIST_HEAD(dent_list); +static DEFINE_MUTEX(dent_list_lock_lha0); + +static int debugfs_show(struct seq_file *s, void *data) +{ + struct glink_dbgfs_data *dfs_d; + + dfs_d = s->private; + dfs_d->o_func(s); + return 0; +} + +static int debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, debugfs_show, inode->i_private); +} + +static const struct file_operations debug_ops = { + .open = debug_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; +#endif + +/** + * glink_get_ss_enum_string() - get the name of the subsystem based on enum value + * @enum_id: enum id of a specific subsystem.
+ * + * Return: name of the subsystem, NULL in case of invalid input + */ +const char *glink_get_ss_enum_string(unsigned int enum_id) +{ + if (enum_id >= ARRAY_SIZE(ss_string)) + return NULL; + + return ss_string[enum_id]; +} +EXPORT_SYMBOL(glink_get_ss_enum_string); + +/** + * glink_get_xprt_enum_string() - get the name of the transport based on enum value + * @enum_id: enum id of a specific transport. + * + * Return: name of the transport, NULL in case of invalid input + */ +const char *glink_get_xprt_enum_string(unsigned int enum_id) +{ + if (enum_id >= ARRAY_SIZE(xprt_string)) + return NULL; + return xprt_string[enum_id]; +} +EXPORT_SYMBOL(glink_get_xprt_enum_string); + +/** + * glink_get_xprt_state_string() - get the name of the transport state based on enum value + * @enum_id: enum id of the state of the transport. + * + * Return: name of the transport state, NULL in case of invalid input + */ +const char *glink_get_xprt_state_string( + enum transport_state_e enum_id) +{ + if (enum_id >= ARRAY_SIZE(xprt_st_string)) + return NULL; + + return xprt_st_string[enum_id]; +} +EXPORT_SYMBOL(glink_get_xprt_state_string); + +/** + * glink_get_ch_state_string() - get the name of the channel state based on enum value + * @enum_id: enum id of a specific state of the channel. + * + * Return: name of the channel state, NULL in case of invalid input + */ +const char *glink_get_ch_state_string( + enum local_channel_state_e enum_id) +{ + if (enum_id >= ARRAY_SIZE(ch_st_string)) + return NULL; + + return ch_st_string[enum_id]; +} +EXPORT_SYMBOL(glink_get_ch_state_string); + +#if defined(CONFIG_DEBUG_FS) +/** + * glink_dfs_create_file() - create the debugfs file + * @name: debugfs file name + * @parent: pointer to the parent dentry structure + * @show: pointer to the actual function which will be invoked upon + * opening this file. + * + * Return: pointer to the allocated glink_dbgfs_data structure or + * NULL in case of an error. + * + * This function actually creates a debugfs file under the parent directory + */ +static struct glink_dbgfs_data *glink_dfs_create_file(const char *name, + struct dentry *parent, void (*show)(struct seq_file *s), + void *dbgfs_data, bool b_free_req) +{ + struct dentry *file; + struct glink_dbgfs_data *dfs_d; + + dfs_d = kzalloc(sizeof(struct glink_dbgfs_data), GFP_KERNEL); + if (dfs_d == NULL) + return NULL; + + dfs_d->o_func = show; + if (dbgfs_data != NULL) { + dfs_d->priv_data = dbgfs_data; + dfs_d->b_priv_free_req = b_free_req; + } + file = debugfs_create_file(name, 0400, parent, dfs_d, &debug_ops); + if (!file) + GLINK_DBG("%s: unable to create file '%s'\n", __func__, + name); + dfs_d->dent = file; + return dfs_d; +} + +/** + * write_ch_intent() - write channel intent details + * @s: pointer to the sequential file + * @intent: pointer to the glink core intent structure + * @i_type: type of intent + * @count: serial number of the intent. + * + * This function is a helper function of glink_dfs_update_ch_intents() + * that prints out details of any specific intent.
+ */ +static void write_ch_intent(struct seq_file *s, + struct glink_core_rx_intent *intent, + char *i_type, unsigned int count) +{ + char *intent_type; + /* + * formatted, human readable channel state output, ie: + * TYPE |SN |ID |PKT_SIZE|W_OFFSET|INT_SIZE| + * -------------------------------------------------------------- + * LOCAL_LIST|#2 |1 |0 |0 |8 | + */ + if (count == 1) { + intent_type = i_type; + seq_puts(s, + "\n--------------------------------------------------------\n"); + } else { + intent_type = ""; + } + seq_printf(s, "%-20s|#%-5d|%-6u|%-10zu|%-10zu|%-10zu|\n", + intent_type, + count, + intent->id, + intent->pkt_size, + intent->write_offset, + intent->intent_size); +} + +/** + * glink_dfs_update_ch_intent() - writes the intent details of a specific + * channel to the corresponding debugfs file + * @s: pointer to the sequential file + * + * This function extracts the intent details of a channel & prints them to + * the corresponding debugfs file of that channel. + */ +static void glink_dfs_update_ch_intent(struct seq_file *s) +{ + struct glink_dbgfs_data *dfs_d; + struct channel_ctx *ch_ctx; + struct glink_core_rx_intent *intent; + struct glink_core_rx_intent *intent_temp; + struct glink_ch_intent_info ch_intent_info; + unsigned long flags; + unsigned int count = 0; + + dfs_d = s->private; + ch_ctx = dfs_d->priv_data; + if (ch_ctx != NULL) { + glink_get_ch_intent_info(ch_ctx, &ch_intent_info); + seq_puts(s, + "---------------------------------------------------------------\n"); + seq_printf(s, "%-20s|%-6s|%-6s|%-10s|%-10s|%-10s|\n", + "INTENT TYPE", + "SN", + "ID", + "PKT_SIZE", + "W_OFFSET", + "INT_SIZE"); + seq_puts(s, + "---------------------------------------------------------------\n"); + spin_lock_irqsave(ch_intent_info.li_lst_lock, flags); + list_for_each_entry_safe(intent, intent_temp, + ch_intent_info.li_avail_list, list) { + count++; + write_ch_intent(s, intent, "LOCAL_AVAIL_LIST", count); + } + + count = 0; + list_for_each_entry_safe(intent, intent_temp, + ch_intent_info.li_used_list, list) { + count++; + write_ch_intent(s, intent, "LOCAL_USED_LIST", count); + } + spin_unlock_irqrestore(ch_intent_info.li_lst_lock, flags); + + count = 0; + spin_lock_irqsave(ch_intent_info.ri_lst_lock, flags); + list_for_each_entry_safe(intent, intent_temp, + ch_intent_info.ri_list, list) { + count++; + write_ch_intent(s, intent, "REMOTE_LIST", count); + } + spin_unlock_irqrestore(ch_intent_info.ri_lst_lock, + flags); + seq_puts(s, + "---------------------------------------------------------------\n"); + } +} + +/** + * glink_dfs_update_ch_stats() - writes statistics of a specific + * channel to the corresponding debugfs file + * @s: pointer to the sequential file + * + * This function extracts other statistics of a channel & prints them to + * the corresponding debugfs file of that channel + */ +static void glink_dfs_update_ch_stats(struct seq_file *s) +{ + /* FUTURE: add channel statistics */ + seq_puts(s, "not yet implemented\n"); +} + +/** + * glink_debugfs_remove_channel() - remove all channel specific files & folders in + * debugfs when channel is fully closed + * @ch_ctx: pointer to the channel_context + * @xprt_ctx: pointer to the transport_context + * + * This function is invoked when any channel is fully closed. It removes the + * folders & other files in debugfs for that channel.
+ */ +void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx, + struct glink_core_xprt_ctx *xprt_ctx) +{ + struct glink_dbgfs ch_rm_dbgfs; + char *edge_name; + char curr_dir_name[GLINK_DBGFS_NAME_SIZE]; + char *xprt_name; + + ch_rm_dbgfs.curr_name = glink_get_ch_name(ch_ctx); + edge_name = glink_get_xprt_edge_name(xprt_ctx); + xprt_name = glink_get_xprt_name(xprt_ctx); + if (!xprt_name || !edge_name) { + GLINK_ERR("%s: Invalid xprt_name or edge_name for ch '%s'\n", + __func__, ch_rm_dbgfs.curr_name); + return; + } + snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s", + edge_name, xprt_name); + ch_rm_dbgfs.par_name = curr_dir_name; + glink_debugfs_remove_recur(&ch_rm_dbgfs); +} +EXPORT_SYMBOL(glink_debugfs_remove_channel); + +/** + * glink_debugfs_add_channel() - create channel specific files & folders in + * debugfs when channel is added + * @ch_ctx: pointer to the channel_context + * @xprt_ctx: pointer to the transport_context + * + * This function is invoked when a new channel is created. It creates the + * folders & other files in debugfs for that channel + */ +void glink_debugfs_add_channel(struct channel_ctx *ch_ctx, + struct glink_core_xprt_ctx *xprt_ctx) +{ + struct glink_dbgfs ch_dbgfs; + char *ch_name; + char *edge_name; + char *xprt_name; + char curr_dir_name[GLINK_DBGFS_NAME_SIZE]; + + if (ch_ctx == NULL) { + GLINK_ERR("%s: Channel Context is NULL\n", __func__); + return; + } + + ch_name = glink_get_ch_name(ch_ctx); + edge_name = glink_get_xprt_edge_name(xprt_ctx); + xprt_name = glink_get_xprt_name(xprt_ctx); + if (!xprt_name || !edge_name) { + GLINK_ERR("%s: Invalid xprt_name or edge_name for ch '%s'\n", + __func__, ch_name); + return; + } + snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s", + edge_name, xprt_name); + + ch_dbgfs.curr_name = curr_dir_name; + ch_dbgfs.par_name = "channel"; + ch_dbgfs.b_dir_create = true; + glink_debugfs_create(ch_name, NULL, &ch_dbgfs, NULL, false); + + ch_dbgfs.par_name = ch_dbgfs.curr_name; + ch_dbgfs.curr_name = ch_name; + ch_dbgfs.b_dir_create = false; + glink_debugfs_create("stats", glink_dfs_update_ch_stats, + &ch_dbgfs, (void *)ch_ctx, false); + glink_debugfs_create("intents", glink_dfs_update_ch_intent, + &ch_dbgfs, (void *)ch_ctx, false); +} +EXPORT_SYMBOL(glink_debugfs_add_channel); + +/** + * glink_debugfs_add_xprt() - create transport specific files & folders in + * debugfs when a new transport is registered + * @xprt_ctx: pointer to the transport_context + * + * This function is invoked when a new transport is registered.
It creates the + * folders & other files in debugfs for that transport + */ +void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx) +{ + struct glink_dbgfs xprt_dbgfs; + char *xprt_name; + char *edge_name; + char curr_dir_name[GLINK_DBGFS_NAME_SIZE]; + + if (xprt_ctx == NULL) { + GLINK_ERR("%s: Transport Context is NULL\n", __func__); + return; + } + xprt_name = glink_get_xprt_name(xprt_ctx); + edge_name = glink_get_xprt_edge_name(xprt_ctx); + if (!xprt_name || !edge_name) { + GLINK_ERR("%s: xprt name or edge name is NULL\n", __func__); + return; + } + snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s", + edge_name, xprt_name); + xprt_dbgfs.par_name = "glink"; + xprt_dbgfs.curr_name = "xprt"; + xprt_dbgfs.b_dir_create = true; + glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false); + xprt_dbgfs.curr_name = "channel"; + glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false); +} +EXPORT_SYMBOL(glink_debugfs_add_xprt); + +/** + * glink_dfs_create_channel_list() - create & update the channel details + * @s: pointer to seq_file + * + * This function updates channel details in the debugfs + * file present in /glink/channel/channels + */ +static void glink_dfs_create_channel_list(struct seq_file *s) +{ + struct xprt_ctx_iterator xprt_iter; + struct ch_ctx_iterator ch_iter; + + struct glink_core_xprt_ctx *xprt_ctx; + struct channel_ctx *ch_ctx; + int count = 0; + /* + * formatted, human readable channel state output, ie: + * NAME |LCID|RCID|XPRT|EDGE|LSTATE |RSTATE|LINT-Q|RINT-Q| + * -------------------------------------------------------------------- + * LOCAL_LOOPBACK_CLNT|2 |1 |lloop|local|OPENED|OPENED|5 |6 | + * N.B. Number of TX & RX Packets not implemented yet. -ENOSYS is printed + */ + seq_printf(s, "%-20s|%-4s|%-4s|%-10s|%-6s|%-7s|%-7s|%-5s|%-5s|\n", + "NAME", + "LCID", + "RCID", + "XPRT", + "EDGE", + "LSTATE", + "RSTATE", + "LINTQ", + "RINTQ"); + seq_puts(s, + "-------------------------------------------------------------------------------\n"); + glink_xprt_ctx_iterator_init(&xprt_iter); + xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter); + + while (xprt_ctx != NULL) { + glink_ch_ctx_iterator_init(&ch_iter, xprt_ctx); + ch_ctx = glink_ch_ctx_iterator_next(&ch_iter); + while (ch_ctx != NULL) { + count++; + seq_printf(s, "%-20s|%-4i|%-4i|%-10s|%-6s|%-7s|", + glink_get_ch_name(ch_ctx), + glink_get_ch_lcid(ch_ctx), + glink_get_ch_rcid(ch_ctx), + glink_get_ch_xprt_name(ch_ctx), + glink_get_ch_edge_name(ch_ctx), + glink_get_ch_lstate(ch_ctx)); + seq_printf(s, "%-7s|%-5i|%-5i|\n", + (glink_get_ch_rstate(ch_ctx) ? "OPENED" : "CLOSED"), + glink_get_ch_lintents_queued(ch_ctx), + glink_get_ch_rintents_queued(ch_ctx)); + + ch_ctx = glink_ch_ctx_iterator_next(&ch_iter); + } + glink_ch_ctx_iterator_end(&ch_iter, xprt_ctx); + xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter); + } + + glink_xprt_ctx_iterator_end(&xprt_iter); +} + +/** + * glink_dfs_create_xprt_list() - create & update the transport details + * @s: pointer to seq_file + * + * This function updates transport details in the debugfs file present + * in /glink/xprt/xprts + */ +static void glink_dfs_create_xprt_list(struct seq_file *s) +{ + struct xprt_ctx_iterator xprt_iter; + struct glink_core_xprt_ctx *xprt_ctx; + const struct glink_core_version *gver; + uint32_t version; + uint32_t features; + int count = 0; + /* + * formatted, human readable channel state output, ie: + * XPRT_NAME|REMOTE |STATE|VERSION |FEATURES| + * --------------------------------------------- + * smd_trans|lpass |2 |0 |1 | + * smem |mpss |0 |0 |0 | + */ + seq_printf(s, "%-20s|%-20s|%-6s|%-8s|%-8s|\n", + "XPRT_NAME", + "REMOTE", + "STATE", + "VERSION", + "FEATURES"); + seq_puts(s, + "-------------------------------------------------------------------------------\n"); + glink_xprt_ctx_iterator_init(&xprt_iter); + xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter); + + while (xprt_ctx != NULL) { + count++; + seq_printf(s, "%-20s|%-20s|", + glink_get_xprt_name(xprt_ctx), + glink_get_xprt_edge_name(xprt_ctx)); + gver = glink_get_xprt_version_features(xprt_ctx); + if (gver != NULL) { + version = gver->version; + features = gver->features; + seq_printf(s, "%-6s|%-8i|%-8i|\n", + glink_get_xprt_state(xprt_ctx), + version, + features); + } else { + seq_printf(s, "%-6s|%-8i|%-8i|\n", + glink_get_xprt_state(xprt_ctx), + -ENODATA, + -ENODATA); + } + xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter); + + } + + glink_xprt_ctx_iterator_end(&xprt_iter); +} + +/** + * glink_dfs_update_list() - update the internally maintained dentry linked list + * @curr_dent: pointer to the current dentry object + * @parent: pointer to the parent dentry object + * @curr: current directory name + * @par_dir: parent directory name + */ +void glink_dfs_update_list(struct dentry *curr_dent, struct dentry *parent, + const char *curr, const char *par_dir) +{ + struct glink_dbgfs_dent *dbgfs_dent_s; + + if (curr_dent != NULL) { + dbgfs_dent_s = kzalloc(sizeof(struct glink_dbgfs_dent), + GFP_KERNEL); + if (dbgfs_dent_s != NULL) { + INIT_LIST_HEAD(&dbgfs_dent_s->file_list); + spin_lock_init(&dbgfs_dent_s->file_list_lock_lhb0); + dbgfs_dent_s->parent = parent; + dbgfs_dent_s->self = curr_dent; + strlcpy(dbgfs_dent_s->self_name, + curr, strlen(curr) + 1); + strlcpy(dbgfs_dent_s->par_name, par_dir, + strlen(par_dir) + 1); + mutex_lock(&dent_list_lock_lha0); + list_add_tail(&dbgfs_dent_s->list_node, &dent_list); + mutex_unlock(&dent_list_lock_lha0); + } + } else { + GLINK_DBG("%s:create directory failed for par:curr [%s:%s]\n", + __func__, par_dir, curr); + } + return; +} + +/** + * glink_remove_dfs_entry() - remove the entries from dent_list + * @entry: pointer to the glink_dbgfs_dent structure + * + * This function removes the entries from the internally maintained + * linked list of dentries. It also deletes the file list and associated memory + * if present.
+ */ +void glink_remove_dfs_entry(struct glink_dbgfs_dent *entry) +{ + struct glink_dbgfs_data *fentry, *fentry_temp; + unsigned long flags; + + if (entry == NULL) + return; + if (!list_empty(&entry->file_list)) { + spin_lock_irqsave(&entry->file_list_lock_lhb0, flags); + list_for_each_entry_safe(fentry, fentry_temp, + &entry->file_list, flist) { + if (fentry->b_priv_free_req) + kfree(fentry->priv_data); + list_del(&fentry->flist); + kfree(fentry); + fentry = NULL; + } + spin_unlock_irqrestore(&entry->file_list_lock_lhb0, flags); + } + list_del(&entry->list_node); + kfree(entry); + entry = NULL; +} + +/** + * glink_debugfs_remove_recur() - remove the directory & files recursively + * @rm_dfs: pointer to the structure glink_dbgfs + * + * This function removes the files & directories below the given directory. + * This also takes care of freeing any memory associated with the debugfs file. + */ +void glink_debugfs_remove_recur(struct glink_dbgfs *rm_dfs) +{ + const char *c_dir_name; + const char *p_dir_name; + struct glink_dbgfs_dent *entry, *entry_temp; + struct dentry *par_dent = NULL; + + if (rm_dfs == NULL) + return; + + c_dir_name = rm_dfs->curr_name; + p_dir_name = rm_dfs->par_name; + + mutex_lock(&dent_list_lock_lha0); + list_for_each_entry_safe(entry, entry_temp, &dent_list, list_node) { + if (!strcmp(entry->par_name, c_dir_name)) { + glink_remove_dfs_entry(entry); + } else if (!strcmp(entry->self_name, c_dir_name) + && !strcmp(entry->par_name, p_dir_name)) { + par_dent = entry->self; + glink_remove_dfs_entry(entry); + } + } + mutex_unlock(&dent_list_lock_lha0); + if (par_dent != NULL) + debugfs_remove_recursive(par_dent); +} +EXPORT_SYMBOL(glink_debugfs_remove_recur);
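glink_debugfs_init() below shows the canonical use of glink_debugfs_create(), defined next; as a further illustration, another G-Link module could hang its own directory and file off the root "glink" directory the same way. A minimal sketch (the "example" and "status" names are hypothetical):

static void example_status_show(struct seq_file *s)
{
	seq_puts(s, "example status: ok\n");
}

static void example_add_debugfs(void)
{
	struct glink_dbgfs dfs;

	/* create /sys/kernel/debug/glink/example */
	dfs.curr_name = "glink";
	dfs.par_name = "root";
	dfs.b_dir_create = true;
	glink_debugfs_create("example", NULL, &dfs, NULL, false);

	/* then create the "status" file inside it */
	dfs.curr_name = "example";
	dfs.par_name = "glink";
	dfs.b_dir_create = false;
	glink_debugfs_create("status", example_status_show, &dfs, NULL, false);
}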
+ +/** + * glink_debugfs_create() - create the debugfs file + * @name: debugfs file name + * @show: pointer to the actual function which will be invoked upon + * opening this file. + * @dir: pointer to a structure debugfs_dir + * @dbgfs_data: pointer to any private data to be associated with the debugfs + * file + * @b_free_req: boolean value to decide to free the memory associated with + * @dbgfs_data during deletion of the file + * + * Return: pointer to the file/directory created, NULL in case of error + * + * This function checks which directory will be used to create the debugfs file + * and calls glink_dfs_create_file. Anybody who intends to allocate memory for + * dbgfs_data and requires it to be freed on deletion of the file needs to set + * b_free_req to true. Otherwise, there will be a memory leak. + */ +struct dentry *glink_debugfs_create(const char *name, + void (*show)(struct seq_file *), + struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req) +{ + struct dentry *parent = NULL; + struct dentry *dent = NULL; + struct glink_dbgfs_dent *entry; + struct glink_dbgfs_data *file_data; + const char *c_dir_name; + const char *p_dir_name; + unsigned long flags; + + if (dir == NULL) { + GLINK_ERR("%s: debugfs_dir structure is null\n", __func__); + return NULL; + } + c_dir_name = dir->curr_name; + p_dir_name = dir->par_name; + + mutex_lock(&dent_list_lock_lha0); + list_for_each_entry(entry, &dent_list, list_node) + if (!strcmp(entry->par_name, p_dir_name) + && !strcmp(entry->self_name, c_dir_name)) { + parent = entry->self; + break; + } + mutex_unlock(&dent_list_lock_lha0); + p_dir_name = c_dir_name; + c_dir_name = name; + if (parent != NULL) { + if (dir->b_dir_create) { + dent = debugfs_create_dir(name, parent); + if (dent != NULL) + glink_dfs_update_list(dent, parent, + c_dir_name, p_dir_name); + } else { + file_data = glink_dfs_create_file(name, parent, show, + dbgfs_data, b_free_req); + spin_lock_irqsave(&entry->file_list_lock_lhb0, flags); + if (file_data != NULL) + list_add_tail(&file_data->flist, + &entry->file_list); + spin_unlock_irqrestore(&entry->file_list_lock_lhb0, + flags); + } + } else { + GLINK_DBG("%s: parent dentry is null for [%s]\n", + __func__, name); + } + return dent; +} +EXPORT_SYMBOL(glink_debugfs_create); + +/** + * glink_debugfs_init() - initialize the glink debugfs directory structure + * + * Return: 0 on success, otherwise an appropriate error code + * + * This function initializes the debugfs directory for glink + */ +int glink_debugfs_init(void) +{ + struct glink_dbgfs dbgfs; + + /* fake parent name */ + dent = debugfs_create_dir("glink", NULL); + if (IS_ERR_OR_NULL(dent)) + return PTR_ERR(dent); + + glink_dfs_update_list(dent, NULL, "glink", "root"); + + dbgfs.b_dir_create = true; + dbgfs.curr_name = "glink"; + dbgfs.par_name = "root"; + glink_debugfs_create("xprt", NULL, &dbgfs, NULL, false); + glink_debugfs_create("channel", NULL, &dbgfs, NULL, false); + + dbgfs.curr_name = "channel"; + dbgfs.par_name = "glink"; + dbgfs.b_dir_create = false; + glink_debugfs_create("channels", glink_dfs_create_channel_list, + &dbgfs, NULL, false); + dbgfs.curr_name = "xprt"; + glink_debugfs_create("xprts", glink_dfs_create_xprt_list, + &dbgfs, NULL, false); + + return 0; +} +EXPORT_SYMBOL(glink_debugfs_init); + +/** + * glink_debugfs_exit() - removes the glink debugfs directory + * + * This function recursively removes all the debugfs directories + * starting from dent + */ +void glink_debugfs_exit(void) +{ + if (dent != NULL) + debugfs_remove_recursive(dent); +} +EXPORT_SYMBOL(glink_debugfs_exit); +#else +void glink_debugfs_remove_recur(struct glink_dbgfs *dfs) { } +EXPORT_SYMBOL(glink_debugfs_remove_recur); + +void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx, + struct glink_core_xprt_ctx *xprt_ctx) { } +EXPORT_SYMBOL(glink_debugfs_remove_channel); + +void glink_debugfs_add_channel(struct channel_ctx *ch_ctx, + struct glink_core_xprt_ctx *xprt_ctx) { } +EXPORT_SYMBOL(glink_debugfs_add_channel); + +void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx) { } +EXPORT_SYMBOL(glink_debugfs_add_xprt); + +int glink_debugfs_init(void) { return 0; } +EXPORT_SYMBOL(glink_debugfs_init); + +void glink_debugfs_exit(void) { } +EXPORT_SYMBOL(glink_debugfs_exit); +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/soc/qcom/glink_private.h
b/drivers/soc/qcom/glink_private.h new file mode 100644 index 00000000000000..d79893227cd269 --- /dev/null +++ b/drivers/soc/qcom/glink_private.h @@ -0,0 +1,1044 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _SOC_QCOM_GLINK_PRIVATE_H_ +#define _SOC_QCOM_GLINK_PRIVATE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct glink_core_xprt_ctx; +struct channel_ctx; +enum transport_state_e; +enum local_channel_state_e; + +/* Logging Macros */ +enum { + QCOM_GLINK_INFO = 1U << 0, + QCOM_GLINK_DEBUG = 1U << 1, + QCOM_GLINK_GPIO = 1U << 2, + QCOM_GLINK_PERF = 1U << 3, +}; + +enum glink_dbgfs_ss { + GLINK_DBGFS_MPSS, + GLINK_DBGFS_APSS, + GLINK_DBGFS_LPASS, + GLINK_DBGFS_DSPS, + GLINK_DBGFS_RPM, + GLINK_DBGFS_WCNSS, + GLINK_DBGFS_LLOOP, + GLINK_DBGFS_MOCK, + GLINK_DBGFS_MAX_NUM_SUBS +}; + +enum glink_dbgfs_xprt { + GLINK_DBGFS_SMEM, + GLINK_DBGFS_SMD, + GLINK_DBGFS_XLLOOP, + GLINK_DBGFS_XMOCK, + GLINK_DBGFS_XMOCK_LOW, + GLINK_DBGFS_XMOCK_HIGH, + GLINK_DBGFS_MAX_NUM_XPRTS +}; + +struct glink_dbgfs { + const char *curr_name; + const char *par_name; + bool b_dir_create; +}; + +struct glink_dbgfs_data { + struct list_head flist; + struct dentry *dent; + void (*o_func)(struct seq_file *s); + void *priv_data; + bool b_priv_free_req; +}; + +struct xprt_ctx_iterator { + struct list_head *xprt_list; + struct glink_core_xprt_ctx *i_curr; + unsigned long xprt_list_flags; +}; + +struct ch_ctx_iterator { + struct list_head *ch_list; + struct channel_ctx *i_curr; + unsigned long ch_list_flags; +}; + +struct glink_ch_intent_info { + spinlock_t *li_lst_lock; + struct list_head *li_avail_list; + struct list_head *li_used_list; + spinlock_t *ri_lst_lock; + struct list_head *ri_list; +}; + +/* Tracer Packet Event IDs for G-Link */ +enum glink_tracer_pkt_events { + GLINK_CORE_TX = 1, + GLINK_QUEUE_TO_SCHEDULER = 2, + GLINK_SCHEDULER_TX = 3, + GLINK_XPRT_TX = 4, + GLINK_XPRT_RX = 5, + GLINK_CORE_RX = 6, +}; + +/** + * glink_get_ss_enum_string() - get the name of the subsystem based on enum value + * @enum_id: enum id of a specific subsystem. + * + * Return: name of the subsystem, NULL in case of invalid input + */ +const char *glink_get_ss_enum_string(unsigned int enum_id); + +/** + * glink_get_xprt_enum_string() - get the name of the transport based on enum value + * @enum_id: enum id of a specific transport. + * + * Return: name of the transport, NULL in case of invalid input + */ +const char *glink_get_xprt_enum_string(unsigned int enum_id); + +/** + * glink_get_xprt_state_string() - get the name of the transport based on enum value + * @enum_id: enum id of the state of the transport. + * + * Return: name of the transport state, NULL in case of invalid input + */ +const char *glink_get_xprt_state_string(enum transport_state_e enum_id); + +/** + * glink_get_ch_state_string() - get the name of the transport based on enum value + * @enum_id: enum id of a specific state of the channel. 
+ * + * Return: name of the channel state, NULL in case of invalid input + */ +const char *glink_get_ch_state_string(enum local_channel_state_e enum_id); + +#define GLINK_IPC_LOG_STR(x...) do { \ + if (glink_get_log_ctx()) \ + ipc_log_string(glink_get_log_ctx(), x); \ +} while (0) + +#define GLINK_DBG(x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \ + GLINK_IPC_LOG_STR(x); \ +} while (0) + +#define GLINK_INFO(x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_INFO) \ + GLINK_IPC_LOG_STR(x); \ +} while (0) + +#define GLINK_INFO_PERF(x...) do { \ + if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \ + GLINK_IPC_LOG_STR(x); \ +} while (0) + +#define GLINK_PERF(x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_PERF) \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_UT_ERR(x...) do { \ + if (!(glink_get_debug_mask() & QCOM_GLINK_PERF)) \ + pr_err(" " x); \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_UT_DBG(x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_UT_INFO(x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_INFO) \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_UT_INFO_PERF(x...) do { \ + if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_UT_PERF(x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_PERF) \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_XPRT_IPC_LOG_STR(xprt, x...) do { \ + if (glink_get_xprt_log_ctx(xprt)) \ + ipc_log_string(glink_get_xprt_log_ctx(xprt), x); \ +} while (0) + +#define GLINK_XPRT_IF_INFO(xprt_if, x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_INFO) \ + GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, " " x); \ +} while (0) + +#define GLINK_XPRT_IF_DBG(xprt_if, x...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \ + GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, " " x); \ +} while (0) + +#define GLINK_XPRT_IF_ERR(xprt_if, x...) do { \ + pr_err(" " x); \ + GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, " " x); \ +} while (0) + +#define GLINK_PERF_XPRT(xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_PERF) \ + GLINK_XPRT_IPC_LOG_STR(xprt, " %s:%s " fmt, \ + xprt->name, xprt->edge, args); \ +} while (0) + +#define GLINK_PERF_CH(ctx, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_PERF) \ + GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \ + " %s:%s:%s[%u:%u] " fmt, \ + ctx->transport_ptr->name, \ + ctx->transport_ptr->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_PERF) \ + GLINK_XPRT_IPC_LOG_STR(xprt, \ + " %s:%s:%s[%u:%u] " fmt, \ + xprt->name, \ + xprt->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_INFO_PERF_XPRT(xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \ + GLINK_XPRT_IPC_LOG_STR(xprt, " %s:%s " fmt, \ + xprt->name, xprt->edge, args); \ +} while (0) + +#define GLINK_INFO_PERF_CH(ctx, fmt, args...) do { \ + if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \ + GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \ + " %s:%s:%s[%u:%u] " fmt, \ + ctx->transport_ptr->name, \ + ctx->transport_ptr->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_INFO_PERF_CH_XPRT(ctx, xprt, fmt, args...) 
do { \ + if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \ + GLINK_XPRT_IPC_LOG_STR(xprt,\ + " %s:%s:%s[%u:%u] " fmt, \ + xprt->name, \ + xprt->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_INFO_XPRT(xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_INFO) \ + GLINK_XPRT_IPC_LOG_STR(xprt, " %s:%s " fmt, \ + xprt->name, xprt->edge, args); \ +} while (0) + +#define GLINK_INFO_CH(ctx, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_INFO) \ + GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \ + " %s:%s:%s[%u:%u] " fmt, \ + ctx->transport_ptr->name, \ + ctx->transport_ptr->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_INFO) \ + GLINK_XPRT_IPC_LOG_STR(xprt, \ + " %s:%s:%s[%u:%u] " fmt, \ + xprt->name, \ + xprt->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_DBG_XPRT(xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \ + GLINK_XPRT_IPC_LOG_STR(xprt, " %s:%s " fmt, \ + xprt->name, xprt->edge, args); \ +} while (0) + +#define GLINK_DBG_CH(ctx, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \ + GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \ + " %s:%s:%s[%u:%u] " fmt, \ + ctx->transport_ptr->name, \ + ctx->transport_ptr->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_DBG_CH_XPRT(ctx, xprt, fmt, args...) do { \ + if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \ + GLINK_XPRT_IPC_LOG_STR(xprt, \ + " %s:%s:%s[%u:%u] " fmt, \ + xprt->name, \ + xprt->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ +} while (0) + +#define GLINK_ERR(x...) do { \ + pr_err_ratelimited(" " x); \ + GLINK_IPC_LOG_STR(" " x); \ +} while (0) + +#define GLINK_ERR_XPRT(xprt, fmt, args...) do { \ + pr_err_ratelimited(" %s:%s " fmt, \ + xprt->name, xprt->edge, args); \ + GLINK_INFO_XPRT(xprt, fmt, args); \ +} while (0) + +#define GLINK_ERR_CH(ctx, fmt, args...) do { \ + pr_err_ratelimited(" %s:%s:%s[%u:%u] " fmt, \ + ctx->transport_ptr->name, \ + ctx->transport_ptr->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ + GLINK_INFO_CH(ctx, fmt, args); \ +} while (0) + +#define GLINK_ERR_CH_XPRT(ctx, xprt, fmt, args...) do { \ + pr_err_ratelimited(" %s:%s:%s[%u:%u] " fmt, \ + xprt->name, \ + xprt->edge, \ + ctx->name, \ + ctx->lcid, \ + ctx->rcid, args); \ + GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args); \ +} while (0) + +/** + * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow + * + * type: type to check for overflow + * a: left value to use + * b: right value to use + * returns: true if a + b will result in overflow; false otherwise + */ +#define OVERFLOW_ADD_UNSIGNED(type, a, b) \ + (((type)~0 - (a)) < (b) ? true : false) + +/** + * glink_get_debug_mask() - Return debug mask attribute + * + * Return: debug mask attribute + */ +unsigned glink_get_debug_mask(void); + +/** + * glink_get_log_ctx() - Return log context for other GLINK modules. + * + * Return: Log context or NULL if none. + */ +void *glink_get_log_ctx(void); + +/** + * glink_get_xprt_log_ctx() - Return log context for other GLINK modules. + * + * Return: Log context or NULL if none. + */ +void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt); + +/** + * glink_get_channel_id_for_handle() - Get logical channel ID + * + * @handle: handle of channel + * + * Used internally by G-Link debugfs. 
+ * + * Return: Logical Channel ID or standard Linux error code + */ +int glink_get_channel_id_for_handle(void *handle); + +/** + * glink_get_channel_name_for_handle() - return channel name + * + * @handle: handle of channel + * + * Used internally by G-Link debugfs. + * + * Return: Channel name or NULL + */ +char *glink_get_channel_name_for_handle(void *handle); + +/** + * glink_debugfs_init() - initialize glink debugfs directory + * + * Return: 0 on success, otherwise an appropriate error code. + */ +int glink_debugfs_init(void); + +/** + * glink_debugfs_exit() - removes glink debugfs directory + */ +void glink_debugfs_exit(void); + +/** + * glink_debugfs_create() - create the debugfs file + * @name: debugfs file name + * @show: pointer to the actual function which will be invoked upon + * opening this file. + * @dir: pointer to a structure debugfs_dir + * @dbgfs_data: pointer to any private data need to be associated with debugfs + * @b_free_req: boolean value to decide to free the memory associated with + * @dbgfs_data during deletion of the file + * + * Return: pointer to the file/directory created, NULL in case of error + * + * This function checks which directory will be used to create the debugfs file + * and calls glink_dfs_create_file. Callers who allocate memory for dbgfs_data + * and need it freed when the file is deleted must set b_free_req to true; + * otherwise, the memory will leak. + */ +struct dentry *glink_debugfs_create(const char *name, + void (*show)(struct seq_file *), + struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req); + +/** + * glink_debugfs_remove_recur() - remove the directory & files recursively + * @dfs: pointer to the structure glink_dbgfs + * + * This function removes the files & directories. This also takes care of + * freeing any memory associated with the debugfs file. + */ +void glink_debugfs_remove_recur(struct glink_dbgfs *dfs); + +/** + * glink_debugfs_remove_channel() - remove all channel specific files & folder in + * debugfs when channel is fully closed + * @ch_ctx: pointer to the channel_context + * @xprt_ctx: pointer to the transport_context + * + * This function is invoked when any channel is fully closed. It removes the + * folders & other files in debugfs for that channel. + */ +void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx, + struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_debugfs_add_channel() - create channel specific files & folder in + * debugfs when channel is added + * @ch_ctx: pointer to the channel_context + * @xprt_ctx: pointer to the transport_context + * + * This function is invoked when a new channel is created. It creates the + * folders & other files in debugfs for that channel + */ +void glink_debugfs_add_channel(struct channel_ctx *ch_ctx, + struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_debugfs_add_xprt() - create transport specific files & folder in + * debugfs when new transport is registered + * @xprt_ctx: pointer to the transport_context + * + * This function is invoked when a new transport is registered. It creates the + * folders & other files in debugfs for that transport + */ +void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator + * @xprt_i: pointer to the transport context iterator.
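+ * + * A typical iteration over every registered transport looks like this + * (illustrative sketch; error handling omitted): + * + * struct xprt_ctx_iterator it; + * struct glink_core_xprt_ctx *xprt; + * + * glink_xprt_ctx_iterator_init(&it); + * while ((xprt = glink_xprt_ctx_iterator_next(&it)) != NULL) + * pr_info("%s:%s\n", glink_get_xprt_name(xprt), + * glink_get_xprt_edge_name(xprt)); + * glink_xprt_ctx_iterator_end(&it);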
+ * + * Return: None + * + * This function acquires the transport context lock which must then be + * released by glink_xprt_ctx_iterator_end() + */ +void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i); + +/** + * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration + * @xprt_i: pointer to the transport context iterator. + * + * Return: None + */ +void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i); + +/** + * glink_xprt_ctx_iterator_next() - iterates element by element in transport context list + * @xprt_i: pointer to the transport context iterator. + * + * Return: pointer to the transport context structure + */ +struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next( + struct xprt_ctx_iterator *xprt_i); + +/** + * glink_get_xprt_name() - get the transport name + * @xprt_ctx: pointer to the transport context. + * + * Return: name of the transport + */ +char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_get_xprt_edge_name() - get the name of the remote processor/edge + * of the transport + * @xprt_ctx: pointer to the transport context. + * + * Return: name of the remote processor/edge + */ +char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_get_xprt_state() - get the state of the transport + * @xprt_ctx: pointer to the transport context. + * + * Return: name of the transport state, NULL in case of invalid input + */ +const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_get_xprt_version_features() - get the version and feature set + * of the local transport in glink + * @xprt_ctx: pointer to the transport context. + * + * Return: pointer to the glink_core_version + */ +const struct glink_core_version *glink_get_xprt_version_features( + struct glink_core_xprt_ctx *xprt_ctx); + +/** + * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator + * @ch_iter: pointer to the channel context iterator. + * @xprt: pointer to the transport context that holds the channel list + * + * This function acquires the channel context lock which must then be + * released by glink_ch_ctx_iterator_end() + */ +void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter, + struct glink_core_xprt_ctx *xprt); + +/** + * glink_ch_ctx_iterator_end() - Ends the channel context list iteration + * @ch_iter: pointer to the channel context iterator. + * @xprt: pointer to the transport context that holds the channel list + * + */ +void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter, + struct glink_core_xprt_ctx *xprt); + +/** + * glink_ch_ctx_iterator_next() - iterates element by element in channel context list + * @ch_iter: pointer to the channel context iterator. + * + * Return: pointer to the channel context structure + */ +struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *ch_iter); + +/** + * glink_get_ch_name() - get the channel name + * @ch_ctx: pointer to the channel context. + * + * Return: name of the channel, NULL in case of invalid input + */ +char *glink_get_ch_name(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_edge_name() - get the name of the remote processor/edge + * of the channel + * @ch_ctx: pointer to the channel context. + * + * Return: name of the remote processor/edge + */ +char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_lcid() - get the local channel ID + * @ch_ctx: pointer to the channel context.
+ * + * Return: local channel id, -EINVAL in case of invalid input + */ +int glink_get_ch_lcid(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_rcid() - get the remote channel ID + * @ch_ctx: pointer to the channel context. + * + * Return: remote channel id, -EINVAL in case of invalid input + */ +int glink_get_ch_rcid(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_lstate() - get the local channel state + * @ch_ctx: pointer to the channel context. + * + * Return: name of the local channel state, NULL in case of invalid input + */ +const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_rstate() - get the remote channel state + * @ch_ctx: pointer to the channel context. + * + * Return: true if the remote side is opened, false otherwise + */ +bool glink_get_ch_rstate(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_xprt_name() - get the name of the transport to which + * the channel belongs + * @ch_ctx: pointer to the channel context. + * + * Return: name of the transport, NULL in case of invalid input + */ +char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_tx_pkt_count() - get the total number of packets sent + * through this channel + * @ch_ctx: pointer to the channel context. + * + * Return: number of packets transmitted, -EINVAL in case of invalid input + */ +int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_rx_pkt_count() - get the total number of packets + * received at this channel + * @ch_ctx: pointer to the channel context. + * + * Return: number of packets received, -EINVAL in case of invalid input + */ +int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_lintents_queued() - get the total number of intents queued + * at local side + * @ch_ctx: pointer to the channel context. + * + * Return: number of intents queued, -EINVAL in case of invalid input + */ +int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_rintents_queued() - get the total number of intents queued + * from remote side + * @ch_ctx: pointer to the channel context. + * + * Return: number of intents queued + */ +int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx); + +/** + * glink_get_ch_intent_info() - get the intent details of a channel + * @ch_ctx: pointer to the channel context. + * @ch_ctx_i: pointer to a structure that will contain intent details + * + * This function is used to get all the channel intent details including locks. + */ +void glink_get_ch_intent_info(struct channel_ctx *ch_ctx, + struct glink_ch_intent_info *ch_ctx_i); + +/** + * enum ssr_command - G-Link SSR protocol commands + */ +enum ssr_command { + GLINK_SSR_DO_CLEANUP, + GLINK_SSR_CLEANUP_DONE, +}; + +/** + * struct ssr_notify_data - Contains private data used for client notifications + * from G-Link. + * tx_done: Indicates whether or not the tx_done notification has + * been received. + * event: The state notification event received. + * responded: Indicates whether or not a cleanup_done response was + * received. + * edge: The G-Link edge name for the channel associated with + * this callback data + * do_cleanup_data: Structure containing the G-Link SSR do_cleanup message.
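+ * + * An illustrative do_cleanup message for an edge named "lpass" (the field + * values here are examples only; see struct do_cleanup_msg below): + * + * struct do_cleanup_msg msg = { + * .version = 0, + * .command = GLINK_SSR_DO_CLEANUP, + * .seq_num = glink_ssr_get_seq_num(), + * .name_len = strlen("lpass") + 1, + * .name = "lpass", + * };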
+ */ +struct ssr_notify_data { + bool tx_done; + unsigned event; + bool responded; + const char *edge; + struct do_cleanup_msg *do_cleanup_data; +}; + +/** + * struct subsys_info - Subsystem info structure + * ssr_name: name of the subsystem recognized by the SSR framework + * edge: name of the G-Link edge + * xprt: name of the G-Link transport + * handle: glink_ssr channel used for this subsystem + * link_state_handle: link state handle for this edge, used to unregister + * from receiving link state callbacks + * link_info: Transport info used in link state callback registration + * cb_data: Private callback data structure for notification + * functions + * subsystem_list_node: used to chain this structure in a list of subsystem + * info structures + * notify_list: list of subsys_info_leaf structures, containing the + * subsystems to notify if this subsystem undergoes SSR + * notify_list_len: length of notify_list + * link_up: Flag indicating whether transport is up or not + * link_up_lock: Lock for protecting the link_up flag + */ +struct subsys_info { + const char *ssr_name; + const char *edge; + const char *xprt; + void *handle; + void *link_state_handle; + struct glink_link_info *link_info; + struct ssr_notify_data *cb_data; + struct list_head subsystem_list_node; + struct list_head notify_list; + int notify_list_len; + bool link_up; + spinlock_t link_up_lock; +}; + +/** + * struct subsys_info_leaf - Subsystem info leaf structure (a subsystem on the + * notify list of a subsys_info structure) + * ssr_name: Name of the subsystem recognized by the SSR framework + * edge: Name of the G-Link edge + * xprt: Name of the G-Link transport + * restarted: Indicates whether a restart has been triggered for this edge + * cb_data: Private callback data structure for notification functions + * notify_list_node: used to chain this structure in the notify list + */ +struct subsys_info_leaf { + const char *ssr_name; + const char *edge; + const char *xprt; + bool restarted; + struct ssr_notify_data *cb_data; + struct list_head notify_list_node; +}; + +/** + * struct do_cleanup_msg - The data structure for an SSR do_cleanup message + * version: The G-Link SSR protocol version + * command: The G-Link SSR command - do_cleanup + * seq_num: Sequence number + * name_len: Length of the name of the subsystem being restarted + * name: G-Link edge name of the subsystem being restarted + */ +struct do_cleanup_msg { + uint32_t version; + uint32_t command; + uint32_t seq_num; + uint32_t name_len; + char name[32]; +}; + +/** + * struct cleanup_done_msg - The data structure for an SSR cleanup_done message + * version: The G-Link SSR protocol version + * response: The G-Link SSR response to a do_cleanup command, cleanup_done + * seq_num: Sequence number + */ +struct cleanup_done_msg { + uint32_t version; + uint32_t response; + uint32_t seq_num; +}; + +/** + * get_info_for_subsystem() - Retrieve information about a subsystem from the + * global subsystem_info_list + * @subsystem: The name of the subsystem recognized by the SSR + * framework + * + * Return: subsys_info structure containing info for the requested subsystem; + * NULL if no structure can be found for the requested subsystem + */ +struct subsys_info *get_info_for_subsystem(const char *subsystem); + +/** + * get_info_for_edge() - Retrieve information about a subsystem from the + * global subsystem_info_list + * @edge: The name of the edge recognized by G-Link + * + * Return: subsys_info structure containing info for the requested subsystem; + * NULL if no 
structure can be found for the requested subsystem + */ +struct subsys_info *get_info_for_edge(const char *edge); + +/** + * glink_ssr_get_seq_num() - Get the current SSR sequence number + * + * Return: The current SSR sequence number + */ +uint32_t glink_ssr_get_seq_num(void); + +/** + * glink_ssr() - SSR cleanup function. + * + * Return: Standard error code. + */ +int glink_ssr(const char *subsystem); + +/** + * notify_for_subsystem() - Notify other subsystems that a subsystem is being + * restarted + * @ss_info: Subsystem info structure for the subsystem being restarted + * + * This function sends notifications to affected subsystems that the subsystem + * in ss_info is being restarted, and waits for the cleanup done response from + * all of those subsystems. It also initiates any local cleanup that is + * necessary. + * + * Return: 0 on success, standard error codes otherwise + */ +int notify_for_subsystem(struct subsys_info *ss_info); + +/** + * glink_ssr_wait_cleanup_done() - Get the value of the + * notifications_successful flag in glink_ssr. + * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors + * + * Return: True if cleanup_done received from all processors, false otherwise + */ +bool glink_ssr_wait_cleanup_done(unsigned ssr_timeout_multiplier); + +struct channel_lcid { + struct list_head list_node; + uint32_t lcid; +}; + +/** + * struct rwref_lock - Read/Write Reference Lock + * + * kref: reference count + * read_count: number of readers that own the lock + * write_count: number of writers (max 1) that own the lock + * count_zero: used for internal signaling for non-atomic locks + * + * A Read/Write Reference Lock is a combination of a read/write spinlock and a + * reference count. The main difference is that no locks are held in the + * critical section and the lifetime of the object is guaranteed. + * + * Read Locking + * Multiple readers may access the lock at any given time and a read lock will + * also ensure that the object exists for the life of the lock. + * + * rwref_read_get() + * use resource in "critical section" (no locks are held) + * rwref_read_put() + * + * Write Locking + * A single writer may access the lock at any given time and a write lock will + * also ensure that the object exists for the life of the lock. + * + * rwref_write_get() + * use resource in "critical section" (no locks are held) + * rwref_write_put() + * + * Reference Lock + * To ensure the lifetime of the lock (and not affect the read or write lock), + * a simple reference can be done. By default, rwref_lock_init() will set the + * reference count to 1.
+ * + * rwref_lock_init() Reference count is 1 + * rwref_get() Reference count is 2 + * rwref_put() Reference count is 1 + * rwref_put() Reference count goes to 0 and object is destroyed + */ +struct rwref_lock { + struct kref kref; + unsigned read_count; + unsigned write_count; + spinlock_t lock; + struct completion count_zero; + + void (*release)(struct rwref_lock *); +}; + +/** + * rwref_lock_release() - Release the rwref_lock when its reference count + * reaches zero + * kref_ptr: pointer to the kref embedded in the lock structure + */ +static inline void rwref_lock_release(struct kref *kref_ptr) +{ + struct rwref_lock *lock_ptr; + + BUG_ON(kref_ptr == NULL); + + lock_ptr = container_of(kref_ptr, struct rwref_lock, kref); + if (lock_ptr->release) + lock_ptr->release(lock_ptr); +} + +/** + * rwref_lock_init() - Initialize rwref_lock + * lock_ptr: pointer to lock structure + * release: release function called when reference count goes to 0 + */ +static inline void rwref_lock_init(struct rwref_lock *lock_ptr, + void (*release)(struct rwref_lock *)) +{ + BUG_ON(lock_ptr == NULL); + + kref_init(&lock_ptr->kref); + lock_ptr->read_count = 0; + lock_ptr->write_count = 0; + spin_lock_init(&lock_ptr->lock); + init_completion(&lock_ptr->count_zero); + lock_ptr->release = release; +} + +/** + * rwref_get() - gains a reference count for the object + * lock_ptr: pointer to lock structure + */ +static inline void rwref_get(struct rwref_lock *lock_ptr) +{ + BUG_ON(lock_ptr == NULL); + + kref_get(&lock_ptr->kref); +} + +/** + * rwref_put() - puts a reference count for the object + * lock_ptr: pointer to lock structure + * + * If the reference count goes to zero, the release function is called. + */ +static inline void rwref_put(struct rwref_lock *lock_ptr) +{ + BUG_ON(lock_ptr == NULL); + + kref_put(&lock_ptr->kref, rwref_lock_release); +} + +/** + * rwref_read_get() - gains a reference count for a read operation + * lock_ptr: pointer to lock structure + * + * Multiple readers may acquire the lock as long as the write count is zero. + */ +static inline void rwref_read_get(struct rwref_lock *lock_ptr) +{ + unsigned long flags; + + BUG_ON(lock_ptr == NULL); + + kref_get(&lock_ptr->kref); + while (1) { + spin_lock_irqsave(&lock_ptr->lock, flags); + if (lock_ptr->write_count == 0) { + lock_ptr->read_count++; + spin_unlock_irqrestore(&lock_ptr->lock, flags); + break; + } + spin_unlock_irqrestore(&lock_ptr->lock, flags); + wait_for_completion(&lock_ptr->count_zero); + } +} + +/** + * rwref_read_put() - returns a reference count for a read operation + * lock_ptr: pointer to lock structure + * + * Must be preceded by a call to rwref_read_get(). + */ +static inline void rwref_read_put(struct rwref_lock *lock_ptr) +{ + unsigned long flags; + + BUG_ON(lock_ptr == NULL); + + spin_lock_irqsave(&lock_ptr->lock, flags); + BUG_ON(lock_ptr->read_count == 0); + if (--lock_ptr->read_count == 0) + complete(&lock_ptr->count_zero); + spin_unlock_irqrestore(&lock_ptr->lock, flags); + kref_put(&lock_ptr->kref, rwref_lock_release); +} + +/** + * rwref_write_get() - gains a reference count for a write operation + * lock_ptr: pointer to lock structure + * + * Only one writer may acquire the lock as long as the reader count is zero.
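+ * + * Typical pairing (sketch; no spinlocks are held inside the critical + * section): + * + * rwref_write_get(lock_ptr); + * ... exclusive access to the protected object ... + * rwref_write_put(lock_ptr);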
+ */ +static inline void rwref_write_get(struct rwref_lock *lock_ptr) +{ + unsigned long flags; + + BUG_ON(lock_ptr == NULL); + + kref_get(&lock_ptr->kref); + while (1) { + spin_lock_irqsave(&lock_ptr->lock, flags); + if (lock_ptr->read_count == 0 && lock_ptr->write_count == 0) { + lock_ptr->write_count++; + spin_unlock_irqrestore(&lock_ptr->lock, flags); + break; + } + spin_unlock_irqrestore(&lock_ptr->lock, flags); + wait_for_completion(&lock_ptr->count_zero); + } +} + +/** + * rwref_write_put() - returns a reference count for a write operation + * lock_ptr: pointer to lock structure + * + * Must be preceded by a call to rwref_write_get(). + */ +static inline void rwref_write_put(struct rwref_lock *lock_ptr) +{ + unsigned long flags; + + BUG_ON(lock_ptr == NULL); + + spin_lock_irqsave(&lock_ptr->lock, flags); + BUG_ON(lock_ptr->write_count != 1); + if (--lock_ptr->write_count == 0) + complete(&lock_ptr->count_zero); + spin_unlock_irqrestore(&lock_ptr->lock, flags); + kref_put(&lock_ptr->kref, rwref_lock_release); +} + +#endif /* _SOC_QCOM_GLINK_PRIVATE_H_ */ diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c new file mode 100644 index 00000000000000..b2d0be1851ff4e --- /dev/null +++ b/drivers/soc/qcom/glink_smem_native_xprt.c @@ -0,0 +1,2452 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "glink_core_if.h" +#include "glink_private.h" +#include "glink_xprt_if.h" +#include +#include + +#define XPRT_NAME "smem" +#define FIFO_FULL_RESERVE 8 +#define FIFO_ALIGNMENT 8 +#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */ +#define RPM_TOC_ID 0x67727430 +#define RPM_TX_FIFO_ID 0x61703272 +#define RPM_RX_FIFO_ID 0x72326170 +#define RPM_TOC_SIZE 256 +#define RPM_MAX_TOC_ENTRIES 20 +#define RPM_FIFO_ADDR_ALIGN_BYTES 3 +#define TRACER_PKT_FEATURE BIT(2) + +static struct device *glink_dev; +static struct completion glink_ack; +#define GLINK_RPM_REQUEST_TIMEOUT 5*HZ + +/** + * enum command_types - definition of the types of commands sent/received + * @VERSION_CMD: Version and feature set supported + * @VERSION_ACK_CMD: Response for @VERSION_CMD + * @OPEN_CMD: Open a channel + * @CLOSE_CMD: Close a channel + * @OPEN_ACK_CMD: Response to @OPEN_CMD + * @RX_INTENT_CMD: RX intent for a channel was queued + * @RX_DONE_CMD: Use of RX intent for a channel is complete + * @RX_INTENT_REQ_CMD: Request to have RX intent queued + * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD + * @TX_DATA_CMD: Start of a data transfer + * @ZERO_COPY_TX_DATA_CMD: Start of a data transfer with zero copy + * @CLOSE_ACK_CMD: Response for @CLOSE_CMD + * @TX_DATA_CONT_CMD: Continuation or end of a data transfer + * @READ_NOTIF_CMD: Request for a notification when this cmd is read + * @RX_DONE_W_REUSE_CMD: Same as @RX_DONE but also reuse the used intent + * @SIGNALS_CMD: Sideband signals + * @TRACER_PKT_CMD: Start of a Tracer Packet Command + * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command + */ +enum command_types { + VERSION_CMD, + VERSION_ACK_CMD, + OPEN_CMD, + CLOSE_CMD, + OPEN_ACK_CMD, + RX_INTENT_CMD, + RX_DONE_CMD, + RX_INTENT_REQ_CMD, + RX_INTENT_REQ_ACK_CMD, + TX_DATA_CMD, + ZERO_COPY_TX_DATA_CMD, + CLOSE_ACK_CMD, + TX_DATA_CONT_CMD, + READ_NOTIF_CMD, + RX_DONE_W_REUSE_CMD, + SIGNALS_CMD, + TRACER_PKT_CMD, + TRACER_PKT_CONT_CMD, +}; + +/** + * struct channel_desc - description of a channel fifo with a remote entity + * @read_index: The read index for the fifo where data should be + * consumed from. + * @write_index: The write index for the fifo where data should be + * produced to. + * + * This structure resides in SMEM and contains the control information for the + * fifo data pipes of the channel. There is one physical channel between us + * and a remote entity. + */ +struct channel_desc { + uint32_t read_index; + uint32_t write_index; +}; + +/** + * struct edge_info - local information for managing a single complete edge + * @xprt_if: The transport interface registered with the + * glink core associated with this edge. + * @xprt_cfg: The transport configuration for the glink core + * associated with this edge. + * @intentless: True if this edge runs in intentless mode. + * @irq_disabled: Flag indicating whether the interrupt is enabled + * or disabled. + * @rx_reset_reg: Reference to the register to reset the rx irq + * line, if applicable. + * @out_irq_reg: Reference to the register to send an irq to the + * remote side. + * @out_irq_mask: Mask written to @out_irq_reg to trigger the + * correct irq. + * @irq_line: The incoming interrupt line.
+ * @tx_irq_count: Number of interrupts triggered. + * @rx_irq_count: Number of interrupts received. + * @tx_ch_desc: Reference to the channel description structure + * for tx in SMEM for this edge. + * @rx_ch_desc: Reference to the channel description structure + * for rx in SMEM for this edge. + * @tx_fifo: Reference to the transmit fifo in SMEM. + * @rx_fifo: Reference to the receive fifo in SMEM. + * @tx_fifo_size: Total size of @tx_fifo. + * @rx_fifo_size: Total size of @rx_fifo. + * @read_from_fifo: Memcpy for this edge. + * @write_to_fifo: Memcpy for this edge. + * @write_lock: Lock to serialize access to @tx_fifo. + * @tx_blocked_queue: Queue of entities waiting for the remote side to + * signal @tx_fifo has flushed and is now empty. + * @tx_resume_needed: A tx resume signal needs to be sent to the glink + * core once the remote side indicates @tx_fifo has + * flushed. + * @tx_blocked_signal_sent: Flag to indicate the flush signal has already + * been sent, and a response is pending from the + * remote side. Protected by @write_lock. + * @kwork: Work to be executed when an irq is received. + * @kworker: Handle to the entity processing @kwork. + * @task: Handle to the task context used to run @kworker. + * @use_ref: Active uses of this transport use this to grab + * a reference. Used for ssr synchronization. + * @rx_lock: Used to serialize concurrent instances of rx + * processing. + * @deferred_cmds: List of deferred commands that need to be + * processed in process context. + * @num_pw_states: Size of @ramp_time_us. + * @ramp_time_us: Array of ramp times in microseconds where array + * index position represents a power state. + */ +struct edge_info { + struct glink_transport_if xprt_if; + struct glink_core_transport_cfg xprt_cfg; + bool intentless; + bool irq_disabled; + void __iomem *rx_reset_reg; + void __iomem *out_irq_reg; + uint32_t out_irq_mask; + uint32_t irq_line; + uint32_t tx_irq_count; + uint32_t rx_irq_count; + struct channel_desc *tx_ch_desc; + struct channel_desc *rx_ch_desc; + void __iomem *tx_fifo; + void __iomem *rx_fifo; + uint32_t tx_fifo_size; + uint32_t rx_fifo_size; + void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes); + void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes); + spinlock_t write_lock; + wait_queue_head_t tx_blocked_queue; + bool tx_resume_needed; + bool tx_blocked_signal_sent; + struct kthread_work kwork; + struct kthread_worker kworker; + struct task_struct *task; + struct srcu_struct use_ref; + spinlock_t rx_lock; + struct list_head deferred_cmds; + uint32_t num_pw_states; + unsigned long *ramp_time_us; +}; + +/** + * struct deferred_cmd - description of a command to be processed later + * @list_node: Used to put this command on a list in the edge. + * @id: ID of the command. + * @param1: Parameter one of the command. + * @param2: Parameter two of the command. + * @data: Extra data associated with the command, if applicable. + * + * This structure stores the relevant information of a command that was removed + * from the fifo but needs to be processed at a later time. 
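+ * + * For example, a SIGNALS_CMD that arrives while the rx path runs in atomic + * context is stored here (id = SIGNALS_CMD, param1 = rcid, param2 = the + * signals) by queue_cmd() and replayed later by the rx worker in process + * context.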
+ */ +struct deferred_cmd { + struct list_head list_node; + uint16_t id; + uint16_t param1; + uint32_t param2; + void *data; +}; + +static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr, + const struct glink_core_version *version, + uint32_t features); +static void register_debugfs_info(struct edge_info *einfo); + +static DEFINE_MUTEX(probe_lock); +static struct glink_core_version versions[] = { + {1, TRACER_PKT_FEATURE, negotiate_features_v1}, +}; + +/** + * send_irq() - send an irq to a remote entity as an event signal + * @einfo: The remote entity that should receive the irq. + */ +static void send_irq(struct edge_info *einfo) +{ + /* + * Any data associated with this event must be visible to the remote + * before the interrupt is triggered + */ + wmb(); + writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg); + einfo->tx_irq_count++; +} + +/** + * memcpy32_toio() - memcpy to word access only memory + * @dest: Destination address. + * @src: Source address. + * @num_bytes: Number of bytes to copy. + * + * Return: Destination address. + */ +static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes) +{ + uint32_t *dest_local = (uint32_t *)dest; + uint32_t *src_local = (uint32_t *)src; + + BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES); + BUG_ON(!dest_local || + ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)); + BUG_ON(!src_local || + ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)); + num_bytes /= sizeof(uint32_t); + + while (num_bytes--) + __raw_writel(*src_local++, dest_local++); + + return dest; +} + +/** + * memcpy32_fromio() - memcpy from word access only memory + * @dest: Destination address. + * @src: Source address. + * @num_bytes: Number of bytes to copy. + * + * Return: Destination address. + */ +static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes) +{ + uint32_t *dest_local = (uint32_t *)dest; + uint32_t *src_local = (uint32_t *)src; + + BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES); + BUG_ON(!dest_local || + ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)); + BUG_ON(!src_local || + ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)); + num_bytes /= sizeof(uint32_t); + + while (num_bytes--) + *dest_local++ = __raw_readl(src_local++); + + return dest; +} + +/** + * fifo_read_avail() - how many bytes are available to be read from an edge + * @einfo: The concerned edge to query. + * + * Return: The number of bytes available to be read from edge. + */ +static uint32_t fifo_read_avail(struct edge_info *einfo) +{ + uint32_t read_index = einfo->rx_ch_desc->read_index; + uint32_t write_index = einfo->rx_ch_desc->write_index; + uint32_t fifo_size = einfo->rx_fifo_size; + uint32_t bytes_avail; + + bytes_avail = write_index - read_index; + if (write_index < read_index) + /* + * Case: W < R - Write has wrapped + * -------------------------------- + * In this case, the write operation has wrapped past the end + * of the FIFO which means that now calculating the amount of + * data in the FIFO results in a negative number. This can be + * easily fixed by adding the fifo_size to the value. Even + * though the values are unsigned, subtraction is always done + * using 2's complement which means that the result will still + * be correct once the FIFO size has been added to the negative + * result. + * + * Example: + * '-' = data in fifo + * '.'
= empty + * + * 0 1 + * 0123456789012345 + * |-----w.....r----| + * 0 N + * + * write = 5 = 101b + * read = 11 = 1011b + * Data in FIFO + * (write - read) + fifo_size = (101b - 1011b) + 10000b + * = 11111010b + 10000b = 1010b = 10 + */ + bytes_avail += fifo_size; + + return bytes_avail; +} + +/** + * fifo_write_avail() - how many bytes can be written to the edge + * @einfo: The concerned edge to query. + * + * Calculates the number of bytes that can be transmitted at this time. + * Automatically reserves some space to maintain alignment when the fifo is + * completely full, and reserves space so that the flush command can always be + * transmitted when needed. + * + * Return: The number of bytes that can be written to the edge. + */ +static uint32_t fifo_write_avail(struct edge_info *einfo) +{ + uint32_t read_index = einfo->tx_ch_desc->read_index; + uint32_t write_index = einfo->tx_ch_desc->write_index; + uint32_t fifo_size = einfo->tx_fifo_size; + uint32_t bytes_avail = read_index - write_index; + + if (read_index <= write_index) + bytes_avail += fifo_size; + if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE) + bytes_avail = 0; + else + bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE; + + return bytes_avail; +} + +/** + * fifo_read() - read data from an edge + * @einfo: The concerned edge to read from. + * @_data: Buffer to copy the read data into. + * @len: The amount of data to read in bytes. + * + * Return: The number of bytes read. + */ +static int fifo_read(struct edge_info *einfo, void *_data, int len) +{ + void *ptr; + void *data = _data; + int orig_len = len; + uint32_t read_index = einfo->rx_ch_desc->read_index; + uint32_t write_index = einfo->rx_ch_desc->write_index; + uint32_t fifo_size = einfo->rx_fifo_size; + uint32_t n; + + while (len) { + ptr = einfo->rx_fifo + read_index; + if (read_index <= write_index) + n = write_index - read_index; + else + n = fifo_size - read_index; + + if (n == 0) + break; + if (n > len) + n = len; + + einfo->read_from_fifo(data, ptr, n); + + data += n; + len -= n; + read_index += n; + if (read_index >= fifo_size) + read_index -= fifo_size; + } + einfo->rx_ch_desc->read_index = read_index; + + return orig_len - len; +} + +/** + * fifo_write_body() - Copy transmit data into an edge + * @einfo: The concerned edge to copy into. + * @_data: Buffer of data to copy from. + * @len: Size of data to copy in bytes. + * @write_index: Index into the channel where the data should be copied. + * + * Return: Number of bytes remaining to be copied into the edge. + */ +static uint32_t fifo_write_body(struct edge_info *einfo, const void *_data, + int len, uint32_t *write_index) +{ + void *ptr; + const void *data = _data; + uint32_t read_index = einfo->tx_ch_desc->read_index; + uint32_t fifo_size = einfo->tx_fifo_size; + uint32_t n; + + while (len) { + ptr = einfo->tx_fifo + *write_index; + if (*write_index < read_index) { + n = read_index - *write_index - FIFO_FULL_RESERVE; + } else { + if (read_index < FIFO_FULL_RESERVE) + n = fifo_size + read_index - *write_index - + FIFO_FULL_RESERVE; + else + n = fifo_size - *write_index; + } + + if (n == 0) + break; + if (n > len) + n = len; + + einfo->write_to_fifo(ptr, data, n); + + data += n; + len -= n; + *write_index += n; + if (*write_index >= fifo_size) + *write_index -= fifo_size; + } + return len; +} + +/** + * fifo_write() - Write data into an edge + * @einfo: The concerned edge to write to. + * @data: Buffer of data to write. + * @len: Length of data to write, in bytes.
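+ * + * Illustrative call pattern (the real transmit path, fifo_tx(), adds + * locking and blocking on top of this check): + * + * if (fifo_write_avail(einfo) >= len) + * fifo_write(einfo, data, len);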
+ * + * Wrapper around fifo_write_body() to manage additional details that are + * necessary for a complete write event. Does not manage concurrency. Clients + * should use fifo_write_avail() to check if there is sufficient space before + * calling fifo_write(). + * + * Return: Number of bytes written to the edge. + */ +static int fifo_write(struct edge_info *einfo, const void *data, int len) +{ + int orig_len = len; + uint32_t write_index = einfo->tx_ch_desc->write_index; + + len = fifo_write_body(einfo, data, len, &write_index); + einfo->tx_ch_desc->write_index = write_index; + send_irq(einfo); + + return orig_len - len; +} + +/** + * fifo_write_complex() - writes a transaction of multiple buffers to an edge + * @einfo: The concerned edge to write to. + * @data1: The first buffer of data to write. + * @len1: The length of the first buffer in bytes. + * @data2: The second buffer of data to write. + * @len2: The length of the second buffer in bytes. + * @data3: The third buffer of data to write. + * @len3: The length of the third buffer in bytes. + * + * A variant of fifo_write() which optimizes the usecase found in tx(). The + * remote side expects all or none of the transmitted data to be available. + * This prevents the tx() usecase from calling fifo_write() multiple times. The + * alternative would be an allocation and additional memcpy to create a buffer + * to copy all the data segments into one location before calling fifo_write(). + * + * Return: Number of bytes written to the edge. + */ +static int fifo_write_complex(struct edge_info *einfo, + const void *data1, int len1, + const void *data2, int len2, + const void *data3, int len3) +{ + int orig_len = len1 + len2 + len3; + uint32_t write_index = einfo->tx_ch_desc->write_index; + + len1 = fifo_write_body(einfo, data1, len1, &write_index); + len2 = fifo_write_body(einfo, data2, len2, &write_index); + len3 = fifo_write_body(einfo, data3, len3, &write_index); + einfo->tx_ch_desc->write_index = write_index; + send_irq(einfo); + + return orig_len - len1 - len2 - len3; +} + +/** + * send_tx_blocked_signal() - send the flush command as we are blocked from tx + * @einfo: The concerned edge which is blocked. + * + * Used to send a signal to the remote side that we have no more space to + * transmit data and therefore need the remote side to signal us when they have + * cleared some space by reading some data. This function relies upon the + * assumption that fifo_write_avail() will reserve some space so that the flush + * signal command can always be put into the transmit fifo, even when "everyone" + * else thinks that the transmit fifo is truly full. This function assumes + * that it is called with the write_lock already locked. + */ +static void send_tx_blocked_signal(struct edge_info *einfo) +{ + struct read_notif_request { + uint16_t cmd; + uint16_t reserved; + uint32_t reserved2; + }; + struct read_notif_request read_notif_req; + + read_notif_req.cmd = READ_NOTIF_CMD; + read_notif_req.reserved = 0; + read_notif_req.reserved2 = 0; + + if (!einfo->tx_blocked_signal_sent) { + einfo->tx_blocked_signal_sent = true; + fifo_write(einfo, &read_notif_req, sizeof(read_notif_req)); + } +} + +/** + * fifo_tx() - transmit data on an edge + * @einfo: The concerned edge to transmit on. + * @data: Buffer of data to transmit. + * @len: Length of data to transmit in bytes. + * + * This helper function is the preferred interface to fifo_write() and should + * be used in the normal case for transmitting entities. fifo_tx() will block + * until there is sufficient room to transmit the requested amount of data. + * fifo_tx() will manage any concurrency between multiple transmitters on a + * channel. + * + * Return: Number of bytes transmitted. + */ +static int fifo_tx(struct edge_info *einfo, const void *data, int len) +{ + unsigned long flags; + int ret; + + DEFINE_WAIT(wait); + + spin_lock_irqsave(&einfo->write_lock, flags); + while (fifo_write_avail(einfo) < len) { + send_tx_blocked_signal(einfo); + spin_unlock_irqrestore(&einfo->write_lock, flags); + prepare_to_wait(&einfo->tx_blocked_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (fifo_write_avail(einfo) < len) + schedule(); + finish_wait(&einfo->tx_blocked_queue, &wait); + spin_lock_irqsave(&einfo->write_lock, flags); + } + ret = fifo_write(einfo, data, len); + spin_unlock_irqrestore(&einfo->write_lock, flags); + + return ret; +} + +/** + * process_rx_data() - process received data from an edge + * @einfo: The edge the data was received on. + * @cmd_id: ID to specify the type of data. + * @rcid: The remote channel id associated with the data. + * @intent_id: The intent the data should be put in. + */ +static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id, + uint32_t rcid, uint32_t intent_id) +{ + struct command { + uint32_t frag_size; + uint32_t size_remaining; + }; + struct command cmd; + struct glink_core_rx_intent *intent; + char trash[FIFO_ALIGNMENT]; + int alignment; + bool err = false; + + fifo_read(einfo, &cmd, sizeof(cmd)); + + intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx( + &einfo->xprt_if, rcid, intent_id); + if (intent == NULL) { + GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid, + intent_id); + err = true; + } else if (intent->data == NULL) { + if (einfo->intentless) { + intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC); + if (!intent->data) + err = true; + else + intent->intent_size = cmd.frag_size; + } else { + GLINK_ERR( + "%s: intent for ch %d liid %d has no data buff\n", + __func__, rcid, intent_id); + err = true; + } + } + + if (!err && + (intent->intent_size - intent->write_offset < cmd.frag_size || + intent->write_offset + cmd.size_remaining > intent->intent_size)) { + GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n", + __func__, + cmd.frag_size, + cmd.size_remaining, + "will overflow ch", + rcid, + "intent", + intent_id); + err = true; + } + + if (err) { + alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT); + alignment -= cmd.frag_size; + while (cmd.frag_size) { + if (cmd.frag_size > FIFO_ALIGNMENT) { + fifo_read(einfo, trash, FIFO_ALIGNMENT); + cmd.frag_size -= FIFO_ALIGNMENT; + } else { + fifo_read(einfo, trash, cmd.frag_size); + cmd.frag_size = 0; + } + } + if (alignment) + fifo_read(einfo, trash, alignment); + return; + } + fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size); + intent->write_offset += cmd.frag_size; + intent->pkt_size += cmd.frag_size; + + alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT); + alignment -= cmd.frag_size; + if (alignment) + fifo_read(einfo, trash, alignment); + + if (unlikely((cmd_id == TRACER_PKT_CMD || + cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) { + tracer_pkt_log_event(intent->data, GLINK_XPRT_RX); + intent->tracer_pkt = true; + } + + einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if, + rcid, + intent, + cmd.size_remaining ? + false : true); +} + +/** + * queue_cmd() - queue a deferred command for later processing + * @einfo: Edge to queue commands on. + * @cmd: Command to queue.
+ * @data: Command specific data to queue with the command. + * + * Return: True if queuing was successful, false otherwise. + */ +static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data) +{ + struct command { + uint16_t id; + uint16_t param1; + uint32_t param2; + }; + struct command *_cmd = cmd; + struct deferred_cmd *d_cmd; + + d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC); + if (!d_cmd) { + GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id); + return false; + } + d_cmd->id = _cmd->id; + d_cmd->param1 = _cmd->param1; + d_cmd->param2 = _cmd->param2; + d_cmd->data = data; + list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds); + queue_kthread_work(&einfo->kworker, &einfo->kwork); + return true; +} + +/** + * get_rx_fifo() - Find the rx fifo for an edge + * @einfo: Edge to find the fifo for. + * + * Return: True if fifo was found, false otherwise. + */ +static bool get_rx_fifo(struct edge_info *einfo) +{ + return true; +} + +/** + * __rx_worker() - process received commands on a specific edge + * @einfo: Edge to process commands on. + * @atomic_ctx: Indicates if the caller is in atomic context and requires any + * non-atomic operations to be deferred. + */ +static void __rx_worker(struct edge_info *einfo, bool atomic_ctx) +{ + struct command { + uint16_t id; + uint16_t param1; + uint32_t param2; + }; + struct intent_desc { + uint32_t size; + uint32_t id; + }; + struct command cmd; + struct intent_desc intent; + struct intent_desc *intents; + int i; + bool granted; + unsigned long flags; + bool trigger_wakeup = false; + int rcu_id; + uint16_t rcid; + uint32_t name_len; + uint32_t len; + char *name; + char trash[FIFO_ALIGNMENT]; + struct deferred_cmd *d_cmd; + void *cmd_data; + + rcu_id = srcu_read_lock(&einfo->use_ref); + + if (unlikely(!einfo->rx_fifo)) { + if (!get_rx_fifo(einfo)) { + srcu_read_unlock(&einfo->use_ref, rcu_id); + return; + } + einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if); + } + + if (!atomic_ctx) { + if (einfo->tx_resume_needed && fifo_write_avail(einfo)) { + einfo->tx_resume_needed = false; + einfo->xprt_if.glink_core_if_ptr->tx_resume( + &einfo->xprt_if); + } + spin_lock_irqsave(&einfo->write_lock, flags); + if (waitqueue_active(&einfo->tx_blocked_queue)) { + einfo->tx_blocked_signal_sent = false; + trigger_wakeup = true; + } + spin_unlock_irqrestore(&einfo->write_lock, flags); + if (trigger_wakeup) + wake_up_all(&einfo->tx_blocked_queue); + } + + + /* + * Access to the fifo needs to be synchronized, however only the calls + * into the core from process_rx_data() are compatible with an atomic + * processing context. For everything else, we need to do all the fifo + * processing, then unlock the lock for the call into the core. Data + * in the fifo is allowed to be processed immediately instead of being + * ordered with the commands because the channel open process prevents + * intents from being queued (which prevents data from being sent) until + * all the channel open commands are processed by the core, thus + * eliminating a race. 
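+ * Wire layout: cmd.param1 carries the rcid and cmd.param2 the number of + * intents; cmd.param2 struct intent_desc entries (size, id) follow the + * command in the fifo.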
+ */ + spin_lock_irqsave(&einfo->rx_lock, flags); + while (fifo_read_avail(einfo) || + (!atomic_ctx && !list_empty(&einfo->deferred_cmds))) { + + if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) { + d_cmd = list_first_entry(&einfo->deferred_cmds, + struct deferred_cmd, list_node); + list_del(&d_cmd->list_node); + cmd.id = d_cmd->id; + cmd.param1 = d_cmd->param1; + cmd.param2 = d_cmd->param2; + cmd_data = d_cmd->data; + kfree(d_cmd); + } else { + fifo_read(einfo, &cmd, sizeof(cmd)); + cmd_data = NULL; + } + + switch (cmd.id) { + case VERSION_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_version( + &einfo->xprt_if, + cmd.param1, + cmd.param2); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case VERSION_ACK_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack( + &einfo->xprt_if, + cmd.param1, + cmd.param2); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case OPEN_CMD: + rcid = cmd.param1; + name_len = cmd.param2; + + if (cmd_data) { + name = cmd_data; + } else { + len = ALIGN(name_len, FIFO_ALIGNMENT); + name = kmalloc(len, GFP_ATOMIC); + if (!name) { + pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n"); + while (len) { + fifo_read(einfo, trash, + FIFO_ALIGNMENT); + len -= FIFO_ALIGNMENT; + } + break; + } + fifo_read(einfo, name, len); + } + if (atomic_ctx) { + if (!queue_cmd(einfo, &cmd, name)) + kfree(name); + break; + } + + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open( + &einfo->xprt_if, + rcid, + name, + SMEM_XPRT_ID); + kfree(name); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case CLOSE_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr-> + rx_cmd_ch_remote_close( + &einfo->xprt_if, + cmd.param1); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case OPEN_ACK_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack( + &einfo->xprt_if, + cmd.param1, + SMEM_XPRT_ID); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case RX_INTENT_CMD: + /* + * One intent listed with this command. This is the + * expected case and can be optimized over the general + * case of an array of intents. 
+ */ + if (cmd.param2 == 1) { + if (cmd_data) { + intent.id = ((struct intent_desc *) + cmd_data)->id; + intent.size = ((struct intent_desc *) + cmd_data)->size; + kfree(cmd_data); + } else { + fifo_read(einfo, &intent, + sizeof(intent)); + } + if (atomic_ctx) { + cmd_data = kmalloc(sizeof(intent), + GFP_ATOMIC); + if (!cmd_data) { + pr_err("%s: dropping cmd %d\n", + __func__, + cmd.id); + break; + } + ((struct intent_desc *)cmd_data)->id = + intent.id; + ((struct intent_desc *)cmd_data)->size = + intent.size; + if (!queue_cmd(einfo, &cmd, cmd_data)) + kfree(cmd_data); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr-> + rx_cmd_remote_rx_intent_put( + &einfo->xprt_if, + cmd.param1, + intent.id, + intent.size); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + } + + /* Array of intents to process */ + if (cmd_data) { + intents = cmd_data; + } else { + intents = kmalloc(sizeof(*intents) * cmd.param2, + GFP_ATOMIC); + if (!intents) { + for (i = 0; i < cmd.param2; ++i) + fifo_read(einfo, &intent, + sizeof(intent)); + break; + } + fifo_read(einfo, intents, + sizeof(*intents) * cmd.param2); + } + if (atomic_ctx) { + if (!queue_cmd(einfo, &cmd, intents)) + kfree(intents); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + for (i = 0; i < cmd.param2; ++i) { + einfo->xprt_if.glink_core_if_ptr-> + rx_cmd_remote_rx_intent_put( + &einfo->xprt_if, + cmd.param1, + intents[i].id, + intents[i].size); + } + kfree(intents); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case RX_DONE_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done( + &einfo->xprt_if, + cmd.param1, + cmd.param2, + false); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case RX_INTENT_REQ_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr-> + rx_cmd_remote_rx_intent_req( + &einfo->xprt_if, + cmd.param1, + cmd.param2); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case RX_INTENT_REQ_ACK_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + granted = false; + if (cmd.param2 == 1) + granted = true; + einfo->xprt_if.glink_core_if_ptr-> + rx_cmd_rx_intent_req_ack( + &einfo->xprt_if, + cmd.param1, + granted); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case TX_DATA_CMD: + case TX_DATA_CONT_CMD: + case TRACER_PKT_CMD: + case TRACER_PKT_CONT_CMD: + process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2); + break; + case CLOSE_ACK_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack( + &einfo->xprt_if, + cmd.param1); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case READ_NOTIF_CMD: + send_irq(einfo); + break; + case SIGNALS_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs( + &einfo->xprt_if, + cmd.param1, + cmd.param2); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + case RX_DONE_W_REUSE_CMD: + if (atomic_ctx) { + queue_cmd(einfo, &cmd, NULL); + break; + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done( + &einfo->xprt_if, + cmd.param1, + 
cmd.param2, + true); + spin_lock_irqsave(&einfo->rx_lock, flags); + break; + default: + pr_err("Unrecognized command: %d\n", cmd.id); + break; + } + } + spin_unlock_irqrestore(&einfo->rx_lock, flags); + srcu_read_unlock(&einfo->use_ref, rcu_id); +} + +/** + * rx_worker() - worker function to process received commands + * @work: kwork associated with the edge to process commands on. + */ +static void rx_worker(struct kthread_work *work) +{ + struct edge_info *einfo; + + einfo = container_of(work, struct edge_info, kwork); + __rx_worker(einfo, false); +} + +irqreturn_t irq_handler(int irq, void *priv) +{ + struct edge_info *einfo = (struct edge_info *)priv; + + if (einfo->rx_reset_reg) + writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg); + + queue_kthread_work(&einfo->kworker, &einfo->kwork); + einfo->rx_irq_count++; + + return IRQ_HANDLED; +} + +/** + * tx_cmd_version() - convert a version cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @version: The version number to encode. + * @features: The features information to encode. + */ +static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version, + uint32_t features) +{ + struct command { + uint16_t id; + uint16_t version; + uint32_t features; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = VERSION_CMD; + cmd.version = version; + cmd.features = features; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + srcu_read_unlock(&einfo->use_ref, rcu_id); +} + +/** + * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @version: The version number to encode. + * @features: The features information to encode. + */ +static void tx_cmd_version_ack(struct glink_transport_if *if_ptr, + uint32_t version, + uint32_t features) +{ + struct command { + uint16_t id; + uint16_t version; + uint32_t features; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = VERSION_ACK_CMD; + cmd.version = version; + cmd.features = features; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + srcu_read_unlock(&einfo->use_ref, rcu_id); +} + +/** + * set_version() - activate a negotiated version and feature set + * @if_ptr: The transport to configure. + * @version: The version to use. + * @features: The features to use. + * + * Return: The supported capabilities of the transport. + */ +static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version, + uint32_t features) +{ + struct edge_info *einfo; + uint32_t ret; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + ret = einfo->intentless ? + GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS; + + if (features & TRACER_PKT_FEATURE) + ret |= GCAP_TRACER_PKT; + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return ret; +} + +/** + * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @name: The channel name to encode. + * @req_xprt: The transport the core would like to migrate this channel to. + * + * Return: 0 on success or standard Linux error code. 
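+ *
+ * The command header and the NUL-terminated channel name are packed
+ * into a single buffer padded to FIFO_ALIGNMENT before transmission.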
+ */ +static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid, + const char *name, uint16_t req_xprt) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t length; + }; + struct command cmd; + struct edge_info *einfo; + uint32_t buf_size; + void *buf; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = OPEN_CMD; + cmd.lcid = lcid; + cmd.length = strlen(name) + 1; + + buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT); + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + pr_err("%s: malloc fail for %d size buf\n", __func__, buf_size); + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -ENOMEM; + } + + memcpy(buf, &cmd, sizeof(cmd)); + memcpy(buf + sizeof(cmd), name, cmd.length); + + fifo_tx(einfo, buf, buf_size); + + kfree(buf); + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * + * Return: 0 on success or standard Linux error code. + */ +static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t reserved; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = CLOSE_CMD; + cmd.lcid = lcid; + cmd.reserved = 0; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format + * and transmit + * @if_ptr: The transport to transmit on. + * @rcid: The remote channel id to encode. + * @xprt_resp: The response to a transport migration request. + */ +static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr, + uint32_t rcid, uint16_t xprt_resp) +{ + struct command { + uint16_t id; + uint16_t rcid; + uint32_t reserved; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = OPEN_ACK_CMD; + cmd.rcid = rcid; + cmd.reserved = 0; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + srcu_read_unlock(&einfo->use_ref, rcu_id); +} + +/** + * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format + * and transmit + * @if_ptr: The transport to transmit on. + * @rcid: The remote channel id to encode. 
+ */ +static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr, + uint32_t rcid) +{ + struct command { + uint16_t id; + uint16_t rcid; + uint32_t reserved; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = CLOSE_ACK_CMD; + cmd.rcid = rcid; + cmd.reserved = 0; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + srcu_read_unlock(&einfo->use_ref, rcu_id); +} + +/** + * int wait_link_down() - Check status of read/write indices + * @if_ptr: The transport to check + * + * Return: 1 if indices are all zero, 0 otherwise + */ +int wait_link_down(struct glink_transport_if *if_ptr) +{ + struct edge_info *einfo; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + if (einfo->tx_ch_desc->write_index == 0 && + einfo->tx_ch_desc->read_index == 0 && + einfo->rx_ch_desc->write_index == 0 && + einfo->rx_ch_desc->read_index == 0) + return 1; + else + return 0; +} + +/** + * allocate_rx_intent() - allocate/reserve space for RX Intent + * @if_ptr: The transport the intent is associated with. + * @size: size of intent. + * @intent: Pointer to the intent structure. + * + * Assign "data" with the buffer created, since the transport creates + * a linear buffer and "iovec" with the "intent" itself, so that + * the data can be passed to a client that receives only vector buffer. + * Note that returning NULL for the pointer is valid (it means that space has + * been reserved, but the actual pointer will be provided later). + * + * Return: 0 on success or standard Linux error code. + */ +static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size, + struct glink_core_rx_intent *intent) +{ + void *t; + + t = kmalloc(size, GFP_KERNEL); + if (!t) + return -ENOMEM; + + intent->data = t; + intent->iovec = (void *)intent; + intent->vprovider = rx_linear_vbuf_provider; + intent->pprovider = NULL; + return 0; +} + +/** + * deallocate_rx_intent() - Deallocate space created for RX Intent + * @if_ptr: The transport the intent is associated with. + * @intent: Pointer to the intent structure. + * + * Return: 0 on success or standard Linux error code. + */ +static int deallocate_rx_intent(struct glink_transport_if *if_ptr, + struct glink_core_rx_intent *intent) +{ + if (!intent || !intent->data) + return -EINVAL; + + kfree(intent->data); + intent->data = NULL; + intent->iovec = NULL; + intent->vprovider = NULL; + return 0; +} + +/** + * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and + * transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @size: The intent size to encode. + * @liid: The local intent id to encode. + * + * Return: 0 on success or standard Linux error code. 
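+ *
+ * Intents are not supported on intentless transports, so -EOPNOTSUPP
+ * is returned in that configuration.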
+ */ +static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr, + uint32_t lcid, size_t size, uint32_t liid) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t count; + uint32_t size; + uint32_t liid; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + if (size > UINT_MAX) { + pr_err("%s: size %zu is too large to encode\n", __func__, size); + return -EMSGSIZE; + } + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + if (einfo->intentless) + return -EOPNOTSUPP; + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = RX_INTENT_CMD; + cmd.lcid = lcid; + cmd.count = 1; + cmd.size = size; + cmd.liid = liid; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @liid: The local intent id to encode. + * @reuse: Reuse the consumed intent. + */ +static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr, + uint32_t lcid, uint32_t liid, bool reuse) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t liid; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + if (einfo->intentless) + return; + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD; + cmd.lcid = lcid; + cmd.liid = liid; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + srcu_read_unlock(&einfo->use_ref, rcu_id); +} + +/** + * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and + * transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @size: The requested intent size to encode. + * + * Return: 0 on success or standard Linux error code. + */ +static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr, + uint32_t lcid, size_t size) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t size; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + if (size > UINT_MAX) { + pr_err("%s: size %zu is too large to encode\n", __func__, size); + return -EMSGSIZE; + } + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + if (einfo->intentless) + return -EOPNOTSUPP; + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = RX_INTENT_REQ_CMD, + cmd.lcid = lcid; + cmd.size = size; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * tx_cmd_rx_intent_req_ack() - convert an rx intent request ack cmd to wire + * format and transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @granted: The request response to encode. + * + * Return: 0 on success or standard Linux error code. 
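+ *
+ * The response is encoded on the wire as 1 for granted, 0 for denied.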
+ */ +static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr, + uint32_t lcid, bool granted) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t response; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + if (einfo->intentless) + return -EOPNOTSUPP; + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = RX_INTENT_REQ_ACK_CMD, + cmd.lcid = lcid; + if (granted) + cmd.response = 1; + else + cmd.response = 0; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * tx_cmd_set_sigs() - convert a signals ack cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @sigs: The signals to encode. + * + * Return: 0 on success or standard Linux error code. + */ +static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid, + uint32_t sigs) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t sigs; + }; + struct command cmd; + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + cmd.id = SIGNALS_CMD, + cmd.lcid = lcid; + cmd.sigs = sigs; + + fifo_tx(einfo, &cmd, sizeof(cmd)); + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * poll() - poll for data on a channel + * @if_ptr: The transport the channel exists on. + * @lcid: The local channel id. + * + * Return: 0 if no data available, 1 if data available. + */ +static int poll(struct glink_transport_if *if_ptr, uint32_t lcid) +{ + struct edge_info *einfo; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + if (fifo_read_avail(einfo)) { + __rx_worker(einfo, true); + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 1; + } + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * mask_rx_irq() - mask the receive irq for a channel + * @if_ptr: The transport the channel exists on. + * @lcid: The local channel id for the channel. + * @mask: True to mask the irq, false to unmask. + * @pstruct: Platform defined structure for handling the masking. + * + * Return: 0 on success or standard Linux error code. + */ +static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid, + bool mask, void *pstruct) +{ + struct edge_info *einfo; + struct irq_chip *irq_chip; + struct irq_data *irq_data; + int rcu_id; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + irq_chip = irq_get_chip(einfo->irq_line); + if (!irq_chip) { + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -ENODEV; + } + + irq_data = irq_get_irq_data(einfo->irq_line); + if (!irq_data) { + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -ENODEV; + } + + if (mask) { + irq_chip->irq_mask(irq_data); + einfo->irq_disabled = true; + if (pstruct) + irq_set_affinity(einfo->irq_line, pstruct); + } else { + irq_chip->irq_unmask(irq_data); + einfo->irq_disabled = false; + } + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return 0; +} + +/** + * tx_data() - convert a data/tracer_pkt to wire format and transmit + * @if_ptr: The transport to transmit on. + * @cmd_id: The command ID to transmit. + * @lcid: The local channel id to encode. + * @pctx: The data to encode. + * + * Return: Number of bytes written or standard Linux error code. 
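+ *
+ * Fewer bytes than requested may be committed when FIFO space is
+ * short; the remainder is advertised in size_left so the peer can
+ * reassemble the fragments carried by the *_CONT_CMD variants.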
+ */ +static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id, + uint32_t lcid, struct glink_core_tx_pkt *pctx) +{ + struct command { + uint16_t id; + uint16_t lcid; + uint32_t riid; + uint32_t size; + uint32_t size_left; + }; + struct command cmd; + struct edge_info *einfo; + uint32_t size; + uint32_t zeros_size; + const void *data_start; + char zeros[FIFO_ALIGNMENT] = { 0 }; + unsigned long flags; + size_t tx_size = 0; + int rcu_id; + + if (pctx->size < pctx->size_remaining) { + GLINK_ERR("%s: size remaining exceeds size. Resetting.\n", + __func__); + pctx->size_remaining = pctx->size; + } + if (!pctx->size_remaining) + return 0; + + einfo = container_of(if_ptr, struct edge_info, xprt_if); + + rcu_id = srcu_read_lock(&einfo->use_ref); + + if (einfo->intentless && + (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) { + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -EINVAL; + } + + if (cmd_id == TX_DATA_CMD) { + if (pctx->size_remaining == pctx->size) + cmd.id = TX_DATA_CMD; + else + cmd.id = TX_DATA_CONT_CMD; + } else { + if (pctx->size_remaining == pctx->size) + cmd.id = TRACER_PKT_CMD; + else + cmd.id = TRACER_PKT_CONT_CMD; + } + cmd.lcid = lcid; + cmd.riid = pctx->riid; + data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining, + &tx_size); + if (!data_start) { + GLINK_ERR("%s: invalid data_start\n", __func__); + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -EINVAL; + } + + spin_lock_irqsave(&einfo->write_lock, flags); + size = fifo_write_avail(einfo); + + /* Intentless clients expect a complete commit or instant failure */ + if (einfo->intentless && size < sizeof(cmd) + pctx->size) { + spin_unlock_irqrestore(&einfo->write_lock, flags); + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -ENOSPC; + } + + /* Need enough space to write the command and some data */ + if (size <= sizeof(cmd)) { + einfo->tx_resume_needed = true; + spin_unlock_irqrestore(&einfo->write_lock, flags); + srcu_read_unlock(&einfo->use_ref, rcu_id); + return -EAGAIN; + } + size -= sizeof(cmd); + if (size > tx_size) + size = tx_size; + + cmd.size = size; + pctx->size_remaining -= size; + cmd.size_left = pctx->size_remaining; + zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size; + if (cmd.id == TRACER_PKT_CMD) + tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX); + + fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size, zeros, + zeros_size); + GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n", + "", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size, + cmd.size_left); + spin_unlock_irqrestore(&einfo->write_lock, flags); + + /* Fake tx_done for intentless since its not supported over the wire */ + if (einfo->intentless) { + spin_lock_irqsave(&einfo->rx_lock, flags); + cmd.id = RX_DONE_CMD; + cmd.lcid = pctx->rcid; + queue_cmd(einfo, &cmd, NULL); + spin_unlock_irqrestore(&einfo->rx_lock, flags); + } + + srcu_read_unlock(&einfo->use_ref, rcu_id); + return cmd.size; +} + +/** + * tx() - convert a data transmit cmd to wire format and transmit + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @pctx: The data to encode. + * + * Return: Number of bytes written or standard Linux error code. + */ +static int tx(struct glink_transport_if *if_ptr, uint32_t lcid, + struct glink_core_tx_pkt *pctx) +{ + return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx); +} + +/** + * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit + * @if_ptr: The transport to transmit on. 
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+			     struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ *				votes to be applied
+ * @if_ptr:	The transport interface on which power voting is requested.
+ * @state:	The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+		struct glink_transport_if *if_ptr,
+		uint32_t state)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
+		return (unsigned long)ERR_PTR(-EINVAL);
+
+	return einfo->ramp_time_us[state];
+}
+
+/**
+ * power_vote() - Update the power votes to meet QoS requirement
+ * @if_ptr:	The transport interface on which power voting is requested.
+ * @state:	The power state for which the voting should be done.
+ *
+ * Return: 0 on success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr:	The transport interface on which power voting is requested.
+ *
+ * Return: 0 on success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+	return 0;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are negotiated.
+ * @version:	The version negotiated.
+ * @features:	The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features)
+{
+	return features & version->features;
+}
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
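+ *
+ * Hooks that are not assigned here remain NULL from the zeroed
+ * allocation; optional ones are replaced with dummies by the core at
+ * registration time.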
+ */ +static void init_xprt_if(struct edge_info *einfo) +{ + einfo->xprt_if.tx_cmd_version = tx_cmd_version; + einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack; + einfo->xprt_if.set_version = set_version; + einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open; + einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close; + einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack; + einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack; + einfo->xprt_if.allocate_rx_intent = allocate_rx_intent; + einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent; + einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent; + einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done; + einfo->xprt_if.tx = tx; + einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req; + einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack = + tx_cmd_remote_rx_intent_req_ack; + einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs; + einfo->xprt_if.poll = poll; + einfo->xprt_if.mask_rx_irq = mask_rx_irq; + einfo->xprt_if.wait_link_down = wait_link_down; + einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt; + einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time; + einfo->xprt_if.power_vote = power_vote; + einfo->xprt_if.power_unvote = power_unvote; +} + +static struct qcom_ipc_device *to_ipc_device(struct device *dev) +{ + return container_of(dev, struct qcom_ipc_device, dev); +} + +static struct qcom_ipc_driver *to_ipc_driver(struct device *dev) +{ + struct qcom_ipc_device *qidev = to_ipc_device(dev); + + return container_of(qidev->dev.driver, struct qcom_ipc_driver, driver); +} + +static int qcom_ipc_dev_match(struct device *dev, struct device_driver *drv) +{ + return of_driver_match_device(dev, drv); +} + +static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv, + const void *pkt_priv, const void *ptr) +{ + return; +} + +static void msm_rpm_trans_notify_state(void *handle, const void *priv, + unsigned event) +{ + switch (event) { + case GLINK_CONNECTED: + if (IS_ERR_OR_NULL(handle)) { + pr_err("glink_handle %d\n", + (int)PTR_ERR(handle)); + BUG_ON(1); + } + complete(&glink_ack); + break; + default: + break; + } +} + +/* + * Probe the ipc client. 
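+ *
+ * Opens the G-Link channel named by the "qcom,glink-channels" property
+ * and waits for the GLINK_CONNECTED notification before invoking the
+ * client driver's probe.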
+ */
+static int qcom_ipc_dev_probe(struct device *dev)
+{
+	struct qcom_ipc_device *qidev = to_ipc_device(dev);
+	struct qcom_ipc_driver *qidrv = to_ipc_driver(dev);
+	struct glink_open_config *open_config;
+	const char *channel_name, *key;
+	int ret;
+
+	key = "qcom,glink-channels";
+	ret = of_property_read_string(dev->of_node, key, &channel_name);
+	if (ret) {
+		pr_err("Failed to read node: %s, key=%s\n",
+		       dev->of_node->full_name, key);
+		return ret;
+	}
+
+	open_config = kzalloc(sizeof(*open_config), GFP_KERNEL);
+	if (!open_config)
+		return -ENOMEM;
+
+	/* open a glink channel */
+	open_config->name = channel_name;
+	open_config->priv = qidev;
+	open_config->edge = dev_get_drvdata(dev);
+	open_config->notify_rx = qidrv->callback;
+	open_config->notify_tx_done = msm_rpm_trans_notify_tx_done;
+	open_config->notify_state = msm_rpm_trans_notify_state;
+
+	qidev->channel = glink_open(open_config);
+	ret = wait_for_completion_timeout(&glink_ack, GLINK_RPM_REQUEST_TIMEOUT);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	ret = qidrv->probe(qidev);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	dev_err(&qidev->dev, "probe failed\n");
+	return ret;
+}
+
+static int qcom_ipc_dev_remove(struct device *dev)
+{
+	struct qcom_ipc_device *qidev = to_ipc_device(dev);
+	struct qcom_ipc_driver *qidrv = to_ipc_driver(dev);
+	int ret;
+
+	ret = glink_close(qidev->channel);
+	if (ret)
+		dev_err(&qidev->dev, "glink_close failed\n");
+
+	qidrv->remove(qidev);
+
+	return ret;
+}
+
+static struct bus_type qcom_ipc_bus = {
+	.name = "qcom_ipc",
+	.match = qcom_ipc_dev_match,
+	.probe = qcom_ipc_dev_probe,
+	.remove = qcom_ipc_dev_remove,
+};
+
+/*
+ * Release function for the qcom_ipc_device object.
+ */
+static void qcom_ipc_release_device(struct device *dev)
+{
+	struct qcom_ipc_device *qidev = to_ipc_device(dev);
+
+	kfree(qidev);
+}
+
+/*
+ * Create an ipc client device for a channel that is being opened.
+ */
+static int qcom_ipc_create_device(struct device_node *node,
+				  const void *edge_name)
+{
+	struct qcom_ipc_device *qidev;
+	const char *name = edge_name;
+	int ret;
+
+	qidev = kzalloc(sizeof(*qidev), GFP_KERNEL);
+	if (!qidev)
+		return -ENOMEM;
+
+	dev_set_name(&qidev->dev, "%s.%s", name, node->name);
+	qidev->dev.parent = glink_dev;
+	qidev->dev.bus = &qcom_ipc_bus;
+	qidev->dev.release = qcom_ipc_release_device;
+	qidev->dev.of_node = node;
+	dev_set_drvdata(&qidev->dev, (void *)edge_name);
+
+	ret = device_register(&qidev->dev);
+	if (ret) {
+		dev_err(&qidev->dev, "device_register failed: %d\n", ret);
+		put_device(&qidev->dev);
+	}
+
+	return ret;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo:	The edge to initialize.
+ * @name:	The name of the remote side this edge communicates to.
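+ *
+ * The version table and the channel/intent id limits are fixed for
+ * this transport.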
+ */ +static void init_xprt_cfg(struct edge_info *einfo, const char *name) +{ + einfo->xprt_cfg.name = XPRT_NAME; + einfo->xprt_cfg.edge = name; + einfo->xprt_cfg.versions = versions; + einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions); + einfo->xprt_cfg.max_cid = SZ_64K; + einfo->xprt_cfg.max_iid = SZ_2G; +} + +static int glink_edge_parse(struct device_node *node, const char *edge_name) +{ + struct device_node *child_node; + struct edge_info *einfo; + int rc; + char *key; + const char *subsys_name; + uint32_t irq_line; + uint32_t irq_mask; + struct resource irq_r; + struct resource msgram_r; + void __iomem *msgram; + char toc[RPM_TOC_SIZE]; + uint32_t *tocp; + uint32_t num_toc_entries; + + einfo = kzalloc(sizeof(*einfo), GFP_KERNEL); + if (!einfo) { + pr_err("%s: edge_info allocation failed\n", __func__); + rc = -ENOMEM; + goto edge_info_alloc_fail; + } + + subsys_name = edge_name; + + key = "interrupts"; + irq_line = irq_of_parse_and_map(node, 0); + if (!irq_line) { + pr_err("%s: missing key %s\n", __func__, key); + rc = -ENODEV; + goto missing_key; + } + + key = "qcom,irq-mask"; + rc = of_property_read_u32(node, key, &irq_mask); + if (rc) { + pr_err("%s: missing key %s\n", __func__, key); + rc = -ENODEV; + goto missing_key; + } + + rc = of_address_to_resource(node, 1, &irq_r); + if (rc || !irq_r.start) { + pr_err("%s: missing key %s\n", __func__, key); + rc = -ENODEV; + goto missing_key; + } + + rc = of_address_to_resource(node, 0, &msgram_r); + if (rc || !msgram_r.start) { + pr_err("%s: missing key %s\n", __func__, key); + rc = -ENODEV; + goto missing_key; + } + + init_xprt_cfg(einfo, subsys_name); + init_xprt_if(einfo); + spin_lock_init(&einfo->write_lock); + init_waitqueue_head(&einfo->tx_blocked_queue); + init_kthread_work(&einfo->kwork, rx_worker); + init_kthread_worker(&einfo->kworker); + einfo->intentless = true; + einfo->read_from_fifo = memcpy32_fromio; + einfo->write_to_fifo = memcpy32_toio; + init_srcu_struct(&einfo->use_ref); + spin_lock_init(&einfo->rx_lock); + INIT_LIST_HEAD(&einfo->deferred_cmds); + + einfo->out_irq_mask = irq_mask; + einfo->out_irq_reg = ioremap_nocache(irq_r.start, + resource_size(&irq_r)); + + if (!einfo->out_irq_reg) { + pr_err("%s: unable to map irq reg\n", __func__); + rc = -ENOMEM; + goto irq_ioremap_fail; + } + + msgram = ioremap_nocache(msgram_r.start, resource_size(&msgram_r)); + if (!msgram) { + pr_err("%s: unable to map msgram\n", __func__); + rc = -ENOMEM; + goto msgram_ioremap_fail; + } + + einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker, + "smem_native_%s", subsys_name); + if (IS_ERR(einfo->task)) { + rc = PTR_ERR(einfo->task); + pr_err("%s: kthread_run failed %d\n", __func__, rc); + goto kthread_fail; + } + + memcpy32_fromio(toc, msgram + resource_size(&msgram_r) - RPM_TOC_SIZE, + RPM_TOC_SIZE); + tocp = (uint32_t *)toc; + if (*tocp != RPM_TOC_ID) { + rc = -ENODEV; + pr_err("%s: TOC id %d is not valid\n", __func__, *tocp); + goto toc_init_fail; + } + ++tocp; + num_toc_entries = *tocp; + if (num_toc_entries > RPM_MAX_TOC_ENTRIES) { + rc = -ENODEV; + pr_err("%s: %d is too many toc entries\n", __func__, + num_toc_entries); + goto toc_init_fail; + } + ++tocp; + + for (rc = 0; rc < num_toc_entries; ++rc) { + if (*tocp != RPM_TX_FIFO_ID) { + tocp += 3; + continue; + } + ++tocp; + einfo->tx_ch_desc = msgram + *tocp; + einfo->tx_fifo = einfo->tx_ch_desc + 1; + if ((uintptr_t)einfo->tx_fifo > + (uintptr_t)(msgram + resource_size(&msgram_r))) { + pr_err("%s: invalid tx fifo address\n", __func__); + einfo->tx_fifo = NULL; + break; 
+ } + ++tocp; + einfo->tx_fifo_size = *tocp; + if (einfo->tx_fifo_size > resource_size(&msgram_r) || + (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) > + (uintptr_t)(msgram + resource_size(&msgram_r))) { + pr_err("%s: invalid tx fifo size\n", __func__); + einfo->tx_fifo = NULL; + break; + } + break; + } + if (!einfo->tx_fifo) { + rc = -ENODEV; + pr_err("%s: tx fifo not found\n", __func__); + goto toc_init_fail; + } + + tocp = (uint32_t *)toc; + tocp += 2; + for (rc = 0; rc < num_toc_entries; ++rc) { + if (*tocp != RPM_RX_FIFO_ID) { + tocp += 3; + continue; + } + ++tocp; + einfo->rx_ch_desc = msgram + *tocp; + einfo->rx_fifo = einfo->rx_ch_desc + 1; + if ((uintptr_t)einfo->rx_fifo > + (uintptr_t)(msgram + resource_size(&msgram_r))) { + pr_err("%s: invalid rx fifo address\n", __func__); + einfo->rx_fifo = NULL; + break; + } + ++tocp; + einfo->rx_fifo_size = *tocp; + if (einfo->rx_fifo_size > resource_size(&msgram_r) || + (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) > + (uintptr_t)(msgram + resource_size(&msgram_r))) { + pr_err("%s: invalid rx fifo size\n", __func__); + einfo->rx_fifo = NULL; + break; + } + break; + } + if (!einfo->rx_fifo) { + rc = -ENODEV; + pr_err("%s: rx fifo not found\n", __func__); + goto toc_init_fail; + } + + einfo->tx_ch_desc->write_index = 0; + einfo->rx_ch_desc->read_index = 0; + + rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg); + if (rc == -EPROBE_DEFER) + goto reg_xprt_fail; + if (rc) { + pr_err("%s: glink core register transport failed: %d\n", + __func__, rc); + goto reg_xprt_fail; + } + + einfo->irq_line = irq_line; + rc = request_irq(irq_line, irq_handler, + IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED, + node->name, einfo); + if (rc < 0) { + pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line, + rc); + goto request_irq_fail; + } + rc = enable_irq_wake(irq_line); + if (rc < 0) + pr_err("%s: enable_irq_wake() failed on %d\n", __func__, + irq_line); + + register_debugfs_info(einfo); + einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if); + + /* scan through all the edges available channels */ + for_each_available_child_of_node(node, child_node) + qcom_ipc_create_device(child_node, edge_name); + return 0; + +request_irq_fail: + glink_core_unregister_transport(&einfo->xprt_if); +reg_xprt_fail: +toc_init_fail: + flush_kthread_worker(&einfo->kworker); + kthread_stop(einfo->task); + einfo->task = NULL; +kthread_fail: + iounmap(msgram); +msgram_ioremap_fail: + iounmap(einfo->out_irq_reg); +irq_ioremap_fail: +missing_key: + kfree(einfo); +edge_info_alloc_fail: + return rc; +} + +static int glink_native_probe(struct platform_device *pdev) +{ + struct device_node *node; + const char *edge_name, *key; + int ret; + + glink_dev = &pdev->dev; + + init_completion(&glink_ack); + qcom_ipc_bus_register(&qcom_ipc_bus); + + for_each_available_child_of_node(pdev->dev.of_node, node) { + key = "qcom,glink-edge"; + ret = of_property_read_string(node, key, &edge_name); + if (ret) { + dev_err(&pdev->dev, "edge missing %s property\n", key); + return -EINVAL; + } + + glink_edge_parse(node, edge_name); + } + + return 0; +} + +#if defined(CONFIG_DEBUG_FS) +/** + * debug_edge() - generates formatted text output displaying current edge state + * @s: File to send the output to. 
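+ *
+ * Prints "Link Not Up" in place of the FIFO indices when the link has
+ * not come up yet.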
+ */ +static void debug_edge(struct seq_file *s) +{ + struct edge_info *einfo; + struct glink_dbgfs_data *dfs_d; + + dfs_d = s->private; + einfo = dfs_d->priv_data; + +/* + * formatted, human readable edge state output, ie: + * TX/RX fifo information: +ID|EDGE |TX READ |TX WRITE |TX SIZE |RX READ |RX WRITE |RX SIZE +------------------------------------------------------------------------------- +01|mpss |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000 + * + * Interrupt information: + * EDGE |TX INT |RX INT + * -------------------------------- + * mpss |0x00000006|0x00000008 + */ + seq_puts(s, "TX/RX fifo information:\n"); + seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n", + "ID", + "EDGE", + "TX READ", + "TX WRITE", + "TX SIZE", + "RX READ", + "RX WRITE", + "RX SIZE"); + seq_puts(s, + "-------------------------------------------------------------------------------\n"); + if (!einfo) + return; + + if (!einfo->rx_fifo) + seq_puts(s, "Link Not Up\n"); + else + seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n", + einfo->tx_ch_desc->read_index, + einfo->tx_ch_desc->write_index, + einfo->tx_fifo_size, + einfo->rx_ch_desc->read_index, + einfo->rx_ch_desc->write_index, + einfo->rx_fifo_size); + + seq_puts(s, "\nInterrupt information:\n"); + seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT"); + seq_puts(s, "--------------------------------\n"); + seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge, + einfo->tx_irq_count, + einfo->rx_irq_count); +} + +/** + * register_debugfs_info() - initialize debugfs device entries + * @einfo: Pointer to specific edge_info for which register is called. + */ +static void register_debugfs_info(struct edge_info *einfo) +{ + struct glink_dbgfs dfs; + char *curr_dir_name; + int dir_name_len; + + dir_name_len = strlen(einfo->xprt_cfg.edge) + + strlen(einfo->xprt_cfg.name) + 2; + curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL); + if (!curr_dir_name) { + GLINK_ERR("%s: Memory allocation failed\n", __func__); + return; + } + + snprintf(curr_dir_name, dir_name_len, "%s_%s", + einfo->xprt_cfg.edge, einfo->xprt_cfg.name); + dfs.curr_name = curr_dir_name; + dfs.par_name = "xprt"; + dfs.b_dir_create = false; + glink_debugfs_create("XPRT_INFO", debug_edge, + &dfs, einfo, false); + kfree(curr_dir_name); +} + +#else +static void register_debugfs_info(struct edge_info *einfo) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +static struct of_device_id glink_match_table[] = { + { .compatible = "qcom,glink" }, + {}, +}; + +static struct platform_driver glink_rpm_native_driver = { + .probe = glink_native_probe, + .driver = { + .name = "qcom_glink", + .owner = THIS_MODULE, + .of_match_table = glink_match_table, + }, +}; + +static int __init glink_smem_native_xprt_init(void) +{ + int rc; + + rc = platform_driver_register(&glink_rpm_native_driver); + if (rc) { + pr_err("%s: glink_rpm_native_driver register failed %d\n", + __func__, rc); + return rc; + } + + return 0; +} +postcore_initcall(glink_smem_native_xprt_init); + +MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h new file mode 100644 index 00000000000000..6242e867fe7202 --- /dev/null +++ b/drivers/soc/qcom/glink_xprt_if.h @@ -0,0 +1,201 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _SOC_QCOM_GLINK_XPRT_IF_H_ +#define _SOC_QCOM_GLINK_XPRT_IF_H_ + +#include +#include +#include + +struct glink_core_xprt_ctx; +struct glink_core_if; +struct channel_ctx; +struct glink_core_rx_intent; + +enum buf_type { + LINEAR = 0, + VECTOR, +}; + +enum xprt_ids { + SMEM_XPRT_ID = 100, + SMD_TRANS_XPRT_ID = 200, + LLOOP_XPRT_ID = 300, + MOCK_XPRT_HIGH_ID = 390, + MOCK_XPRT_ID = 400, + MOCK_XPRT_LOW_ID = 410, +}; + +#define GCAP_SIGNALS BIT(0) +#define GCAP_INTENTLESS BIT(1) +#define GCAP_TRACER_PKT BIT(2) +#define GCAP_AUTO_QUEUE_RX_INT BIT(3) + +/** + * struct glink_core_tx_pkt - Transmit Packet information + * @list_done: Index to the channel's transmit queue. + * @list_done: Index to the channel's acknowledgment queue. + * @pkt_priv: Private information specific to the packet. + * @data: Pointer to the buffer containing the data. + * @riid: Remote receive intent used to transmit the packet. + * @rcid: Remote channel receiving the packet. + * @size: Total size of the data in the packet. + * @tx_len: Data length to transmit in the current transmit slot. + * @size_remaining: Remaining size of the data in the packet. + * @intent_size: Receive intent size queued by the remote side. + * @tracer_pkt: Flag to indicate if the packet is a tracer packet. + * @iovec: Pointer to the vector buffer packet. + * @vprovider: Packet-specific virtual buffer provider function. + * @pprovider: Packet-specific physical buffer provider function. + * @pkt_ref: Active references to the packet. + */ +struct glink_core_tx_pkt { + struct list_head list_node; + struct list_head list_done; + const void *pkt_priv; + const void *data; + uint32_t riid; + uint32_t rcid; + uint32_t size; + uint32_t tx_len; + uint32_t size_remaining; + size_t intent_size; + bool tracer_pkt; + void *iovec; + void * (*vprovider)(void *iovec, size_t offset, size_t *size); + void * (*pprovider)(void *iovec, size_t offset, size_t *size); + struct rwref_lock pkt_ref; +}; + +/** + * Note - each call to register the interface must pass a unique + * instance of this data. 
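+ * The structure is typically embedded in the transport's private
+ * state, letting the implementation recover that state with
+ * container_of() from the if_ptr passed to every callback.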
+ */ +struct glink_transport_if { + /* Negotiation */ + void (*tx_cmd_version)(struct glink_transport_if *if_ptr, + uint32_t version, + uint32_t features); + void (*tx_cmd_version_ack)(struct glink_transport_if *if_ptr, + uint32_t version, + uint32_t features); + uint32_t (*set_version)(struct glink_transport_if *if_ptr, + uint32_t version, + uint32_t features); + + /* channel state */ + int (*tx_cmd_ch_open)(struct glink_transport_if *if_ptr, uint32_t lcid, + const char *name, uint16_t req_xprt); + int (*tx_cmd_ch_close)(struct glink_transport_if *if_ptr, + uint32_t lcid); + void (*tx_cmd_ch_remote_open_ack)(struct glink_transport_if *if_ptr, + uint32_t rcid, uint16_t xprt_resp); + void (*tx_cmd_ch_remote_close_ack)(struct glink_transport_if *if_ptr, + uint32_t rcid); + int (*ssr)(struct glink_transport_if *if_ptr); + + /* channel data */ + int (*allocate_rx_intent)(struct glink_transport_if *if_ptr, + size_t size, + struct glink_core_rx_intent *intent); + int (*deallocate_rx_intent)(struct glink_transport_if *if_ptr, + struct glink_core_rx_intent *intent); + /* Optional */ + int (*reuse_rx_intent)(struct glink_transport_if *if_ptr, + struct glink_core_rx_intent *intent); + + int (*tx_cmd_local_rx_intent)(struct glink_transport_if *if_ptr, + uint32_t lcid, size_t size, uint32_t liid); + void (*tx_cmd_local_rx_done)(struct glink_transport_if *if_ptr, + uint32_t lcid, uint32_t liid, bool reuse); + int (*tx)(struct glink_transport_if *if_ptr, uint32_t lcid, + struct glink_core_tx_pkt *pctx); + int (*tx_cmd_rx_intent_req)(struct glink_transport_if *if_ptr, + uint32_t lcid, size_t size); + int (*tx_cmd_remote_rx_intent_req_ack)( + struct glink_transport_if *if_ptr, + uint32_t lcid, bool granted); + int (*tx_cmd_set_sigs)(struct glink_transport_if *if_ptr, + uint32_t lcid, uint32_t sigs); + + /* Optional. If NULL at xprt registration, dummies will be used */ + int (*poll)(struct glink_transport_if *if_ptr, uint32_t lcid); + int (*mask_rx_irq)(struct glink_transport_if *if_ptr, uint32_t lcid, + bool mask, void *pstruct); + int (*wait_link_down)(struct glink_transport_if *if_ptr); + int (*tx_cmd_tracer_pkt)(struct glink_transport_if *if_ptr, + uint32_t lcid, struct glink_core_tx_pkt *pctx); + unsigned long (*get_power_vote_ramp_time)( + struct glink_transport_if *if_ptr, uint32_t state); + int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state); + int (*power_unvote)(struct glink_transport_if *if_ptr); + /* + * Keep data pointers at the end of the structure after all function + * pointer to allow for in-place initialization. + */ + + /* private pointer for core */ + struct glink_core_xprt_ctx *glink_core_priv; + + /* core pointer (set during transport registration) */ + struct glink_core_if *glink_core_if_ptr; +}; + +#ifdef CONFIG_MSM_GLINK + +/** + * get_tx_vaddr() - Get the virtual address from which the tx has to be done + * @pctx: transmit packet context. + * @offset: offset into the packet. + * @tx_size: pointer to hold the length of the contiguous buffer + * space. + * + * Return: Address from which the tx has to be done. 
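+ *
+ * NULL is returned when the packet provides neither a virtual nor a
+ * physical buffer provider.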
+ */ +static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset, + size_t *tx_size) +{ + void *pdata; + + if (pctx->vprovider) { + return pctx->vprovider((void *)pctx->iovec, offset, tx_size); + } else if (pctx->pprovider) { + pdata = pctx->pprovider((void *)pctx->iovec, offset, tx_size); + return phys_to_virt((unsigned long)pdata); + } + return NULL; +} + +/** + * glink_xprt_name_to_id() - convert transport name to id + * @name: Name of the transport. + * @id: Assigned id. + * + * Return: 0 on success or standard Linux error code. + */ +int glink_xprt_name_to_id(const char *name, uint16_t *id); + + +#else /* CONFIG_MSM_GLINK */ +static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset, + size_t *tx_size) +{ + return NULL; +} + +static inline int glink_xprt_name_to_id(const char *name, uint16_t *id) +{ + return -ENODEV; +} + +#endif /* CONFIG_MSM_GLINK */ +#endif /* _SOC_QCOM_GLINK_XPRT_IF_H_ */ diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 731fa066f712eb..925a68fed70e57 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -20,23 +20,28 @@ #include #include +#include #include #define RPM_REQUEST_TIMEOUT (5 * HZ) +#define SMD_RPM BIT(0) +#define GLINK_RPM BIT(1) + /** * struct qcom_smd_rpm - state of the rpm device driver - * @rpm_channel: reference to the smd channel + * @smd_channel: reference to the smd channel * @ack: completion for acks * @lock: mutual exclusion around the send/complete pair * @ack_status: result of the rpm request */ struct qcom_smd_rpm { - struct qcom_smd_channel *rpm_channel; - + struct qcom_smd_channel *smd_channel; + void *glink_channel; struct completion ack; struct mutex lock; int ack_status; + u8 flag; }; /** @@ -132,7 +137,12 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, pkt->req.data_len = cpu_to_le32(count); memcpy(pkt->payload, buf, count); - ret = qcom_smd_send(rpm->rpm_channel, pkt, size); + if (rpm->flag & SMD_RPM) { + ret = qcom_smd_send(rpm->smd_channel, pkt, size); + } else { + ret = glink_tx(rpm->glink_channel, pkt, pkt, size, + GLINK_TX_SINGLE_THREADED); + } if (ret) goto out; @@ -149,14 +159,15 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, } EXPORT_SYMBOL(qcom_rpm_smd_write); -static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, +static int qcom_ipc_rpm_callback(void *dev, const void *data, size_t count) { const struct qcom_rpm_header *hdr = data; + struct qcom_ipc_device *qidev = dev; size_t hdr_length = le32_to_cpu(hdr->length); const struct qcom_rpm_message *msg; - struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); + struct qcom_smd_rpm *rpm = dev_get_drvdata(&qidev->dev); const u8 *buf = data + sizeof(struct qcom_rpm_header); const u8 *end = buf + hdr_length; char msgbuf[32]; @@ -165,13 +176,14 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST || hdr_length < sizeof(struct qcom_rpm_message)) { - dev_err(&qsdev->dev, "invalid request\n"); + dev_err(&qidev->dev, "invalid request\n"); return 0; } while (buf < end) { msg = (struct qcom_rpm_message *)buf; msg_length = le32_to_cpu(msg->length); + switch (le32_to_cpu(msg->msg_type)) { case RPM_MSG_TYPE_MSG_ID: break; @@ -191,64 +203,74 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, } rpm->ack_status = status; + complete(&rpm->ack); return 0; } -static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev) +static int qcom_ipc_rpm_probe(struct qcom_ipc_device *idev) { struct 
qcom_smd_rpm *rpm; - rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL); + rpm = devm_kzalloc(&idev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; + if (of_device_is_compatible(idev->dev.of_node, "qcom,rpm-msm8996")) { + rpm->flag |= GLINK_RPM; + rpm->glink_channel = idev->channel; + + } else if (of_device_is_compatible(idev->dev.of_node, + "qcom,rpm-msm8974")) { + rpm->flag |= SMD_RPM; + rpm->smd_channel = idev->channel; + } + mutex_init(&rpm->lock); init_completion(&rpm->ack); - rpm->rpm_channel = sdev->channel; - - dev_set_drvdata(&sdev->dev, rpm); + dev_set_drvdata(&idev->dev, rpm); - return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev); + return of_platform_populate(idev->dev.of_node, NULL, NULL, &idev->dev); } -static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev) +static void qcom_ipc_rpm_remove(struct qcom_ipc_device *idev) { - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&idev->dev); } -static const struct of_device_id qcom_smd_rpm_of_match[] = { +static const struct of_device_id qcom_ipc_rpm_of_match[] = { { .compatible = "qcom,rpm-apq8084" }, { .compatible = "qcom,rpm-msm8916" }, + { .compatible = "qcom,rpm-msm8996" }, { .compatible = "qcom,rpm-msm8974" }, {} }; -MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); +MODULE_DEVICE_TABLE(of, qcom_ipc_rpm_of_match); -static struct qcom_smd_driver qcom_smd_rpm_driver = { - .probe = qcom_smd_rpm_probe, - .remove = qcom_smd_rpm_remove, - .callback = qcom_smd_rpm_callback, +static struct qcom_ipc_driver qcom_ipc_rpm_driver = { + .probe = qcom_ipc_rpm_probe, + .remove = qcom_ipc_rpm_remove, + .callback = qcom_ipc_rpm_callback, .driver = { - .name = "qcom_smd_rpm", + .name = "qcom_ipc_rpm", .owner = THIS_MODULE, - .of_match_table = qcom_smd_rpm_of_match, + .of_match_table = qcom_ipc_rpm_of_match, }, }; -static int __init qcom_smd_rpm_init(void) +static int __init qcom_ipc_rpm_init(void) { - return qcom_smd_driver_register(&qcom_smd_rpm_driver); + return qcom_ipc_driver_register(&qcom_ipc_rpm_driver); } -arch_initcall(qcom_smd_rpm_init); +subsys_initcall(qcom_ipc_rpm_init); -static void __exit qcom_smd_rpm_exit(void) +static void __exit qcom_ipc_rpm_exit(void) { - qcom_smd_driver_unregister(&qcom_smd_rpm_driver); + qcom_ipc_driver_unregister(&qcom_ipc_rpm_driver); } -module_exit(qcom_smd_rpm_exit); +module_exit(qcom_ipc_rpm_exit); MODULE_AUTHOR("Bjorn Andersson "); -MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver"); +MODULE_DESCRIPTION("Qualcomm SMD/GLINK backed RPM driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index 498fd0581a4519..05e7ff52088a92 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -69,6 +69,8 @@ struct smd_channel_info_pair; struct smd_channel_info_word; struct smd_channel_info_word_pair; +static struct bus_type *ipc_bus; + #define SMD_ALLOC_TBL_COUNT 2 #define SMD_ALLOC_TBL_SIZE 64 @@ -149,7 +151,7 @@ enum smd_channel_state { /** * struct qcom_smd_channel - smd channel struct * @edge: qcom_smd_edge this channel is living on - * @qsdev: reference to a associated smd client device + * @qidev: reference to a associated ipc client device * @name: name of the channel * @state: local state of the channel * @remote_state: remote state of the channel @@ -169,7 +171,7 @@ enum smd_channel_state { struct qcom_smd_channel { struct qcom_smd_edge *edge; - struct qcom_smd_device *qsdev; + struct qcom_ipc_device *qidev; char *name; enum smd_channel_state state; @@ -186,7 +188,7 @@ struct qcom_smd_channel { int 
fifo_size; void *bounce_buffer; - int (*cb)(struct qcom_smd_device *, const void *, size_t); + int (*cb)(void *, const void *, size_t); spinlock_t recv_lock; @@ -497,7 +499,7 @@ static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, */ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) { - struct qcom_smd_device *qsdev = channel->qsdev; + struct qcom_ipc_device *qidev = channel->qidev; unsigned tail; size_t len; void *ptr; @@ -517,7 +519,7 @@ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) len = channel->pkt_size; } - ret = channel->cb(qsdev, ptr, len); + ret = channel->cb(qidev, ptr, len); if (ret < 0) return ret; @@ -762,22 +764,22 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) } EXPORT_SYMBOL(qcom_smd_send); -static struct qcom_smd_device *to_smd_device(struct device *dev) +static struct qcom_ipc_device *to_ipc_device(struct device *dev) { - return container_of(dev, struct qcom_smd_device, dev); + return container_of(dev, struct qcom_ipc_device, dev); } -static struct qcom_smd_driver *to_smd_driver(struct device *dev) +static struct qcom_ipc_driver *to_ipc_driver(struct device *dev) { - struct qcom_smd_device *qsdev = to_smd_device(dev); + struct qcom_ipc_device *qidev = to_ipc_device(dev); - return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver); + return container_of(qidev->dev.driver, struct qcom_ipc_driver, driver); } -static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) +static int qcom_ipc_dev_match(struct device *dev, struct device_driver *drv) { - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver); + struct qcom_ipc_device *qsdev = to_ipc_device(dev); + struct qcom_ipc_driver *qsdrv = container_of(drv, struct qcom_ipc_driver, driver); const struct qcom_smd_id *match = qsdrv->smd_match_table; const char *name = qsdev->channel->name; @@ -793,16 +795,16 @@ static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) } /* - * Probe the smd client. + * Probe the ipc client. * * The remote side have indicated that it want the channel to be opened, so * complete the state handshake and probe our client driver. */ -static int qcom_smd_dev_probe(struct device *dev) +static int qcom_ipc_dev_probe(struct device *dev) { - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; + struct qcom_ipc_device *qidev = to_ipc_device(dev); + struct qcom_ipc_driver *qidrv = to_ipc_driver(dev); + struct qcom_smd_channel *channel = qidev->channel; size_t bb_size; int ret; @@ -814,13 +816,13 @@ static int qcom_smd_dev_probe(struct device *dev) if (!channel->bounce_buffer) return -ENOMEM; - channel->cb = qsdrv->callback; + channel->cb = qidrv->callback; qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); - ret = qsdrv->probe(qsdev); + ret = qidrv->probe(qidev); if (ret) goto err; @@ -829,7 +831,7 @@ static int qcom_smd_dev_probe(struct device *dev) return 0; err: - dev_err(&qsdev->dev, "probe failed\n"); + dev_err(&qidev->dev, "probe failed\n"); channel->cb = NULL; kfree(channel->bounce_buffer); @@ -845,11 +847,11 @@ static int qcom_smd_dev_probe(struct device *dev) * The channel is going away, for some reason, so remove the smd client and * reset the channel state. 
*/ -static int qcom_smd_dev_remove(struct device *dev) +static int qcom_ipc_dev_remove(struct device *dev) { - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; + struct qcom_ipc_device *qidev = to_ipc_device(dev); + struct qcom_ipc_driver *qidrv = to_ipc_driver(dev); + struct qcom_smd_channel *channel = qidev->channel; unsigned long flags; qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); @@ -868,13 +870,13 @@ static int qcom_smd_dev_remove(struct device *dev) * We expect that the client might block in remove() waiting for any * outstanding calls to qcom_smd_send() to wake up and finish. */ - if (qsdrv->remove) - qsdrv->remove(qsdev); + if (qidrv->remove) + qidrv->remove(qidev); /* * The client is now gone, cleanup and reset the channel state. */ - channel->qsdev = NULL; + channel->qidev = NULL; kfree(channel->bounce_buffer); channel->bounce_buffer = NULL; @@ -885,21 +887,21 @@ static int qcom_smd_dev_remove(struct device *dev) return 0; } -static struct bus_type qcom_smd_bus = { - .name = "qcom_smd", - .match = qcom_smd_dev_match, - .probe = qcom_smd_dev_probe, - .remove = qcom_smd_dev_remove, +struct bus_type qcom_ipc_bus = { + .name = "qcom_ipc", + .match = qcom_ipc_dev_match, + .probe = qcom_ipc_dev_probe, + .remove = qcom_ipc_dev_remove, }; /* * Release function for the qcom_smd_device object. */ -static void qcom_smd_release_device(struct device *dev) +static void qcom_ipc_release_device(struct device *dev) { - struct qcom_smd_device *qsdev = to_smd_device(dev); + struct qcom_ipc_device *qidev = to_ipc_device(dev); - kfree(qsdev); + kfree(qidev); } /* @@ -927,43 +929,44 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, } /* - * Create a smd client device for channel that is being opened. + * Create a ipc client device for channel that is being opened. */ -static int qcom_smd_create_device(struct qcom_smd_channel *channel) +static int qcom_ipc_create_device(struct qcom_smd_channel *channel) { - struct qcom_smd_device *qsdev; + struct qcom_ipc_device *qidev; struct qcom_smd_edge *edge = channel->edge; struct device_node *node; struct qcom_smd *smd = edge->smd; int ret; - if (channel->qsdev) + if (channel->qidev) return -EEXIST; dev_dbg(smd->dev, "registering '%s'\n", channel->name); - qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); - if (!qsdev) + qidev = kzalloc(sizeof(*qidev), GFP_KERNEL); + if (!qidev) return -ENOMEM; node = qcom_smd_match_channel(edge->of_node, channel->name); - dev_set_name(&qsdev->dev, "%s.%s", + dev_set_name(&qidev->dev, "%s.%s", edge->of_node->name, node ? node->name : channel->name); - qsdev->dev.parent = smd->dev; - qsdev->dev.bus = &qcom_smd_bus; - qsdev->dev.release = qcom_smd_release_device; - qsdev->dev.of_node = node; + dev_set_name(&qidev->dev, "%s.%s", edge->of_node->name, node->name); + qidev->dev.parent = smd->dev; + qidev->dev.bus = ipc_bus; + qidev->dev.release = qcom_ipc_release_device; + qidev->dev.of_node = node; - qsdev->channel = channel; + qidev->channel = channel; - channel->qsdev = qsdev; + channel->qidev = qidev; - ret = device_register(&qsdev->dev); + ret = device_register(&qidev->dev); if (ret) { dev_err(smd->dev, "device_register failed: %d\n", ret); - put_device(&qsdev->dev); + put_device(&qidev->dev); } return ret; @@ -972,13 +975,13 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) /* * Destroy a smd client device for a channel that's going away. 
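+ *
+ * device_unregister() drops the reference taken when the device was
+ * created; the of_node reference is released explicitly afterwards.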
*/ -static void qcom_smd_destroy_device(struct qcom_smd_channel *channel) +static void qcom_ipc_destroy_device(struct qcom_smd_channel *channel) { struct device *dev; - BUG_ON(!channel->qsdev); + BUG_ON(!channel->qidev); - dev = &channel->qsdev->dev; + dev = &channel->qidev->dev; device_unregister(dev); of_node_put(dev->of_node); @@ -986,25 +989,26 @@ static void qcom_smd_destroy_device(struct qcom_smd_channel *channel) } /** - * qcom_smd_driver_register - register a smd driver - * @qsdrv: qcom_smd_driver struct + * qcom_ipc_driver_register - register a smd driver + * @qidrv: qcom_ipc_driver struct */ -int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv) +int qcom_ipc_driver_register(struct qcom_ipc_driver *qidrv) { - qsdrv->driver.bus = &qcom_smd_bus; - return driver_register(&qsdrv->driver); + qidrv->driver.bus = ipc_bus; + + return driver_register(&qidrv->driver); } -EXPORT_SYMBOL(qcom_smd_driver_register); +EXPORT_SYMBOL(qcom_ipc_driver_register); /** - * qcom_smd_driver_unregister - unregister a smd driver - * @qsdrv: qcom_smd_driver struct + * qcom_ipc_driver_unregister - unregister a ipc driver + * @qidrv: qcom_ipc_driver struct */ -void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv) +void qcom_ipc_driver_unregister(struct qcom_ipc_driver *qidrv) { - driver_unregister(&qsdrv->driver); + driver_unregister(&qidrv->driver); } -EXPORT_SYMBOL(qcom_smd_driver_unregister); +EXPORT_SYMBOL(qcom_ipc_driver_unregister); /* * Allocate the qcom_smd_channel object for a newly found smd channel, @@ -1183,7 +1187,7 @@ static void qcom_channel_state_worker(struct work_struct *work) remote_state != SMD_CHANNEL_OPENED) continue; - qcom_smd_create_device(channel); + qcom_ipc_create_device(channel); } /* @@ -1200,7 +1204,7 @@ static void qcom_channel_state_worker(struct work_struct *work) remote_state == SMD_CHANNEL_OPENED) continue; - qcom_smd_destroy_device(channel); + qcom_ipc_destroy_device(channel); } } @@ -1313,6 +1317,7 @@ static int qcom_smd_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, smd); + qcom_ipc_bus_register(&qcom_ipc_bus); return 0; } @@ -1335,10 +1340,10 @@ static int qcom_smd_remove(struct platform_device *pdev) cancel_work_sync(&edge->work); list_for_each_entry(channel, &edge->channels, list) { - if (!channel->qsdev) + if (!channel->qidev) continue; - qcom_smd_destroy_device(channel); + qcom_ipc_destroy_device(channel); } } @@ -1360,27 +1365,32 @@ static struct platform_driver qcom_smd_driver = { }, }; -static int __init qcom_smd_init(void) +void qcom_ipc_bus_register(struct bus_type *bus) { int ret; - ret = bus_register(&qcom_smd_bus); + ret = bus_register(bus); if (ret) { - pr_err("failed to register smd bus: %d\n", ret); - return ret; + pr_err("failed to register ipc bus: %d\n", ret); + return; } + ipc_bus = bus; +} + +static int __init qcom_ipc_init(void) +{ return platform_driver_register(&qcom_smd_driver); } -postcore_initcall(qcom_smd_init); +postcore_initcall(qcom_ipc_init); -static void __exit qcom_smd_exit(void) +static void __exit qcom_ipc_exit(void) { platform_driver_unregister(&qcom_smd_driver); - bus_unregister(&qcom_smd_bus); + bus_unregister(ipc_bus); } -module_exit(qcom_smd_exit); +module_exit(qcom_ipc_exit); MODULE_AUTHOR("Bjorn Andersson "); -MODULE_DESCRIPTION("Qualcomm Shared Memory Driver"); +MODULE_DESCRIPTION("Qualcomm SMD IPC driver"); MODULE_LICENSE("GPL v2"); diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 888e75ce8fecfa..6f814db11c7e1a 100644 --- 
a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -336,4 +336,15 @@ #define GCC_MSS_Q6_BCR 99 #define GCC_QREFS_VBG_CAL_BCR 100 +/* Indexes for GDSCs */ +#define AGGRE0_NOC_GDSC 0 +#define HLOS1_VOTE_AGGRE0_NOC_GDSC 1 +#define HLOS1_VOTE_LPASS_ADSP_GDSC 2 +#define HLOS1_VOTE_LPASS_CORE_GDSC 3 +#define USB30_GDSC 4 +#define PCIE0_GDSC 5 +#define PCIE1_GDSC 6 +#define PCIE2_GDSC 7 +#define UFS_GDSC 8 + #endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h index 9b81ca65fcecfb..5abc445ad8152d 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h @@ -282,4 +282,22 @@ #define FD_BCR 58 #define MMSS_SPDM_RM_BCR 59 +/* Indexes for GDSCs */ +#define MMAGIC_VIDEO_GDSC 0 +#define MMAGIC_MDSS_GDSC 1 +#define MMAGIC_CAMSS_GDSC 2 +#define GPU_GDSC 3 +#define VENUS_GDSC 4 +#define VENUS_CORE0_GDSC 5 +#define VENUS_CORE1_GDSC 6 +#define CAMSS_GDSC 7 +#define VFE0_GDSC 8 +#define VFE1_GDSC 9 +#define JPEG_GDSC 10 +#define CPP_GDSC 11 +#define FD_GDSC 12 +#define MDSS_GDSC 13 +#define GPU_GX_GDSC 14 +#define MMAGIC_BIMC_GDSC 15 + #endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h new file mode 100644 index 00000000000000..132d2928fbe909 --- /dev/null +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -0,0 +1,63 @@ +/* + * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H +#define _DT_BINDINGS_CLK_MSM_RPMCC_H + +/* msm8916 */ +#define RPM_XO_CLK_SRC 0 +#define RPM_XO_A_CLK_SRC 1 +#define RPM_PCNOC_CLK 2 +#define RPM_PCNOC_A_CLK 3 +#define RPM_SNOC_CLK 4 +#define RPM_SNOC_A_CLK 5 +#define RPM_BIMC_CLK 6 +#define RPM_BIMC_A_CLK 7 +#define RPM_QDSS_CLK 8 +#define RPM_QDSS_A_CLK 9 +#define RPM_BB_CLK1 10 +#define RPM_BB_CLK1_A 11 +#define RPM_BB_CLK2 12 +#define RPM_BB_CLK2_A 13 +#define RPM_RF_CLK1 14 +#define RPM_RF_CLK1_A 15 +#define RPM_RF_CLK2 16 +#define RPM_RF_CLK2_A 17 +#define RPM_BB_CLK1_PIN 18 +#define RPM_BB_CLK1_A_PIN 19 +#define RPM_BB_CLK2_PIN 20 +#define RPM_BB_CLK2_A_PIN 21 +#define RPM_RF_CLK1_PIN 22 +#define RPM_RF_CLK1_A_PIN 23 +#define RPM_RF_CLK2_PIN 24 +#define RPM_RF_CLK2_A_PIN 25 +#define RPM_AGGR1_NOC_CLK 26 +#define RPM_AGGR1_NOC_A_CLK 27 +#define RPM_AGGR2_NOC_CLK 28 +#define RPM_AGGR2_NOC_A_CLK 29 +#define RPM_CNOC_CLK 30 +#define RPM_CNOC_A_CLK 31 +#define RPM_MMAXI_CLK 32 +#define RPM_MMAXI_A_CLK 33 +#define RPM_IPA_CLK 34 +#define RPM_IPA_A_CLK 35 +#define RPM_CE1_CLK 36 +#define RPM_CE1_A_CLK 37 +#define RPM_DIV_CLK1 38 +#define RPM_DIV_CLK1_AO 39 +#define RPM_DIV_CLK2 40 +#define RPM_DIV_CLK2_AO 41 +#define RPM_DIV_CLK3 42 +#define RPM_DIV_CLK3_AO 43 + +#endif diff --git a/include/linux/ipc_logging.h b/include/linux/ipc_logging.h new file mode 100644 index 00000000000000..780a82d2f1b4b3 --- /dev/null +++ b/include/linux/ipc_logging.h @@ -0,0 +1,290 @@ +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+	TSV_TYPE_MSG_START = 1,
+	TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+	TSV_TYPE_STRING,
+	TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+	unsigned char type;
+	unsigned char size;	/* size of data field */
+};
+
+struct encode_context {
+	struct tsv_header hdr;
+	char buff[MAX_MSG_SIZE];
+	int offset;
+};
+
+struct decode_context {
+	int output_format;	/* 0 = debugfs */
+	char *buff;		/* output buffer */
+	int size;		/* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ * Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname: Name of the directory entry under DEBUGFS
+ * @user_version: Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname,
+			     uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type: Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_qtimer_write: Writes the current QTimer timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n: Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @data: Byte array to write
+ * @data_size: Size of the byte array in bytes
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Commit a fully encoded message to the log space
+ *
+ * @ctxt: Debug log context created using ipc_log_context_create()
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt: Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...)
+	__printf(2, 3);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt: logging context
+ * @buff: buffer to receive the data
+ * @size: size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized. This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * Print a string to decode context.
+ * @dctxt: Decode context
+ * @args: printf args
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+	int i; \
+	i = scnprintf(dctxt->buff, dctxt->size, args); \
+	dctxt->buff += i; \
+	dctxt->size -= i; \
+} while (0)
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_qtimer_read: Reads a QTimer timestamp
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+		     struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ * unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context to which the deserialization function has
+ *	  to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ *	  which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, negative value on failure
+ */
+int add_deserialization_func(void *ctxt, int type,
+			     void (*dfunc)(struct encode_context *,
+					   struct decode_context *));
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
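+ *
+ * A minimal sketch of the logging lifecycle with this API (the "my-mod"
+ * name and page count are illustrative):
+ *
+ *	void *ilctxt = ipc_log_context_create(2, "my-mod", 0);
+ *
+ *	if (ilctxt) {
+ *		ipc_log_string(ilctxt, "event %d handled", 42);
+ *		ipc_log_context_destroy(ilctxt);
+ *	}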
+ */ +int ipc_log_context_destroy(void *ctxt); + +#else + +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } + +static inline void msg_encode_start(struct encode_context *ectxt, + uint32_t type) { } + +static inline int tsv_timestamp_write(struct encode_context *ectxt) +{ return -EINVAL; } + +static inline int tsv_qtimer_write(struct encode_context *ectxt) +{ return -EINVAL; } + +static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer) +{ return -EINVAL; } + +static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n) +{ return -EINVAL; } + +static inline int tsv_byte_array_write(struct encode_context *ectxt, + void *data, int data_size) +{ return -EINVAL; } + +static inline void msg_encode_end(struct encode_context *ectxt) { } + +static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { } + +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } + +static inline int ipc_log_extract(void *ilctxt, char *buff, int size) +{ return -EINVAL; } + +#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0) + +static inline void tsv_timestamp_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline void tsv_qtimer_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline void tsv_pointer_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline int32_t tsv_int32_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) +{ return 0; } + +static inline void tsv_byte_array_read(struct encode_context *ectxt, + struct decode_context *dctxt, const char *format) { } + +static inline int add_deserialization_func(void *ctxt, int type, + void (*dfunc)(struct encode_context *, + struct decode_context *)) +{ return 0; } + +static inline int ipc_log_context_destroy(void *ctxt) +{ return 0; } + +#endif + +#endif diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index 2a53dcaeeeed42..ebdabd669d932a 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -26,6 +26,10 @@ struct qcom_smd_rpm; #define QCOM_SMD_RPM_SMPB 0x62706d73 #define QCOM_SMD_RPM_SPDM 0x63707362 #define QCOM_SMD_RPM_VSA 0x00617376 +#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d +#define QCOM_SMD_RPM_IPA_CLK 0x617069 +#define QCOM_SMD_RPM_CE_CLK 0x6563 +#define QCOM_SMD_RPM_AGGR_CLK 0x72676761 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, int state, diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d0cb6d189a0a02..f02da589a440cc 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -17,40 +17,41 @@ struct qcom_smd_id { }; /** - * struct qcom_smd_device - smd device struct + * struct qcom_ipc_device - ipc device struct * @dev: the device struct * @channel: handle to the smd channel for this device */ -struct qcom_smd_device { +struct qcom_ipc_device { struct device dev; struct qcom_smd_channel *channel; }; /** - * struct qcom_smd_driver - smd driver struct + * struct qcom_ipc_driver - ipc driver struct * @driver: underlying device driver * @smd_match_table: static channel match table - * @probe: invoked when the smd channel is found - * @remove: invoked when the smd channel is closed + * @probe: invoked when the ipc channel is found + * @remove: invoked when the ipc channel is closed * 
@callback: invoked when an inbound message is received on the channel, * should return 0 on success or -EBUSY if the data cannot be * consumed at this time */ -struct qcom_smd_driver { +struct qcom_ipc_driver { struct device_driver driver; const struct qcom_smd_id *smd_match_table; - int (*probe)(struct qcom_smd_device *dev); - void (*remove)(struct qcom_smd_device *dev); - int (*callback)(struct qcom_smd_device *, const void *, size_t); + int (*probe)(struct qcom_ipc_device *dev); + void (*remove)(struct qcom_ipc_device *dev); + int (*callback)(void *, const void *, size_t); }; -int qcom_smd_driver_register(struct qcom_smd_driver *drv); -void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); +int qcom_ipc_driver_register(struct qcom_ipc_driver *drv); +void qcom_ipc_driver_unregister(struct qcom_ipc_driver *drv); +void qcom_ipc_bus_register(struct bus_type *bus); -#define module_qcom_smd_driver(__smd_driver) \ - module_driver(__smd_driver, qcom_smd_driver_register, \ - qcom_smd_driver_unregister) +#define module_qcom_ipc_driver(__ipc_driver) \ + module_driver(__ipc_driver, qcom_ipc_driver_register, \ + qcom_ipc_driver_unregister) int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0e32bc71245ef4..ca73c503b92a75 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -311,6 +311,7 @@ enum { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ + __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) #define create_workqueue(name) \ - alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) + alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) #define create_freezable_workqueue(name) \ - alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ - 1, (name)) + alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ + WQ_MEM_RECLAIM, 1, (name)) #define create_singlethread_workqueue(name) \ - alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) + alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h new file mode 100644 index 00000000000000..cb497ead10999c --- /dev/null +++ b/include/soc/qcom/glink.h @@ -0,0 +1,430 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#ifndef _SOC_QCOM_GLINK_H_
+#define _SOC_QCOM_GLINK_H_
+
+#include <linux/types.h>
+
+/* Maximum size (including null) for channel, edge, or transport names */
+#define GLINK_NAME_SIZE 32
+
+/* Maximum packet size for TX and RX */
+#define GLINK_MAX_PKT_SIZE SZ_1M
+
+/**
+ * G-Link Port State Notification Values
+ */
+enum {
+	GLINK_CONNECTED,
+	GLINK_LOCAL_DISCONNECTED,
+	GLINK_REMOTE_DISCONNECTED,
+};
+
+/**
+ * G-Link Open Options
+ *
+ * Used to define the glink_open_config::options field which is passed into
+ * glink_open().
+ */
+enum {
+	GLINK_OPT_INITIAL_XPORT = BIT(0),
+	GLINK_OPT_RX_INTENT_NOTIF = BIT(1),
+};
+
+/**
+ * Open configuration.
+ *
+ * priv: Private data passed into user callbacks
+ * options: Open option flags
+ * rx_intent_req_timeout_ms: Timeout for requesting an RX intent, in
+ *                           milliseconds; if set to 0, timeout is infinite
+ * notify_rx: Receive notification function (required)
+ * notify_tx_done: Transmit-done notification function (required)
+ * notify_state: State-change notification (required)
+ * notify_rx_intent_req: Receive intent request (optional)
+ * notify_rxv: Receive notification function for vector buffers
+ *             (required if notify_rx is not provided)
+ * notify_rx_sigs: Signal-change notification (optional)
+ * notify_rx_tracer_pkt: Receive notification for tracer packet
+ * notify_remote_rx_intent: Receive notification for remote-queued RX intent
+ *
+ * This structure is passed into the glink_open() call to setup
+ * configuration handles. All unused fields should be set to 0.
+ *
+ * The structure is copied internally before the call to glink_open() returns.
+ */
+struct glink_open_config {
+	void *priv;
+	uint32_t options;
+
+	const char *transport;
+	const char *edge;
+	const char *name;
+	unsigned int rx_intent_req_timeout_ms;
+
+	int (*notify_rx)(void *handle, const void *ptr, size_t size);
+	void (*notify_tx_done)(void *handle, const void *priv,
+			       const void *pkt_priv, const void *ptr);
+	void (*notify_state)(void *handle, const void *priv, unsigned event);
+	bool (*notify_rx_intent_req)(void *handle, const void *priv,
+				     size_t req_size);
+	void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+			   void *iovec, size_t size,
+			   void * (*vbuf_provider)(void *iovec, size_t offset,
+						   size_t *size),
+			   void * (*pbuf_provider)(void *iovec, size_t offset,
+						   size_t *size));
+	void (*notify_rx_sigs)(void *handle, const void *priv,
+			       uint32_t old_sigs, uint32_t new_sigs);
+	void (*notify_rx_abort)(void *handle, const void *priv,
+				const void *pkt_priv);
+	void (*notify_tx_abort)(void *handle, const void *priv,
+				const void *pkt_priv);
+	void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+				     const void *pkt_priv, const void *ptr,
+				     size_t size);
+	void (*notify_remote_rx_intent)(void *handle, const void *priv,
+					size_t size);
+};
+
+enum glink_link_state {
+	GLINK_LINK_STATE_UP,
+	GLINK_LINK_STATE_DOWN,
+};
+
+/**
+ * Data structure containing information during Link State callback
+ * transport: String identifying the transport.
+ * edge: String identifying the edge.
+ * link_state: Link state (UP/DOWN).
+ */
+struct glink_link_state_cb_info {
+	const char *transport;
+	const char *edge;
+	enum glink_link_state link_state;
+};
+
+/**
+ * Data structure containing information for link state registration
+ * transport: String identifying the transport.
+ * edge: String identifying the edge.
+ * glink_link_state_notif_cb: Callback function used to pass the event.
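+ *
+ * A minimal sketch of the expected flow — register for link state
+ * notifications, then open the channel once the link is up (the
+ * transport/edge strings and callback names are illustrative only):
+ *
+ *	static void my_link_cb(struct glink_link_state_cb_info *cb_info,
+ *			       void *priv)
+ *	{
+ *		if (cb_info->link_state == GLINK_LINK_STATE_UP)
+ *			schedule_work(&my_open_work);
+ *	}
+ *
+ *	static struct glink_link_info my_link_info = {
+ *		.transport = "smem",
+ *		.edge = "rpm",
+ *		.glink_link_state_notif_cb = my_link_cb,
+ *	};
+ *
+ *	void *notif_handle = glink_register_link_state_cb(&my_link_info,
+ *							  NULL);
+ *
+ * From my_open_work, glink_open() is then called with a populated
+ * struct glink_open_config, and data is sent with glink_tx().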
+ */
+struct glink_link_info {
+	const char *transport;
+	const char *edge;
+	void (*glink_link_state_notif_cb)(
+			struct glink_link_state_cb_info *cb_info,
+			void *priv);
+};
+
+enum tx_flags {
+	GLINK_TX_REQ_INTENT = 0x1,
+	GLINK_TX_SINGLE_THREADED = 0x2,
+	GLINK_TX_TRACER_PKT = 0x4,
+	GLINK_TX_ATOMIC = 0x8,
+};
+
+#ifdef CONFIG_MSM_GLINK
+/**
+ * Open GLINK channel.
+ *
+ * @cfg_ptr: Open configuration structure (the structure is copied before
+ *           glink_open returns). All unused fields should be zero-filled.
+ *
+ * This must not be called from link state callback context; clients should
+ * invoke it from their own thread.
+ *
+ * Return: Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg_ptr);
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle: handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return: 0 on success; -EINVAL for invalid handle, -EBUSY if close is
+ * already in progress, standard Linux error code otherwise.
+ */
+int glink_close(void *handle);
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ *            notify_tx_done notification
+ * @data: pointer to the data
+ * @size: size of data
+ * @tx_flags: Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ * transmit operation (not fully opened); -EAGAIN if remote side
+ * has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+	     uint32_t tx_flags);
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that is returned when a packet is received
+ * @size: maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size);
+
+/**
+ * glink_rx_intent_exists() - Check if an intent of size exists.
+ *
+ * @handle: handle returned by glink_open()
+ * @size: size of an intent to check or 0 for any intent
+ *
+ * Return: TRUE if an intent of size greater than or equal to @size exists,
+ * else FALSE
+ */
+bool glink_rx_intent_exists(void *handle, size_t size);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle: handle returned by glink_open()
+ * @ptr: data pointer provided in the notify_rx() call
+ * @reuse: if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse);
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ * + * @handle: handle returned by glink_open() + * @pkt_priv: opaque data value that will be returned to client with + * notify_tx_done notification + * @iovec: pointer to the vector (must remain valid until notify_tx_done + * notification) + * @size: size of data/vector + * @vbuf_provider: Client provided helper function to iterate the vector + * in physical address space + * @pbuf_provider: Client provided helper function to iterate the vector + * in virtual address space + * @tx_flags: Flags to specify transmit specific options + * + * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for + * transmit operation (not fully opened); -EAGAIN if remote side has + * not provided a receive intent that is big enough. + */ +int glink_txv(void *handle, void *pkt_priv, + void *iovec, size_t size, + void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size), + void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size), + uint32_t tx_flags); + +/** + * glink_sigs_set() - Set the local signals for the GLINK channel + * + * @handle: handle returned by glink_open() + * @sigs: modified signal value + * + * Return: 0 for success; standard Linux error code for failure case + */ +int glink_sigs_set(void *handle, uint32_t sigs); + +/** + * glink_sigs_local_get() - Get the local signals for the GLINK channel + * + * handle: handle returned by glink_open() + * sigs: Pointer to hold the signals + * + * Return: 0 for success; standard Linux error code for failure case + */ +int glink_sigs_local_get(void *handle, uint32_t *sigs); + +/** + * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel + * + * handle: handle returned by glink_open() + * sigs: Pointer to hold the signals + * + * Return: 0 for success; standard Linux error code for failure case + */ +int glink_sigs_remote_get(void *handle, uint32_t *sigs); + +/** + * glink_register_link_state_cb() - Register for link state notification + * @link_info: Data structure containing the link identification and callback. + * @priv: Private information to be passed with the callback. + * + * This function is used to register a notifier to receive the updates about a + * link's/transport's state. This notifier needs to be registered first before + * an attempt to open a channel. + * + * Return: a reference to the notifier handle. + */ +void *glink_register_link_state_cb(struct glink_link_info *link_info, + void *priv); + +/** + * glink_unregister_link_state_cb() - Unregister the link state notification + * notif_handle: Handle to be unregistered. + * + * This function is used to unregister a notifier to stop receiving the updates + * about a link's/transport's state. + */ +void glink_unregister_link_state_cb(void *notif_handle); + +/** + * glink_qos_latency() - Register the latency QoS requirement + * @handle: Channel handle in which the latency is required. + * @latency_us: Latency requirement in units of micro-seconds. + * @pkt_size: Worst case packet size for which the latency is required. + * + * This function is used to register the latency requirement for a channel + * and ensures that the latency requirement for this channel is met without + * impacting the existing latency requirements of other channels. + * + * Return: 0 if QoS request is achievable, standard Linux error codes on error + */ +int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size); + +/** + * glink_qos_cancel() - Cancel or unregister the QoS request + * @handle: Channel handle for which the QoS request is cancelled. 
+ * + * This function is used to cancel/unregister the QoS requests for a channel. + * + * Return: 0 on success, standard Linux error codes on failure + */ +int glink_qos_cancel(void *handle); + +/** + * glink_qos_start() - Start of the transmission requiring QoS + * @handle: Channel handle in which the transmit activity is performed. + * + * This function is called by the clients to indicate G-Link regarding the + * start of the transmission which requires a certain QoS. The clients + * must account for the QoS ramp time to ensure meeting the QoS. + * + * Return: 0 on success, standard Linux error codes on failure + */ +int glink_qos_start(void *handle); + +/** + * glink_qos_get_ramp_time() - Get the QoS ramp time + * @handle: Channel handle for which the QoS ramp time is required. + * @pkt_size: Worst case packet size. + * + * This function is called by the clients to obtain the ramp time required + * to meet the QoS requirements. + * + * Return: QoS ramp time is returned in units of micro-seconds + */ +unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size); + +#else /* CONFIG_MSM_GLINK */ +static inline void *glink_open(const struct glink_open_config *cfg_ptr) +{ + return NULL; +} + +static inline int glink_close(void *handle) +{ + return -ENODEV; +} + +static inline int glink_tx(void *handle, void *pkt_priv, void *data, + size_t size, uint32_t tx_flags) +{ + return -ENODEV; +} + +static inline int glink_queue_rx_intent(void *handle, const void *pkt_priv, + size_t size) +{ + return -ENODEV; +} + +static inline bool glink_rx_intent_exists(void *handle, size_t size) +{ + return -ENODEV; +} + +static inline int glink_rx_done(void *handle, const void *ptr, bool reuse) +{ + return -ENODEV; +} + +static inline int glink_txv(void *handle, void *pkt_priv, + void *iovec, size_t size, + void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size), + void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size), + uint32_t tx_flags) +{ + return -ENODEV; +} + +static inline int glink_sigs_set(void *handle, uint32_t sigs) +{ + return -ENODEV; +} + +static inline int glink_sigs_local_get(void *handle, uint32_t *sigs) +{ + return -ENODEV; +} + +static inline int glink_sigs_remote_get(void *handle, uint32_t *sigs) +{ + return -ENODEV; +} + +static inline void *glink_register_link_state_cb( + struct glink_link_info *link_info, void *priv) +{ + return NULL; +} + +static inline void glink_unregister_link_state_cb(void *notif_handle) +{ +} + +static inline int glink_qos_latency(void *handle, unsigned long latency_us, + size_t pkt_size) +{ + return -ENODEV; +} + +static inline int glink_qos_cancel(void *handle) +{ + return -ENODEV; +} + +static inline int glink_qos_start(void *handle) +{ + return -ENODEV; +} + +static inline unsigned long glink_qos_get_ramp_time(void *handle, + size_t pkt_size) +{ + return 0; +} +#endif /* CONFIG_MSM_GLINK */ +#endif /* _SOC_QCOM_GLINK_H_ */ diff --git a/include/soc/qcom/glink_rpm_xprt.h b/include/soc/qcom/glink_rpm_xprt.h new file mode 100644 index 00000000000000..8dfd43783e5749 --- /dev/null +++ b/include/soc/qcom/glink_rpm_xprt.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _SOC_QCOM_GLINK_RPM_XPRT_H_ +#define _SOC_QCOM_GLINK_RPM_XPRT_H_ + +#include + +#ifdef CONFIG_MSM_GLINK + +/** + * glink_rpm_rx_poll() - Poll and receive any available events + * @handle: Channel handle in which this operation is performed. + * + * This function is used to poll and receive events and packets while the + * receive interrupt from RPM is disabled. + * + * Note that even if a return value > 0 is returned indicating that some events + * were processed, clients should only use the notification functions passed + * into glink_open() to determine if an entire packet has been received since + * some events may be internal details that are not visible to clients. + * + * Return: 0 for no packets available; > 0 for events available; standard + * Linux error codes on failure. + */ +int glink_rpm_rx_poll(void *handle); + +/** + * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt + * @handle: Channel handle in which this operation is performed. + * @mask: Flag to mask or unmask the interrupt. + * @pstruct: Pointer to any platform specific data. + * + * This function is used to mask or unmask the receive interrupt from RPM. + * "mask" set to true indicates masking the interrupt and when set to false + * indicates unmasking the interrupt. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct); + +/** + * glink_wait_link_down() - Return whether read/write indices in FIFO are all 0. + * @handle: Channel handle in which this operation is performed. + * + * This function returns the status of the read/write indices in the FIFO. + * + * Return: 1 if the indices are all 0, 0 otherwise. + */ +int glink_wait_link_down(void *handle); + +#else +static inline int glink_rpm_rx_poll(void *handle) +{ + return -ENODEV; +} + +static inline int glink_rpm_mask_rx_interrupt(void *handle, bool mask, + void *pstruct) +{ + return -ENODEV; +} +static inline int glink_wait_link_down(void *handle) +{ + return -ENODEV; +} + +#endif /* CONFIG_MSM_GLINK */ + +#endif /* _SOC_QCOM_GLINK_RPM_XPRT_H_ */ diff --git a/include/soc/qcom/rpm-notifier.h b/include/soc/qcom/rpm-notifier.h new file mode 100644 index 00000000000000..ea6d95e313a8dc --- /dev/null +++ b/include/soc/qcom/rpm-notifier.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H +#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H + +struct msm_rpm_notifier_data { + uint32_t rsc_type; + uint32_t rsc_id; + uint32_t key; + uint32_t size; + uint8_t *value; +}; +/** + * msm_rpm_register_notifier - Register for sleep set notifications + * + * @nb - notifier block to register + * + * return 0 on success, errno on failure. 
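+ *
+ * A minimal registration sketch (the callback and notifier block names
+ * are illustrative):
+ *
+ *	static int my_rpm_cb(struct notifier_block *nb,
+ *			     unsigned long action, void *data)
+ *	{
+ *		struct msm_rpm_notifier_data *notif = data;
+ *
+ *		pr_debug("sleep set update: type %u, id %u, key %u\n",
+ *			 notif->rsc_type, notif->rsc_id, notif->key);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_rpm_nb = {
+ *		.notifier_call = my_rpm_cb,
+ *	};
+ *
+ *	msm_rpm_register_notifier(&my_rpm_nb);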
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print - flag to enable printing contents of the sleep buffer.
+ * @cpumask - cpumask of next wakeup cpu
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is RPM message
+ * pending acknowledgement.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h
new file mode 100644
index 00000000000000..020ffe3082847b
--- /dev/null
+++ b/include/soc/qcom/rpm-smd.h
@@ -0,0 +1,309 @@
+/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+	MSM_RPM_CTX_ACTIVE_SET,
+	MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+	uint32_t key;
+	uint32_t length;
+	uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of key/value pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of key/value pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a key/value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identifying the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a key/value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identifying the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_free_request() - Clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noack() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages, but this API does not wait
+ * on the ACK for this message id and it does not add the message id to the
+ * wait list.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns NULL on success and PTR_ERR on a failed transaction.
+ */
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
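+ *
+ * A typical request/ack flow with this family of calls (the resource
+ * type, id, key and value below are illustrative placeholders):
+ *
+ *	struct msm_rpm_request *req;
+ *	uint32_t value = 1;
+ *	int msg_id;
+ *
+ *	req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
+ *				     rsc_type, rsc_id, 1);
+ *	if (req) {
+ *		msm_rpm_add_kvp_data(req, key, (uint8_t *)&value,
+ *				     sizeof(value));
+ *		msg_id = msm_rpm_send_request(req);
+ *		if (msg_id)
+ *			msm_rpm_wait_for_ack(msg_id);
+ *		msm_rpm_free_request(req);
+ *	}
+ *
+ * For one-shot writes, the msm_rpm_send_message() wrapper below performs
+ * the same sequence from an array of struct msm_rpm_kvp entries.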
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() - Wrapper function for clients to send data given an
+ * array of key/value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key/value pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noack() - Wrapper function for clients to send data
+ * given an array of key/value pairs without waiting for ack.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key/value pairs associated with the message.
+ *
+ * returns NULL on success and PTR_ERR(errno) on failure.
+ */
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() - Wrapper function for clients to send data
+ * given an array of key/value pairs. This function is similar to
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should prefer msm_rpm_send_message() when possible for
+ * better system performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key/value pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */ +int __init msm_rpm_driver_init(void); + +#else + +static inline struct msm_rpm_request *msm_rpm_create_request( + enum msm_rpm_set set, uint32_t rsc_type, + uint32_t rsc_id, int num_elements) +{ + return NULL; +} + +static inline struct msm_rpm_request *msm_rpm_create_request_noirq( + enum msm_rpm_set set, uint32_t rsc_type, + uint32_t rsc_id, int num_elements) +{ + return NULL; + +} +static inline uint32_t msm_rpm_add_kvp_data(struct msm_rpm_request *handle, + uint32_t key, const uint8_t *data, int count) +{ + return 0; +} +static inline uint32_t msm_rpm_add_kvp_data_noirq( + struct msm_rpm_request *handle, uint32_t key, + const uint8_t *data, int count) +{ + return 0; +} + +static inline void msm_rpm_free_request(struct msm_rpm_request *handle) +{ + return; +} + +static inline int msm_rpm_send_request(struct msm_rpm_request *handle) +{ + return 0; +} + +static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle) +{ + return 0; + +} + +static inline void *msm_rpm_send_request_noack(struct msm_rpm_request *handle) +{ + return NULL; +} + +static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type, + uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems) +{ + return 0; +} + +static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set, + uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp, + int nelems) +{ + return 0; +} + +static inline void *msm_rpm_send_message_noack(enum msm_rpm_set set, + uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp, + int nelems) +{ + return NULL; +} + +static inline int msm_rpm_wait_for_ack(uint32_t msg_id) +{ + return 0; + +} +static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id) +{ + return 0; +} + +static inline int __init msm_rpm_driver_init(void) +{ + return 0; +} +#endif +#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/ diff --git a/include/soc/qcom/tracer_pkt.h b/include/soc/qcom/tracer_pkt.h new file mode 100644 index 00000000000000..2657b79b1ed6f1 --- /dev/null +++ b/include/soc/qcom/tracer_pkt.h @@ -0,0 +1,130 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _TRACER_PKT_H_ +#define _TRACER_PKT_H_ + +#include +#include + +#ifdef CONFIG_TRACER_PKT + +/** + * tracer_pkt_init() - initialize the tracer packet + * @data: Pointer to the buffer to be initialized with a tracer + * packet. + * @data_len: Length of the buffer. + * @client_event_cfg: Client-specific event configuration mask. + * @glink_event_cfg: G-Link-specific event configuration mask. + * @pkt_priv: Private/Cookie information to be added to the tracer + * packet. + * @pkt_priv_len: Length of the private data. + * + * This function is used to initialize a buffer with the tracer packet header. + * The tracer packet header includes the data as passed by the elements in the + * parameters. + * + * Return: 0 on success, standard Linux error codes on failure. 
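+ *
+ * A minimal initialization sketch (the buffer size, event masks and
+ * cookie value are illustrative):
+ *
+ *	char pkt[128];
+ *	uint32_t cookie = 0xa5a5;
+ *
+ *	if (!tracer_pkt_init(pkt, sizeof(pkt), 0x1, 0x1,
+ *			     &cookie, sizeof(cookie)))
+ *		tracer_pkt_log_event(pkt, 1);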
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len);
+
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ *				packet
+ * @data:		Pointer to the buffer to be initialized with event
+ *			configuration mask.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+			     uint32_t glink_event_cfg);
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @event_id:	Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id);
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ *				     packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @data_len:	Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: size of the buffer required to hold the hex dump on success,
+ * standard Linux error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf:	Buffer to contain the hex dump of the tracer packet.
+ * @buf_len:	Length of the hex dump buffer.
+ * @data:	Buffer containing the tracer packet.
+ * @data_len:	Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
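+ *
+ * A dump sketch pairing this call with tracer_pkt_calc_hex_dump_size()
+ * (error handling trimmed for brevity):
+ *
+ *	size_t len = tracer_pkt_calc_hex_dump_size(pkt, pkt_len);
+ *	char *out = kmalloc(len, GFP_KERNEL);
+ *
+ *	if (out && !tracer_pkt_hex_dump(out, len, pkt, pkt_len))
+ *		pr_debug("%s\n", out);
+ *	kfree(out);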
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data,
+			size_t data_len);
+
+#else
+
+static inline int tracer_pkt_init(void *data, size_t data_len,
+		uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		void *pkt_priv, size_t pkt_priv_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_set_event_cfg(void *data,
+		uint16_t client_event_cfg, uint32_t glink_event_cfg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data,
+				      size_t data_len)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TRACER_PKT */
+#endif /* _TRACER_PKT_H_ */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 61a0264e28f9b5..dc7faad63646e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2355,7 +2355,8 @@ static void check_flush_dependency(struct workqueue_struct *target_wq,
 	WARN_ONCE(current->flags & PF_MEMALLOC,
 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
 		  current->pid, current->comm, target_wq->name, target_func);
-	WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
+	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
+			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
 		  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
 		  worker->current_pwq->wq->name, worker->current_func,
 		  target_wq->name, target_func);
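The workqueue hunk above narrows the flush-dependency warning: legacy
create*_workqueue() users now carry __WQ_LEGACY alongside the implicit
WQ_MEM_RECLAIM, and only workqueues that opted into WQ_MEM_RECLAIM
explicitly trigger the warning when flushing. A condensed restatement of
the updated condition, for illustration only (not part of the patch):

	/*
	 * Warn only when the flushing workqueue carries WQ_MEM_RECLAIM
	 * without __WQ_LEGACY, i.e. it is not a create*_workqueue() user.
	 */
	bool warn = worker &&
		    (worker->current_pwq->wq->flags &
		     (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM;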