Skip to content

Commit 20bf7d0

Browse files
Lorenzo Bianconi authored and Paolo Abeni committed
net: airoha: Add sched ETS offload support
Introduce support for ETS Qdisc offload available on the Airoha EN7581 ethernet controller. In order to be effective, the ETS Qdisc must be configured as a leaf of an HTB Qdisc (HTB Qdisc offload will be added in a following patch). The ETS Qdisc available on the EN7581 ethernet controller supports at most 8 concurrent bands (QoS queues). We can enable an ETS Qdisc for each available QoS channel. Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
1 parent 2b288b8 commit 20bf7d0

File tree

1 file changed

+195
-1
lines changed

1 file changed

+195
-1
lines changed

drivers/net/ethernet/mediatek/airoha_eth.c

Lines changed: 195 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/u64_stats_sync.h>
1616
#include <net/dsa.h>
1717
#include <net/page_pool/helpers.h>
18+
#include <net/pkt_cls.h>
1819
#include <uapi/linux/ppp_defs.h>
1920

2021
#define AIROHA_MAX_NUM_GDM_PORTS 1
@@ -543,9 +544,24 @@
543544
#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
544545
#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
545546

547+
#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
548+
#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
549+
546550
#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
547551
#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
548552

553+
#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
554+
#define CNTR_EN_MASK BIT(31)
555+
#define CNTR_ALL_CHAN_EN_MASK BIT(30)
556+
#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
557+
#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
558+
#define CNTR_SRC_MASK GENMASK(27, 24)
559+
#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
560+
#define CNTR_CHAN_MASK GENMASK(7, 3)
561+
#define CNTR_QUEUE_MASK GENMASK(2, 0)
562+
563+
#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
564+
549565
#define REG_LMGR_INIT_CFG 0x1000
550566
#define LMGR_INIT_START BIT(31)
551567
#define LMGR_SRAM_MODE_MASK BIT(30)
@@ -571,9 +587,19 @@
571587
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
572588
#define TWRR_WEIGHT_BASE_MASK BIT(3)
573589

590+
#define REG_TXWRR_WEIGHT_CFG 0x1024
591+
#define TWRR_RW_CMD_MASK BIT(31)
592+
#define TWRR_RW_CMD_DONE BIT(30)
593+
#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
594+
#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
595+
#define TWRR_VALUE_MASK GENMASK(15, 0)
596+
574597
#define REG_PSE_BUF_USAGE_CFG 0x1028
575598
#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
576599

600+
#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
601+
#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
602+
577603
#define REG_GLB_TRTCM_CFG 0x1080
578604
#define GLB_TRTCM_EN_MASK BIT(31)
579605
#define GLB_TRTCM_MODE_MASK BIT(30)
@@ -722,6 +748,17 @@ enum {
722748
FE_PSE_PORT_DROP = 0xf,
723749
};
724750

751+
enum tx_sched_mode {
752+
TC_SCH_WRR8,
753+
TC_SCH_SP,
754+
TC_SCH_WRR7,
755+
TC_SCH_WRR6,
756+
TC_SCH_WRR5,
757+
TC_SCH_WRR4,
758+
TC_SCH_WRR3,
759+
TC_SCH_WRR2,
760+
};
761+
725762
struct airoha_queue_entry {
726763
union {
727764
void *buf;
@@ -812,6 +849,10 @@ struct airoha_gdm_port {
812849
int id;
813850

814851
struct airoha_hw_stats stats;
852+
853+
/* qos stats counters */
854+
u64 cpu_tx_packets;
855+
u64 fwd_tx_packets;
815856
};
816857

817858
struct airoha_eth {
@@ -1961,6 +2002,27 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
19612002
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
19622003
}
19632004

2005+
static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
2006+
{
2007+
int i;
2008+
2009+
for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
2010+
/* Tx-cpu transferred count */
2011+
airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
2012+
airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
2013+
CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
2014+
CNTR_ALL_DSCP_RING_EN_MASK |
2015+
FIELD_PREP(CNTR_CHAN_MASK, i));
2016+
/* Tx-fwd transferred count */
2017+
airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
2018+
airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
2019+
CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
2020+
CNTR_ALL_DSCP_RING_EN_MASK |
2021+
FIELD_PREP(CNTR_SRC_MASK, 1) |
2022+
FIELD_PREP(CNTR_CHAN_MASK, i));
2023+
}
2024+
}
2025+
19642026
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
19652027
{
19662028
int i;
@@ -2011,6 +2073,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
20112073

20122074
airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
20132075
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
2076+
airoha_qdma_init_qos_stats(qdma);
20142077

20152078
return 0;
20162079
}
@@ -2638,6 +2701,135 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
26382701
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
26392702
}
26402703

2704+
static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
2705+
int channel, enum tx_sched_mode mode,
2706+
const u16 *weights, u8 n_weights)
2707+
{
2708+
int i;
2709+
2710+
for (i = 0; i < AIROHA_NUM_TX_RING; i++)
2711+
airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
2712+
TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
2713+
2714+
for (i = 0; i < n_weights; i++) {
2715+
u32 status;
2716+
int err;
2717+
2718+
airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
2719+
TWRR_RW_CMD_MASK |
2720+
FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
2721+
FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
2722+
FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
2723+
err = read_poll_timeout(airoha_qdma_rr, status,
2724+
status & TWRR_RW_CMD_DONE,
2725+
USEC_PER_MSEC, 10 * USEC_PER_MSEC,
2726+
true, port->qdma,
2727+
REG_TXWRR_WEIGHT_CFG);
2728+
if (err)
2729+
return err;
2730+
}
2731+
2732+
airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
2733+
CHAN_QOS_MODE_MASK(channel),
2734+
mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
2735+
2736+
return 0;
2737+
}
2738+
2739+
static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
2740+
int channel)
2741+
{
2742+
static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2743+
2744+
return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
2745+
ARRAY_SIZE(w));
2746+
}
2747+
2748+
static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
2749+
int channel,
2750+
struct tc_ets_qopt_offload *opt)
2751+
{
2752+
struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
2753+
enum tx_sched_mode mode = TC_SCH_SP;
2754+
u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2755+
int i, nstrict = 0;
2756+
2757+
if (p->bands > AIROHA_NUM_QOS_QUEUES)
2758+
return -EINVAL;
2759+
2760+
for (i = 0; i < p->bands; i++) {
2761+
if (!p->quanta[i])
2762+
nstrict++;
2763+
}
2764+
2765+
/* this configuration is not supported by the hw */
2766+
if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
2767+
return -EINVAL;
2768+
2769+
for (i = 0; i < p->bands - nstrict; i++)
2770+
w[i] = p->weights[nstrict + i];
2771+
2772+
if (!nstrict)
2773+
mode = TC_SCH_WRR8;
2774+
else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
2775+
mode = nstrict + 1;
2776+
2777+
return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
2778+
ARRAY_SIZE(w));
2779+
}
2780+
2781+
static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
2782+
int channel,
2783+
struct tc_ets_qopt_offload *opt)
2784+
{
2785+
u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
2786+
REG_CNTR_VAL(channel << 1));
2787+
u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
2788+
REG_CNTR_VAL((channel << 1) + 1));
2789+
u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
2790+
(fwd_tx_packets - port->fwd_tx_packets);
2791+
_bstats_update(opt->stats.bstats, 0, tx_packets);
2792+
2793+
port->cpu_tx_packets = cpu_tx_packets;
2794+
port->fwd_tx_packets = fwd_tx_packets;
2795+
2796+
return 0;
2797+
}
2798+
2799+
static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
2800+
struct tc_ets_qopt_offload *opt)
2801+
{
2802+
int channel = TC_H_MAJ(opt->handle) >> 16;
2803+
2804+
if (opt->parent == TC_H_ROOT)
2805+
return -EINVAL;
2806+
2807+
switch (opt->command) {
2808+
case TC_ETS_REPLACE:
2809+
return airoha_qdma_set_tx_ets_sched(port, channel, opt);
2810+
case TC_ETS_DESTROY:
2811+
/* PRIO is default qdisc scheduler */
2812+
return airoha_qdma_set_tx_prio_sched(port, channel);
2813+
case TC_ETS_STATS:
2814+
return airoha_qdma_get_tx_ets_stats(port, channel, opt);
2815+
default:
2816+
return -EOPNOTSUPP;
2817+
}
2818+
}
2819+
2820+
static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
2821+
void *type_data)
2822+
{
2823+
struct airoha_gdm_port *port = netdev_priv(dev);
2824+
2825+
switch (type) {
2826+
case TC_SETUP_QDISC_ETS:
2827+
return airoha_tc_setup_qdisc_ets(port, type_data);
2828+
default:
2829+
return -EOPNOTSUPP;
2830+
}
2831+
}
2832+
26412833
static const struct net_device_ops airoha_netdev_ops = {
26422834
.ndo_init = airoha_dev_init,
26432835
.ndo_open = airoha_dev_open,
@@ -2646,6 +2838,7 @@ static const struct net_device_ops airoha_netdev_ops = {
26462838
.ndo_start_xmit = airoha_dev_xmit,
26472839
.ndo_get_stats64 = airoha_dev_get_stats64,
26482840
.ndo_set_mac_address = airoha_dev_set_macaddr,
2841+
.ndo_setup_tc = airoha_dev_tc_setup,
26492842
};
26502843

26512844
static const struct ethtool_ops airoha_ethtool_ops = {
@@ -2695,7 +2888,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
26952888
dev->watchdog_timeo = 5 * HZ;
26962889
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
26972890
NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
2698-
NETIF_F_SG | NETIF_F_TSO;
2891+
NETIF_F_SG | NETIF_F_TSO |
2892+
NETIF_F_HW_TC;
26992893
dev->features |= dev->hw_features;
27002894
dev->dev.of_node = np;
27012895
dev->irq = qdma->irq;

0 commit comments

Comments
 (0)