Skip to content

Commit

Permalink
Merge branch 'am65-cpsw-preemption-coalescing'
Browse files Browse the repository at this point in the history
Roger Quadros says:

====================
net: ethernet: am65-cpsw: Add mqprio, frame preemption & coalescing

This series adds mqprio qdisc offload in channel mode,
Frame Preemption MAC merge support and RX/TX coalescing
for AM65 CPSW driver.

In v11 following changes were made
- Fix patch "net: ethernet: ti: am65-cpsw: add mqprio qdisc offload in channel mode"
by including units.h

Changelog information in each patch file.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
davem330 committed Dec 23, 2023
2 parents 2437c0f + e4918f9 commit d11db8a
Show file tree
Hide file tree
Showing 9 changed files with 1,100 additions and 187 deletions.
14 changes: 8 additions & 6 deletions drivers/net/ethernet/ti/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -134,14 +134,16 @@ config TI_K3_AM65_CPTS
protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
and PCIe Subsystem Precision Time Measurement (PTM).

config TI_AM65_CPSW_TAS
bool "Enable TAS offload in AM65 CPSW"
config TI_AM65_CPSW_QOS
bool "Enable QoS offload features in AM65 CPSW"
depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
help
Say y here to support Time Aware Shaper(TAS) offload in AM65 CPSW.
AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
defined in IEEE 802.1Q 2018. The EST scheduler runs on CPTS and the
TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
This option enables QoS offload features in AM65 CPSW like
Time Aware Shaper (TAS) / Enhanced Scheduled Traffic (EST),
MQPRIO qdisc offload and Frame-Preemption MAC Merge / Interspersing
Express Traffic (IET).
The EST scheduler runs on CPTS and the TAS/EST schedule is
updated in the Fetch RAM memory of the CPSW.

config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
Expand Down
3 changes: 2 additions & 1 deletion drivers/net/ethernet/ti/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o

obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o
ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o

Expand Down
246 changes: 246 additions & 0 deletions drivers/net/ethernet/ti/am65-cpsw-ethtool.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include <linux/pm_runtime.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "cpsw_ale.h"
#include "am65-cpts.h"

Expand Down Expand Up @@ -670,6 +671,9 @@ static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,

stats = port->stat_base;

if (s->src != ETHTOOL_MAC_STATS_SRC_AGGREGATE)
return;

s->FramesTransmittedOK = readl_relaxed(&stats->tx_good_frames);
s->SingleCollisionFrames = readl_relaxed(&stats->tx_single_coll_frames);
s->MultipleCollisionFrames = readl_relaxed(&stats->tx_mult_coll_frames);
Expand Down Expand Up @@ -740,6 +744,240 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
return 0;
}

/* Enable/disable IET (frame preemption) reception on a port by toggling
 * the IET port-enable bit in the port control register, then refresh the
 * common IET enable state for the whole switch.
 */
static void am65_cpsw_port_iet_rx_enable(struct am65_cpsw_port *port, bool enable)
{
	u32 ctl = readl(port->port_base + AM65_CPSW_PN_REG_CTL);

	ctl &= ~AM65_CPSW_PN_CTL_IET_PORT_EN;
	if (enable)
		ctl |= AM65_CPSW_PN_CTL_IET_PORT_EN;
	writel(ctl, port->port_base + AM65_CPSW_PN_REG_CTL);

	am65_cpsw_iet_common_enable(port->common);
}

/* Enable/disable preemptible-MAC transmission on a port via the PENABLE
 * bit of the per-port IET control register.
 */
static void am65_cpsw_port_iet_tx_enable(struct am65_cpsw_port *port, bool enable)
{
	u32 iet_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

	iet_ctrl &= ~AM65_CPSW_PN_IET_MAC_PENABLE;
	if (enable)
		iet_ctrl |= AM65_CPSW_PN_IET_MAC_PENABLE;
	writel(iet_ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
}

/* ethtool .get_mm callback: report the MAC merge (frame preemption) state
 * for this port by decoding the per-port IET control/status registers.
 * Returns 0 on success, -EOPNOTSUPP when QoS support is not built in.
 */
static int am65_cpsw_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_ndev_priv *priv = netdev_priv(ndev);
	u32 port_ctrl, iet_ctrl, iet_status;
	u32 add_frag_size;

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS))
		return -EOPNOTSUPP;

	/* mm_lock serializes against am65_cpsw_set_mm() so a concurrent
	 * reconfiguration cannot be observed half-applied.
	 */
	mutex_lock(&priv->mm_lock);

	iet_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	port_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_CTL);

	state->tx_enabled = !!(iet_ctrl & AM65_CPSW_PN_IET_MAC_PENABLE);
	state->pmac_enabled = !!(port_ctrl & AM65_CPSW_PN_CTL_IET_PORT_EN);

	iet_status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);

	/* Map hardware verify status bits onto the ethtool enum; DISABLEVERIFY
	 * takes precedence over any recorded verification result.
	 */
	if (iet_ctrl & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
	else if (iet_status & AM65_CPSW_PN_MAC_VERIFIED)
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
	else if (iet_status & AM65_CPSW_PN_MAC_VERIFY_FAIL)
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
	else
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;

	/* Hardware stores the additional-fragment size; convert to the
	 * minimum TX fragment size ethtool expects.
	 */
	add_frag_size = AM65_CPSW_PN_IET_MAC_GET_ADDFRAGSIZE(iet_ctrl);
	state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);

	/* Errata i2208: RX min fragment size cannot be less than 124 */
	state->rx_min_frag_size = 124;

	/* FPE active if common tx_enabled and verification success or disabled (forced) */
	state->tx_active = state->tx_enabled &&
			   (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
			    state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
	state->verify_enabled = !(iet_ctrl & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY);

	/* verify_time is cached in software; see am65_cpsw_set_mm() */
	state->verify_time = port->qos.iet.verify_time_ms;

	/* 802.3-2018 clause 30.14.1.6, says that the aMACMergeVerifyTime
	 * variable has a range between 1 and 128 ms inclusive. Limit to that.
	 */
	state->max_verify_time = 128;

	mutex_unlock(&priv->mm_lock);

	return 0;
}

/* ethtool .set_mm callback: apply a MAC merge (frame preemption)
 * configuration to this port. Programs the per-port IET control register,
 * adjusts FIFO MAX_BLKS while the pMAC is enabled, and defers actual
 * preemption enable/verification to link-up.
 * Returns 0 on success, -EOPNOTSUPP when QoS support is not built in, or
 * a negative error from fragment-size validation.
 */
static int am65_cpsw_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
			    struct netlink_ext_ack *extack)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_ndev_priv *priv = netdev_priv(ndev);
	struct am65_cpsw_iet *iet = &port->qos.iet;
	u32 val, add_frag_size;
	int err;

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS))
		return -EOPNOTSUPP;

	/* Validate and convert the requested minimum TX fragment size into
	 * the hardware "additional fragment size" encoding; sets extack on
	 * failure.
	 */
	err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, &add_frag_size, extack);
	if (err)
		return err;

	/* Serialize against am65_cpsw_get_mm() and concurrent set_mm calls */
	mutex_lock(&priv->mm_lock);

	if (cfg->pmac_enabled) {
		/* change TX & RX FIFO MAX_BLKS as per TRM recommendation */
		if (!iet->original_max_blks)
			iet->original_max_blks = readl(port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);

		writel(AM65_CPSW_PN_TX_RX_MAX_BLKS_IET,
		       port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
	} else if (iet->original_max_blks) {
		/* restore RX & TX FIFO MAX_BLKS */
		writel(iet->original_max_blks,
		       port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
	}

	am65_cpsw_port_iet_rx_enable(port, cfg->pmac_enabled);
	am65_cpsw_port_iet_tx_enable(port, cfg->tx_enabled);

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	if (cfg->verify_enabled) {
		val &= ~AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
		/* Reset Verify state machine. Verification won't start here.
		 * Verification will be done once link-up.
		 */
		val |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
	} else {
		val |= AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
		/* Clear LINKFAIL to allow verify/response packets */
		val &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
	}

	val &= ~AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK;
	val |= AM65_CPSW_PN_IET_MAC_SET_ADDFRAGSIZE(add_frag_size);
	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

	/* verify_timeout_count can only be set at valid link */
	port->qos.iet.verify_time_ms = cfg->verify_time;

	/* enable/disable preemption based on link status */
	am65_cpsw_iet_commit_preemptible_tcs(port);

	mutex_unlock(&priv->mm_lock);

	return 0;
}

static void am65_cpsw_get_mm_stats(struct net_device *ndev,
struct ethtool_mm_stats *s)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
void __iomem *base = port->stat_base;

s->MACMergeFrameAssOkCount = readl(base + AM65_CPSW_STATN_IET_RX_ASSEMBLY_OK);
s->MACMergeFrameAssErrorCount = readl(base + AM65_CPSW_STATN_IET_RX_ASSEMBLY_ERROR);
s->MACMergeFrameSmdErrorCount = readl(base + AM65_CPSW_STATN_IET_RX_SMD_ERROR);
/* CPSW Functional Spec states:
* "The IET stat aMACMergeFragCountRx is derived by adding the
* Receive Assembly Error count to this value. i.e. AM65_CPSW_STATN_IET_RX_FRAG"
*/
s->MACMergeFragCountRx = readl(base + AM65_CPSW_STATN_IET_RX_FRAG) + s->MACMergeFrameAssErrorCount;
s->MACMergeFragCountTx = readl(base + AM65_CPSW_STATN_IET_TX_FRAG);
s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
}

/* ethtool .get_coalesce callback: report the interrupt pacing timeouts,
 * converted from the stored value to microseconds. The TX value reported
 * here is that of channel 0.
 */
static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);

	coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
	coal->tx_coalesce_usecs = common->tx_chns[0].tx_pace_timeout / 1000;

	return 0;
}

/* ethtool .get_per_queue_coalesce callback: report the TX pacing timeout
 * of one TX channel, in microseconds.
 * Returns -EINVAL for an out-of-range queue index.
 */
static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
					    struct ethtool_coalesce *coal)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);

	if (queue >= AM65_CPSW_MAX_TX_QUEUES)
		return -EINVAL;

	coal->tx_coalesce_usecs = common->tx_chns[queue].tx_pace_timeout / 1000;

	return 0;
}

/* ethtool .set_coalesce callback: set the RX pacing timeout and the TX
 * pacing timeout of channel 0. Non-zero values below 20 usecs are
 * rejected; 0 is accepted (no pacing).
 * Returns 0 on success, -EINVAL on an out-of-range request.
 */
static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);

	if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
		return -EINVAL;
	if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
		return -EINVAL;

	/* Stored value is usecs * 1000 (see am65_cpsw_get_coalesce) */
	common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
	common->tx_chns[0].tx_pace_timeout = coal->tx_coalesce_usecs * 1000;

	return 0;
}

/* ethtool .set_per_queue_coalesce callback: set the TX pacing timeout of
 * one TX channel. Unlike am65_cpsw_set_coalesce(), a non-zero request
 * below 20 usecs is clamped up to 20 (with an info message) rather than
 * rejected. Returns -EINVAL for an out-of-range queue index.
 */
static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
					    struct ethtool_coalesce *coal)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);

	if (queue >= AM65_CPSW_MAX_TX_QUEUES)
		return -EINVAL;

	if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
		dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
			 queue);
		coal->tx_coalesce_usecs = 20;
	}

	common->tx_chns[queue].tx_pace_timeout = coal->tx_coalesce_usecs * 1000;

	return 0;
}

const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
Expand All @@ -758,6 +996,11 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_coalesce = am65_cpsw_get_coalesce,
.set_coalesce = am65_cpsw_set_coalesce,
.get_per_queue_coalesce = am65_cpsw_get_per_queue_coalesce,
.set_per_queue_coalesce = am65_cpsw_set_per_queue_coalesce,

.get_link = ethtool_op_get_link,
.get_link_ksettings = am65_cpsw_get_link_ksettings,
Expand All @@ -769,4 +1012,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_eee = am65_cpsw_get_eee,
.set_eee = am65_cpsw_set_eee,
.nway_reset = am65_cpsw_nway_reset,
.get_mm = am65_cpsw_get_mm,
.set_mm = am65_cpsw_set_mm,
.get_mm_stats = am65_cpsw_get_mm_stats,
};
Loading

0 comments on commit d11db8a

Please sign in to comment.