
Commit b38a54e

Merge branch 'mlx4'
Or Gerlitz says:

====================
This series adds support for the SRIOV ndo_set_vf callbacks to the mlx4 driver.

The series was done against the net-next tree as of commit 37fe066 ("net:
fix address check in rtnl_fdb_del").

We have successfully tested the series on net-next, except for the get
VF link info issue I reported earlier today on netdev; we see that
problem for both ixgbe and mlx4 VFs. Just to make sure get VF config
works OK with patch #6, we have also run it over 3.8.8.

We added two patches to the V1 series that disable HW timestamping
when running over a VF, as this isn't supported yet.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
davem330 committed Apr 27, 2013
2 parents 50754d2 + 2cccb9e commit b38a54e
Showing 8 changed files with 383 additions and 12 deletions.
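
A note on the design before the diffs: the cmd.c changes keep two copies of each VF's port settings. The new mlx4_set_vf_mac/vlan/spoofchk entry points only record the request in a per-slave vf_admin (administrative) state; mlx4_master_activate_admin_state() copies that into the vf_oper (operational) state and registers the VLAN/MAC resources when the VF driver initializes, and mlx4_master_deactivate_admin_state() releases them again on slave reset. This is why changing a VF MAC or VLAN only takes effect after the VF restarts. A minimal standalone sketch of that admin/oper pattern (hypothetical names and index values, not the driver's code):

/* Sketch only: the admin/oper split used by the mlx4 changes below,
 * reduced to plain userspace C with made-up index values.
 */
#include <stdbool.h>
#include <stdio.h>

#define VGT_SENTINEL  (-1)      /* stand-in for "no default VLAN" (MLX4_VGT) */
#define NO_INDEX      (-1)      /* stand-in for NO_INDX */

struct vport_admin { int default_vlan; bool spoofchk; };
struct vport_oper  { struct vport_admin state; int vlan_idx; int mac_idx; };

/* ndo_set_vf_vlan path: only record what the admin asked for. */
static void admin_set_vlan(struct vport_admin *adm, int vlan)
{
        adm->default_vlan = vlan;
}

/* Slave (VF) init path: copy admin -> oper and grab resources. */
static void activate(const struct vport_admin *adm, struct vport_oper *op)
{
        op->state = *adm;
        op->vlan_idx = (adm->default_vlan != VGT_SENTINEL) ? 7 : NO_INDEX;
        op->mac_idx  = adm->spoofchk ? 3 : NO_INDEX;
}

/* Slave reset path: release whatever was registered. */
static void deactivate(struct vport_oper *op)
{
        op->vlan_idx = NO_INDEX;
        op->mac_idx  = NO_INDEX;
}

int main(void)
{
        struct vport_admin adm = { .default_vlan = VGT_SENTINEL };
        struct vport_oper  op  = { .vlan_idx = NO_INDEX, .mac_idx = NO_INDEX };

        admin_set_vlan(&adm, 100);      /* request is only recorded here...   */
        activate(&adm, &op);            /* ...and applied at VF (re)start     */
        printf("oper vlan=%d idx=%d\n", op.state.default_vlan, op.vlan_idx);
        deactivate(&op);
        return 0;
}
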
200 changes: 200 additions & 0 deletions drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1490,6 +1490,69 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
        return ret;
}

static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
        int port, err;
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vport_oper_state *vp_oper;

        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
                vp_oper->state = *vp_admin;
                if (MLX4_VGT != vp_admin->default_vlan) {
                        err = __mlx4_register_vlan(&priv->dev, port,
                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
                        if (err) {
                                vp_oper->vlan_idx = NO_INDX;
                                mlx4_warn((&priv->dev),
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
                        mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
                                 (int)(vp_oper->state.default_vlan),
                                 vp_oper->vlan_idx, slave, port);
                }
                if (vp_admin->spoofchk) {
                        vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
                                                               port,
                                                               vp_admin->mac);
                        if (0 > vp_oper->mac_idx) {
                                err = vp_oper->mac_idx;
                                vp_oper->mac_idx = NO_INDX;
                                mlx4_warn((&priv->dev),
                                          "No mac resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
                        mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
                }
        }
        return 0;
}

static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
        int port;
        struct mlx4_vport_oper_state *vp_oper;

        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                if (NO_INDX != vp_oper->vlan_idx) {
                        __mlx4_unregister_vlan(&priv->dev,
                                               port, vp_oper->vlan_idx);
                        vp_oper->vlan_idx = NO_INDX;
                }
                if (NO_INDX != vp_oper->mac_idx) {
                        __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
                        vp_oper->mac_idx = NO_INDX;
                }
        }
        return;
}

static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                               u16 param, u8 toggle)
{
@@ -1510,6 +1573,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        if (cmd == MLX4_COMM_CMD_RESET) {
                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
                slave_state[slave].active = false;
                mlx4_master_deactivate_admin_state(priv, slave);
                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
                        slave_state[slave].event_eq[i].eqn = -1;
                        slave_state[slave].event_eq[i].token = 0;
@@ -1556,6 +1620,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
                        goto reset_slave;
                slave_state[slave].vhcr_dma |= param;
                if (mlx4_master_activate_admin_state(priv, slave))
                        goto reset_slave;
                slave_state[slave].active = true;
                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
                break;
@@ -1732,6 +1798,18 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
        if (!priv->mfunc.master.slave_state)
                goto err_comm;

        priv->mfunc.master.vf_admin =
                kzalloc(dev->num_slaves *
                        sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
        if (!priv->mfunc.master.vf_admin)
                goto err_comm_admin;

        priv->mfunc.master.vf_oper =
                kzalloc(dev->num_slaves *
                        sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
        if (!priv->mfunc.master.vf_oper)
                goto err_comm_oper;

        for (i = 0; i < dev->num_slaves; ++i) {
                s_state = &priv->mfunc.master.slave_state[i];
                s_state->last_cmd = MLX4_COMM_CMD_RESET;
@@ -1752,6 +1830,10 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                                goto err_slaves;
                        }
                        INIT_LIST_HEAD(&s_state->mcast_filters[port]);
                        priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
                        priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
                        priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
                        priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
                }
                spin_lock_init(&s_state->lock);
        }
@@ -1800,6 +1882,10 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                for (port = 1; port <= MLX4_MAX_PORTS; port++)
                        kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
        }
        kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
        kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
        kfree(priv->mfunc.master.slave_state);
err_comm:
        iounmap(priv->mfunc.comm);
@@ -1874,6 +1960,8 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
                                kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
                }
                kfree(priv->mfunc.master.slave_state);
                kfree(priv->mfunc.master.vf_admin);
                kfree(priv->mfunc.master.vf_oper);
        }

        iounmap(priv->mfunc.comm);
@@ -1984,3 +2072,115 @@ u32 mlx4_comm_get_version(void)
{
        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}

static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
        if ((vf < 0) || (vf >= dev->num_vfs)) {
                mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
                return -EINVAL;
        }

        return vf+1;
}

int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_vport_state *s_info;
        int slave;

        if (!mlx4_is_master(dev))
                return -EPROTONOSUPPORT;

        slave = mlx4_get_slave_indx(dev, vf);
        if (slave < 0)
                return -EINVAL;

        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->mac = mac;
        mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
                  vf, port, s_info->mac);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);

int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_vport_state *s_info;
        int slave;

        if ((!mlx4_is_master(dev)) ||
            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
                return -EPROTONOSUPPORT;

        if ((vlan > 4095) || (qos > 7))
                return -EINVAL;

        slave = mlx4_get_slave_indx(dev, vf);
        if (slave < 0)
                return -EINVAL;

        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        if ((0 == vlan) && (0 == qos))
                s_info->default_vlan = MLX4_VGT;
        else
                s_info->default_vlan = vlan;
        s_info->default_qos = qos;
        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);

int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_vport_state *s_info;
        int slave;

        if ((!mlx4_is_master(dev)) ||
            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
                return -EPROTONOSUPPORT;

        slave = mlx4_get_slave_indx(dev, vf);
        if (slave < 0)
                return -EINVAL;

        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->spoofchk = setting;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);

int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_vport_state *s_info;
        int slave;

        if (!mlx4_is_master(dev))
                return -EPROTONOSUPPORT;

        slave = mlx4_get_slave_indx(dev, vf);
        if (slave < 0)
                return -EINVAL;

        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        ivf->vf = vf;

        /* need to convert it to a func */
        ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
        ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
        ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
        ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
        ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
        ivf->mac[5] = ((s_info->mac) & 0xff);

        ivf->vlan = s_info->default_vlan;
        ivf->qos = s_info->default_qos;
        ivf->tx_rate = s_info->tx_rate;
        ivf->spoofchk = s_info->spoofchk;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
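
The mlx4_get_vf_config() path above keeps the MAC in a u64 and unpacks it byte-by-byte into ifla_vf_info->mac, most significant byte first. For reference, the same unpacking as a standalone userspace helper (hypothetical function name, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Unpack a MAC stored in the low 48 bits of a 64-bit value, mirroring the
 * shifts in mlx4_get_vf_config() above (out[0] is the most significant byte).
 */
static void mac_u64_to_bytes(uint64_t mac, uint8_t out[6])
{
        int i;

        for (i = 0; i < 6; i++)
                out[i] = (mac >> ((5 - i) * 8)) & 0xff;
}

int main(void)
{
        uint8_t mac[6];

        mac_u64_to_bytes(0x001122334455ULL, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;                       /* prints 00:11:22:33:44:55 */
}
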
78 changes: 74 additions & 4 deletions drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1373,7 +1373,8 @@ static void mlx4_en_service_task(struct work_struct *work)

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                mlx4_en_ptp_overflow_check(mdev);
                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
                        mlx4_en_ptp_overflow_check(mdev);

                queue_delayed_work(mdev->workqueue, &priv->service_task,
                                   SERVICE_TASK_DELAY);
@@ -2023,6 +2024,42 @@ static int mlx4_en_set_features(struct net_device *netdev,

}

static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;
        u64 mac_u64 = mlx4_en_mac_to_u64(mac);

        if (!is_valid_ether_addr(mac))
                return -EINVAL;

        return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open = mlx4_en_open,
        .ndo_stop = mlx4_en_close,
@@ -2047,6 +2084,33 @@ static const struct net_device_ops mlx4_netdev_ops = {
#endif
};

static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_open = mlx4_en_open,
        .ndo_stop = mlx4_en_close,
        .ndo_start_xmit = mlx4_en_xmit,
        .ndo_select_queue = mlx4_en_select_queue,
        .ndo_get_stats = mlx4_en_get_stats,
        .ndo_set_rx_mode = mlx4_en_set_rx_mode,
        .ndo_set_mac_address = mlx4_en_set_mac,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = mlx4_en_change_mtu,
        .ndo_tx_timeout = mlx4_en_tx_timeout,
        .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
        .ndo_set_vf_mac = mlx4_en_set_vf_mac,
        .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
        .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
        .ndo_get_vf_config = mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = mlx4_en_netpoll,
#endif
        .ndo_set_features = mlx4_en_set_features,
        .ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
{
@@ -2163,7 +2227,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        /*
         * Initialize netdev entry points
         */
        dev->netdev_ops = &mlx4_netdev_ops;
        if (mlx4_is_master(priv->mdev->dev))
                dev->netdev_ops = &mlx4_netdev_ops_master;
        else
                dev->netdev_ops = &mlx4_netdev_ops;
        dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -2228,8 +2295,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        }
        mlx4_en_set_default_moderation(priv);
        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        queue_delayed_work(mdev->workqueue, &priv->service_task,
                           SERVICE_TASK_DELAY);

        if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
                queue_delayed_work(mdev->workqueue, &priv->service_task,
                                   SERVICE_TASK_DELAY);

        return 0;

out:
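
With the master ops table installed, these callbacks are reached through the standard rtnetlink VF interface; assuming the PF netdev is eth2, commands such as "ip link set dev eth2 vf 0 mac 00:11:22:33:44:55", "ip link set dev eth2 vf 0 vlan 100 qos 3" and "ip link set dev eth2 vf 0 spoofchk on" land in mlx4_en_set_vf_mac/vlan/spoofchk, and "ip link show dev eth2" reads the per-VF settings back through mlx4_en_get_vf_config. As noted for the cmd.c hunks, MAC and VLAN changes only take effect after the VF driver restarts, because they are applied from the admin state at slave initialization.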
