
Commit 3057224

idosch authored and davem330 committed
mlxsw: spectrum_router: Implement FIB offload in deferred work
FIB offload is currently done in process context with RTNL held, but we're about to dump the FIB tables in an RCU critical section, so we can no longer sleep. Instead, defer the operation to process context using deferred work.

Make sure the fib info isn't freed while the work is queued by taking a reference on it and releasing it after the operation is done.

Deferring the operation is valid because the upper layers always assume the operation was successful. If it's not, then the driver-specific abort mechanism is called and all routed traffic is directed to the slow path.

The work items are submitted to an ordered workqueue to prevent a mismatch between the kernel's FIB table and the device's.

Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent: a3832b3
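The reference/release scheme described in the commit message relies on the IPv4 core's fib_info refcount helpers, which are not part of this diff. A rough sketch of their shape around the time of this series (they live in include/net/ip_fib.h; reproduced from memory, so treat the exact bodies as approximate rather than verbatim):

	/* Sketch of the fib_info refcount helpers the deferred work depends on.
	 * Approximate, not a verbatim copy of include/net/ip_fib.h.
	 */
	static inline void fib_info_hold(struct fib_info *fi)
	{
		atomic_inc(&fi->fib_clntref);	/* pin the fib_info while the work item is queued */
	}

	static inline void fib_info_put(struct fib_info *fi)
	{
		if (atomic_dec_and_test(&fi->fib_clntref))
			free_fib_info(fi);	/* last reference dropped: free the route info */
	}

The notifier takes the reference with fib_info_hold() before queueing the work item, and the work function drops it with fib_info_put() once the add/del has been offloaded (or aborted).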

File tree

1 file changed: +62 -10 lines changed

drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c (+62, -10)
@@ -593,6 +593,14 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
 
 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
 {
+	/* At this stage we're guaranteed not to have new incoming
+	 * FIB notifications and the work queue is free from FIBs
+	 * sitting on top of mlxsw netdevs. However, we can still
+	 * have other FIBs queued. Flush the queue before flushing
+	 * the device's tables. No need for locks, as we're the only
+	 * writer.
+	 */
+	mlxsw_core_flush_owq();
 	mlxsw_sp_router_fib_flush(mlxsw_sp);
 	kfree(mlxsw_sp->router.vrs);
 }
@@ -1948,30 +1956,74 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
 	kfree(mlxsw_sp->rifs);
 }
 
-static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
-				     unsigned long event, void *ptr)
+struct mlxsw_sp_fib_event_work {
+	struct delayed_work dw;
+	struct fib_entry_notifier_info fen_info;
+	struct mlxsw_sp *mlxsw_sp;
+	unsigned long event;
+};
+
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
 {
-	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
-	struct fib_entry_notifier_info *fen_info = ptr;
+	struct mlxsw_sp_fib_event_work *fib_work =
+		container_of(work, struct mlxsw_sp_fib_event_work, dw.work);
+	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
 	int err;
 
-	if (!net_eq(fen_info->info.net, &init_net))
-		return NOTIFY_DONE;
-
-	switch (event) {
+	/* Protect internal structures from changes */
+	rtnl_lock();
+	switch (fib_work->event) {
 	case FIB_EVENT_ENTRY_ADD:
-		err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
+		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info);
 		if (err)
 			mlxsw_sp_router_fib4_abort(mlxsw_sp);
+		fib_info_put(fib_work->fen_info.fi);
 		break;
 	case FIB_EVENT_ENTRY_DEL:
-		mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
+		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+		fib_info_put(fib_work->fen_info.fi);
 		break;
 	case FIB_EVENT_RULE_ADD: /* fall through */
 	case FIB_EVENT_RULE_DEL:
 		mlxsw_sp_router_fib4_abort(mlxsw_sp);
 		break;
 	}
+	rtnl_unlock();
+	kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+				     unsigned long event, void *ptr)
+{
+	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+	struct mlxsw_sp_fib_event_work *fib_work;
+	struct fib_notifier_info *info = ptr;
+
+	if (!net_eq(info->net, &init_net))
+		return NOTIFY_DONE;
+
+	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+	if (WARN_ON(!fib_work))
+		return NOTIFY_BAD;
+
+	INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work);
+	fib_work->mlxsw_sp = mlxsw_sp;
+	fib_work->event = event;
+
+	switch (event) {
+	case FIB_EVENT_ENTRY_ADD: /* fall through */
+	case FIB_EVENT_ENTRY_DEL:
+		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
+		/* Take reference on fib_info to prevent it from being
+		 * freed while work is queued. Release it afterwards.
+		 */
+		fib_info_hold(fib_work->fen_info.fi);
+		break;
+	}
+
+	mlxsw_core_schedule_odw(&fib_work->dw, 0);
+
 	return NOTIFY_DONE;
 }
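mlxsw_core_flush_owq() and mlxsw_core_schedule_odw() live in the mlxsw core, outside this file, so their definitions do not appear in this diff. A minimal sketch of what such helpers could look like on top of the kernel's ordered-workqueue API (alloc_ordered_workqueue(), queue_delayed_work(), flush_workqueue()); the workqueue name and the init/fini wrapper names below are illustrative assumptions, not necessarily the driver's actual ones:

	/* Sketch only: assumed shape of the mlxsw core ordered-workqueue helpers. */
	#include <linux/workqueue.h>

	static struct workqueue_struct *mlxsw_owq;

	int mlxsw_core_owq_init(void)		/* illustrative name */
	{
		/* Ordered: at most one work item runs at a time, in submission
		 * order, so queued FIB adds/dels cannot be reordered.
		 */
		mlxsw_owq = alloc_ordered_workqueue("mlxsw_ordered", WQ_MEM_RECLAIM);
		return mlxsw_owq ? 0 : -ENOMEM;
	}

	void mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay)
	{
		queue_delayed_work(mlxsw_owq, dwork, delay);
	}

	void mlxsw_core_flush_owq(void)
	{
		/* Drain every queued FIB work item before the caller flushes
		 * the device's tables.
		 */
		flush_workqueue(mlxsw_owq);
	}

	void mlxsw_core_owq_fini(void)		/* illustrative name */
	{
		destroy_workqueue(mlxsw_owq);
	}

Because the notifier runs under rcu_read_lock(), it only performs a GFP_ATOMIC allocation and queues the work with zero delay; everything that can sleep (taking RTNL, programming the device) happens later in mlxsw_sp_router_fib_event_work().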
