net/smc: separate locks for SMCD and SMCR link group lists
This patch introduces separate locks for the split SMCD and SMCR
link group lists.

Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Ursula Braun authored and Jakub Kicinski committed Oct 10, 2019
1 parent a2351c5 commit a0a62ee
Showing 3 changed files with 44 additions and 15 deletions.
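
In short, every operation on a link group list now takes the lock that matches the group's type: SMC-D link groups use the new per-device smcd->lgr_lock, while SMC-R link groups keep using the global smc_lgr_list.lock. A condensed sketch of the caller pattern (names taken from the hunks below, surrounding code simplified):

	spinlock_t *lgr_lock;

	/* sets lgr_lock to &lgr->smcd->lgr_lock for SMC-D,
	 * or to &smc_lgr_list.lock for SMC-R
	 */
	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* ... inspect or modify the link group's list membership ... */
	spin_unlock_bh(lgr_lock);
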
1 change: 1 addition & 0 deletions include/net/smc.h
@@ -76,6 +76,7 @@ struct smcd_dev {
 	u8 pnetid[SMC_MAX_PNETID_LEN];
 	bool pnetid_by_user;
 	struct list_head lgr_list;
+	spinlock_t lgr_lock;
 };
 
 struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
57 changes: 42 additions & 15 deletions net/smc/smc_core.c
@@ -42,6 +42,19 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
 
+/* return head of link group list and its lock for a given link group */
+static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
+						  spinlock_t **lgr_lock)
+{
+	if (lgr->is_smcd) {
+		*lgr_lock = &lgr->smcd->lgr_lock;
+		return &lgr->smcd->lgr_list;
+	}
+
+	*lgr_lock = &smc_lgr_list.lock;
+	return &smc_lgr_list.list;
+}
+
 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 {
 	/* client link group creation always follows the server link group
@@ -157,19 +170,21 @@ static void smc_lgr_free_work(struct work_struct *work)
 	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
+	spinlock_t *lgr_lock;
 	bool conns;
 
-	spin_lock_bh(&smc_lgr_list.lock);
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
 	read_lock_bh(&lgr->conns_lock);
 	conns = RB_EMPTY_ROOT(&lgr->conns_all);
 	read_unlock_bh(&lgr->conns_lock);
 	if (!conns) { /* number of lgr connections is no longer zero */
-		spin_unlock_bh(&smc_lgr_list.lock);
+		spin_unlock_bh(lgr_lock);
 		return;
 	}
 	if (!list_empty(&lgr->list))
 		list_del_init(&lgr->list); /* remove from smc_lgr_list */
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(lgr_lock);
 
 	if (!lgr->is_smcd && !lgr->terminating) {
 		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
@@ -200,6 +215,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
 	struct smc_link_group *lgr;
 	struct list_head *lgr_list;
 	struct smc_link *lnk;
+	spinlock_t *lgr_lock;
 	u8 rndvec[3];
 	int rc = 0;
 	int i;
@@ -235,6 +251,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
 		lgr->peer_gid = ini->ism_gid;
 		lgr->smcd = ini->ism_dev;
 		lgr_list = &ini->ism_dev->lgr_list;
+		lgr_lock = &lgr->smcd->lgr_lock;
 	} else {
 		/* SMC-R specific settings */
 		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
@@ -248,6 +265,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
 		lnk->smcibdev = ini->ib_dev;
 		lnk->ibport = ini->ib_port;
 		lgr_list = &smc_lgr_list.list;
+		lgr_lock = &smc_lgr_list.lock;
 		lnk->path_mtu =
			ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
 		if (!ini->ib_dev->initialized)
@@ -277,9 +295,9 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
			goto destroy_qp;
 	}
 	smc->conn.lgr = lgr;
-	spin_lock_bh(&smc_lgr_list.lock);
+	spin_lock_bh(lgr_lock);
 	list_add(&lgr->list, lgr_list);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(lgr_lock);
 	return 0;
 
 destroy_qp:
@@ -442,11 +460,15 @@ static void smc_lgr_free(struct smc_link_group *lgr)
 
 void smc_lgr_forget(struct smc_link_group *lgr)
 {
-	spin_lock_bh(&smc_lgr_list.lock);
+	struct list_head *lgr_list;
+	spinlock_t *lgr_lock;
+
+	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
 	/* do not use this link group for new connections */
-	if (!list_empty(&lgr->list))
-		list_del_init(&lgr->list);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	if (!list_empty(lgr_list))
+		list_del_init(lgr_list);
+	spin_unlock_bh(lgr_lock);
 }
 
 /* terminate linkgroup abnormally */
@@ -487,9 +509,12 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
 
 void smc_lgr_terminate(struct smc_link_group *lgr)
 {
-	spin_lock_bh(&smc_lgr_list.lock);
+	spinlock_t *lgr_lock;
+
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
 	__smc_lgr_terminate(lgr);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(lgr_lock);
 }
 
 /* Called when IB port is terminated */
@@ -514,15 +539,15 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 	LIST_HEAD(lgr_free_list);
 
 	/* run common cleanup function and build free list */
-	spin_lock_bh(&smc_lgr_list.lock);
+	spin_lock_bh(&dev->lgr_lock);
 	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
 		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			__smc_lgr_terminate(lgr);
			list_move(&lgr->list, &lgr_free_list);
 		}
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(&dev->lgr_lock);
 
 	/* cancel the regular free workers and actually free lgrs */
 	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
@@ -609,17 +634,19 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
 	struct list_head *lgr_list;
 	struct smc_link_group *lgr;
 	enum smc_lgr_role role;
+	spinlock_t *lgr_lock;
 	int rc = 0;
 
 	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
+	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
 	ini->cln_first_contact = SMC_FIRST_CONTACT;
 	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
 	if (role == SMC_CLNT && ini->srv_first_contact)
 		/* create new link group as well */
 		goto create;
 
 	/* determine if an existing link group can be reused */
-	spin_lock_bh(&smc_lgr_list.lock);
+	spin_lock_bh(lgr_lock);
 	list_for_each_entry(lgr, lgr_list, list) {
 		write_lock_bh(&lgr->conns_lock);
 		if ((ini->is_smcd ?
@@ -640,7 +667,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
 		}
 		write_unlock_bh(&lgr->conns_lock);
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(lgr_lock);
 
 	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
1 change: 1 addition & 0 deletions net/smc/smc_ism.c
@@ -286,6 +286,7 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
 	smc_pnetid_by_dev_port(parent, 0, smcd->pnetid);
 
 	spin_lock_init(&smcd->lock);
+	spin_lock_init(&smcd->lgr_lock);
 	INIT_LIST_HEAD(&smcd->vlan);
 	INIT_LIST_HEAD(&smcd->lgr_list);
 	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
