
Fix 'zpool import' blkid device names
When importing a pool using the blkid cache, only the device
node path was added to the list of known paths for a device.
This resulted in 'zpool import' always using the sdX names
in preference to the 'path' name stored in the label.

To fix the issue, the blkid import path has been updated to
add the 'path', 'devid', and 'devname' names from the label
to the known paths.  A sanity check ensures that these paths
refer to the same device identified by blkid.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tony Hutter <hutter2@llnl.gov>
Closes #4523
Closes #3043
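
The heart of the change is the per-path sanity check described above: an alternate name is only recorded after the label has been re-read through that name and shown to identify the same pool and vdev. A condensed, standalone sketch of that check follows; the commit's actual helper is the new add_path() function in the diff below, and the name verify_candidate_path (and its standalone form) is illustrative only, assuming the libzfs zpool_read_label() interface already used in this file.

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <libzfs.h>

/* Illustrative sketch, not part of this commit. */
static int
verify_candidate_path(const char *path, uint64_t pool_guid, uint64_t vdev_guid)
{
	nvlist_t *label;
	uint64_t guid;
	int fd, num_labels, error;

	/* Re-read the vdev label through the candidate device name. */
	if ((fd = open(path, O_RDONLY)) < 0)
		return (errno);
	error = zpool_read_label(fd, &label, &num_labels);
	(void) close(fd);
	if (error || label == NULL)
		return (ENOENT);

	/* Both the pool GUID and the vdev GUID must match the original. */
	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
	    guid != pool_guid ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
	    guid != vdev_guid)
		error = EINVAL;

	nvlist_free(label);
	return (error);
}

Only when both GUIDs match is the candidate name trusted and passed on to add_config().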
behlendorf committed Apr 25, 2016
1 parent b8faf0c commit 325414e
Showing 1 changed file with 123 additions and 19 deletions.
142 changes: 123 additions & 19 deletions lib/libzfs/libzfs_import.c
@@ -99,15 +99,15 @@ typedef struct pool_list {
 	name_entry_t *names;
 } pool_list_t;
 
+#define DEV_BYID_PATH "/dev/disk/by-id/"
+
 /*
  * Linux persistent device strings for vdev labels
  *
  * based on libudev for consistency with libudev disk add/remove events
  */
 #ifdef HAVE_LIBUDEV
 
-#define DEV_BYID_PATH "/dev/disk/by-id/"
-
 typedef struct vdev_dev_strs {
 	char vds_devid[128];
 	char vds_devphys[128];
@@ -536,7 +536,6 @@ update_vdev_config_dev_strs(nvlist_t *nv)
 
 #endif /* HAVE_LIBUDEV */
 
-
 /*
  * Go through and fix up any path and/or devid information for the given vdev
  * configuration.
@@ -577,7 +576,6 @@ fix_paths(nvlist_t *nv, name_entry_t *names)
 	best = NULL;
 	for (ne = names; ne != NULL; ne = ne->ne_next) {
 		if (ne->ne_guid == guid) {
-
 			if (path == NULL) {
 				best = ne;
 				break;
@@ -762,6 +760,116 @@ add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
 	return (0);
 }
 
+static int
+add_path(libzfs_handle_t *hdl, pool_list_t *pools, uint64_t pool_guid,
+    uint64_t vdev_guid, const char *path, int order)
+{
+	nvlist_t *label;
+	uint64_t guid;
+	int error, fd, num_labels;
+
+	fd = open64(path, O_RDONLY);
+	if (fd < 0)
+		return (errno);
+
+	error = zpool_read_label(fd, &label, &num_labels);
+	close(fd);
+
+	if (error || label == NULL)
+		return (ENOENT);
+
+	error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid);
+	if (error || guid != pool_guid) {
+		nvlist_free(label);
+		return (EINVAL);
+	}
+
+	error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid);
+	if (error || guid != vdev_guid) {
+		nvlist_free(label);
+		return (EINVAL);
+	}
+
+	error = add_config(hdl, pools, path, order, num_labels, label);
+
+	return (error);
+}
+
+static int
+add_configs_from_label_impl(libzfs_handle_t *hdl, pool_list_t *pools,
+    nvlist_t *nvroot, uint64_t pool_guid, uint64_t vdev_guid)
+{
+	char udevpath[MAXPATHLEN];
+	char *path;
+	nvlist_t **child;
+	uint_t c, children;
+	uint64_t guid;
+	int error;
+
+	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++) {
+			error = add_configs_from_label_impl(hdl, pools,
+			    child[c], pool_guid, vdev_guid);
+			if (error)
+				return (error);
+		}
+		return (0);
+	}
+
+	if (nvroot == NULL)
+		return (0);
+
+	error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
+	if ((error != 0) || (guid != vdev_guid))
+		return (0);
+
+	error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &path);
+	if (error == 0)
+		(void) add_path(hdl, pools, pool_guid, vdev_guid, path, 0);
+
+	error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &path);
+	if (error == 0) {
+		sprintf(udevpath, "%s%s", DEV_BYID_PATH, path);
+		(void) add_path(hdl, pools, pool_guid, vdev_guid, udevpath, 1);
+	}
+
+	return (0);
+}
+
+/*
+ * Given a disk label call add_config() for all known paths to the device
+ * as described by the label itself. The paths are added in the following
+ * priority order: 'path', 'devid', 'devnode'. As these alternate paths are
+ * added the labels are verified to make sure they refer to the same device.
+ */
+static int
+add_configs_from_label(libzfs_handle_t *hdl, pool_list_t *pools,
+    char *devname, int num_labels, nvlist_t *label)
+{
+	nvlist_t *nvroot;
+	uint64_t pool_guid;
+	uint64_t vdev_guid;
+	int error;
+
+	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
+	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
+	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
+		return (ENOENT);
+
+	/* Allow devlinks to stabilize so all paths are available. */
+	zpool_label_disk_wait(devname, DISK_LABEL_WAIT);
+
+	/* Add alternate paths as described by the label vdev_tree. */
+	(void) add_configs_from_label_impl(hdl, pools, nvroot,
+	    pool_guid, vdev_guid);
+
+	/* Add the device node /dev/sdX path as a last resort. */
+	error = add_config(hdl, pools, devname, 100, num_labels, label);
+
+	return (error);
+}
+
 /*
  * Returns true if the named pool matches the given GUID.
  */
@@ -1609,9 +1717,7 @@ zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
 	blkid_cache cache;
 	blkid_dev_iterate iter;
 	blkid_dev dev;
-	const char *devname;
-	nvlist_t *config;
-	int fd, err, num_labels;
+	int err;
 
 	err = blkid_get_cache(&cache, NULL);
 	if (err != 0) {
@@ -1642,25 +1748,23 @@ zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
 	}
 
 	while (blkid_dev_next(iter, &dev) == 0) {
-		devname = blkid_dev_devname(dev);
+		nvlist_t *label;
+		char *devname;
+		int fd, num_labels;
+
+		devname = (char *) blkid_dev_devname(dev);
 		if ((fd = open64(devname, O_RDONLY)) < 0)
 			continue;
 
-		err = zpool_read_label(fd, &config, &num_labels);
+		err = zpool_read_label(fd, &label, &num_labels);
 		(void) close(fd);
 
-		if (err != 0) {
-			(void) no_memory(hdl);
-			goto err_blkid3;
-		}
+		if (err || label == NULL)
+			continue;
 
-		if (config != NULL) {
-			err = add_config(hdl, pools, devname, 0,
-			    num_labels, config);
-			if (err != 0)
-				goto err_blkid3;
-		}
+		add_configs_from_label(hdl, pools, devname, num_labels, label);
 	}
 	err = 0;
 
 err_blkid3:
 	blkid_dev_iterate_end(iter);
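
For context on where the scanned device names come from, zpool_find_import_blkid() above walks the libblkid cache and hands each ZFS member device it finds to the new add_configs_from_label(). The standalone sketch below shows just that libblkid iteration pattern, independent of libzfs; it assumes blkid reports ZFS member devices with TYPE "zfs_member" and simply prints their device node names (build with -lblkid).

#include <stdio.h>
#include <blkid/blkid.h>

int
main(void)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;

	/* Open and refresh the default blkid cache. */
	if (blkid_get_cache(&cache, NULL) != 0)
		return (1);
	(void) blkid_probe_all(cache);

	/* Restrict the iteration to devices tagged as ZFS members. */
	iter = blkid_dev_iterate_begin(cache);
	(void) blkid_dev_set_search(iter, "TYPE", "zfs_member");

	while (blkid_dev_next(iter, &dev) == 0)
		printf("%s\n", blkid_dev_devname(dev));

	blkid_dev_iterate_end(iter);
	blkid_put_cache(cache);
	return (0);
}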

1 comment on commit 325414e

@bitshark

Ah nice - I've had an issue with this, thanks for fixing it. ZFS ftw
