Skip to content

Commit

Permalink
Harden hardware pool interfaces. (#251)
Browse files Browse the repository at this point in the history
Signed-off-by: Samuel K. Gutierrez <samuel@lanl.gov>
  • Loading branch information
samuelkgutierrez authored Jul 26, 2024
1 parent 4c3ef21 commit 3d6ea2a
Show file tree
Hide file tree
Showing 4 changed files with 130 additions and 65 deletions.
27 changes: 2 additions & 25 deletions src/qvi-bbuff-rmi.h
Original file line number Diff line number Diff line change
Expand Up @@ -508,11 +508,7 @@ qvi_bbuff_rmi_pack_item(
qvi_bbuff_t *buff,
const qvi_hwpool_cpu_s &data
) {
// Pack hints.
const int rc = qvi_bbuff_rmi_pack_item(buff, data.hints);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;

return qvi_bbuff_rmi_pack_item(buff, data.cpuset);
return data.packinto(buff);
}

/**
Expand Down Expand Up @@ -825,26 +821,7 @@ qvi_bbuff_rmi_unpack_item(
byte_t *buffpos,
size_t *bytes_written
) {
size_t bw = 0, total_bw = 0;
// Unpack hints.
int rc = qvi_bbuff_rmi_unpack_item(
&cpu.hints, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
buffpos += bw;
// Unpack bitmap.
rc = qvi_bbuff_rmi_unpack_item(
cpu.cpuset, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
out:
if (qvi_unlikely(rc != QV_SUCCESS)) {
total_bw = 0;
}
*bytes_written = total_bw;
return rc;
return qvi_hwpool_cpu_s::unpack(buffpos, bytes_written, cpu);
}

/**
Expand Down
103 changes: 81 additions & 22 deletions src/qvi-hwpool.cc
Original file line number Diff line number Diff line change
Expand Up @@ -134,13 +134,64 @@ pool_release_cpus_by_cpuset(
}
#endif

/** Returns a mutable reference to the CPU's PU bitmap. */
qvi_hwloc_bitmap_s &
qvi_hwpool_cpu_s::cpuset(void) {
    return m_cpuset;
}

/** Read-only view of the CPU's PU bitmap. */
const qvi_hwloc_bitmap_s &
qvi_hwpool_cpu_s::cpuset(void) const {
    return m_cpuset;
}

/**
 * Serializes this CPU (hints first, then the cpuset) into the
 * provided buffer. The field order must mirror unpack().
 * Returns QV_SUCCESS on success, otherwise an error code.
 */
int
qvi_hwpool_cpu_s::packinto(
    qvi_bbuff_t *buff
) const {
    int rc = qvi_bbuff_rmi_pack_item(buff, m_hints);
    if (rc == QV_SUCCESS) {
        rc = qvi_bbuff_rmi_pack_item(buff, m_cpuset);
    }
    return rc;
}

/**
 * Deserializes a CPU from buffpos into cpu, consuming the fields in
 * the same order packinto() wrote them (hints, then cpuset).
 * On success *bytes_written holds the number of bytes consumed;
 * on failure it is zero and the error code is returned.
 */
int
qvi_hwpool_cpu_s::unpack(
    byte_t *buffpos,
    size_t *bytes_written,
    qvi_hwpool_cpu_s &cpu
) {
    // Report zero consumption unless every field unpacks cleanly.
    *bytes_written = 0;

    size_t nbytes = 0;
    // Hints come first in the serialized layout.
    int rc = qvi_bbuff_rmi_unpack_item(
        &cpu.m_hints, buffpos, &nbytes
    );
    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
    size_t consumed = nbytes;
    buffpos += nbytes;
    // The cpuset bitmap follows.
    rc = qvi_bbuff_rmi_unpack_item(
        cpu.m_cpuset, buffpos, &nbytes
    );
    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
    consumed += nbytes;

    *bytes_written = consumed;
    return rc;
}

qvi_hwpool_dev_s::qvi_hwpool_dev_s(
const qvi_hwloc_device_s &dev
) : type(dev.type)
, affinity(dev.affinity)
) : m_type(dev.type)
, m_affinity(dev.affinity)
, m_id(dev.id)
, pci_bus_id(dev.pci_bus_id)
, uuid(dev.uuid) { }
, m_pci_bus_id(dev.pci_bus_id)
, m_uuid(dev.uuid) { }

qvi_hwpool_dev_s::qvi_hwpool_dev_s(
const std::shared_ptr<qvi_hwloc_device_s> &shdev
Expand All @@ -150,7 +201,7 @@ bool
qvi_hwpool_dev_s::operator==(
const qvi_hwpool_dev_s &x
) const {
return uuid == x.uuid;
return m_uuid == x.m_uuid;
}

int
Expand All @@ -161,10 +212,10 @@ qvi_hwpool_dev_s::id(
int rc = QV_SUCCESS, nw = 0;
switch (format) {
case (QV_DEVICE_ID_UUID):
nw = asprintf(result, "%s", uuid.c_str());
nw = asprintf(result, "%s", m_uuid.c_str());
break;
case (QV_DEVICE_ID_PCI_BUS_ID):
nw = asprintf(result, "%s", pci_bus_id.c_str());
nw = asprintf(result, "%s", m_pci_bus_id.c_str());
break;
case (QV_DEVICE_ID_ORDINAL):
nw = asprintf(result, "%d", m_id);
Expand All @@ -181,27 +232,33 @@ qvi_hwpool_dev_s::id(
return rc;
}

/** Read-only view of the device's CPU-affinity bitmap. */
const qvi_hwloc_bitmap_s &
qvi_hwpool_dev_s::affinity(void) const {
    return m_affinity;
}

int
qvi_hwpool_dev_s::packinto(
qvi_bbuff_t *buff
) const {
// Pack device hints.
int rc = qvi_bbuff_rmi_pack_item(buff, hints);
int rc = qvi_bbuff_rmi_pack_item(buff, m_hints);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Pack device affinity.
rc = qvi_bbuff_rmi_pack_item(buff, affinity);
rc = qvi_bbuff_rmi_pack_item(buff, m_affinity);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Pack device type.
rc = qvi_bbuff_rmi_pack_item(buff, type);
rc = qvi_bbuff_rmi_pack_item(buff, m_type);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Pack device ID.
rc = qvi_bbuff_rmi_pack_item(buff, m_id);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Pack device PCI bus ID.
rc = qvi_bbuff_rmi_pack_item(buff, pci_bus_id);
rc = qvi_bbuff_rmi_pack_item(buff, m_pci_bus_id);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Pack device UUID.
return qvi_bbuff_rmi_pack_item(buff, uuid);
return qvi_bbuff_rmi_pack_item(buff, m_uuid);
}

int
Expand All @@ -213,21 +270,21 @@ qvi_hwpool_dev_s::unpack(
size_t bw = 0, total_bw = 0;

int rc = qvi_bbuff_rmi_unpack_item(
&dev->hints, buffpos, &bw
&dev->m_hints, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
buffpos += bw;

rc = qvi_bbuff_rmi_unpack_item(
dev->affinity, buffpos, &bw
dev->m_affinity, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
buffpos += bw;

rc = qvi_bbuff_rmi_unpack_item(
&dev->type, buffpos, &bw
&dev->m_type, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
Expand All @@ -241,14 +298,14 @@ qvi_hwpool_dev_s::unpack(
buffpos += bw;

rc = qvi_bbuff_rmi_unpack_item(
dev->pci_bus_id, buffpos, &bw
dev->m_pci_bus_id, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
buffpos += bw;

rc = qvi_bbuff_rmi_unpack_item(
dev->uuid, buffpos, &bw
dev->m_uuid, buffpos, &bw
);
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
total_bw += bw;
Expand All @@ -269,7 +326,7 @@ qvi_hwpool_s::add_devices_with_affinity(
for (const auto devt : qvi_hwloc_supported_devices()) {
qvi_hwloc_dev_list_t devs;
rc = qvi_hwloc_get_devices_in_bitmap(
hwloc, devt, m_cpu.cpuset, devs
hwloc, devt, m_cpu.cpuset(), devs
);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
for (const auto &dev : devs) {
Expand Down Expand Up @@ -304,7 +361,7 @@ qvi_hwpool_s::initialize(
qvi_hwloc_t *hwloc,
hwloc_const_bitmap_t cpuset
) {
const int rc = m_cpu.cpuset.set(cpuset);
const int rc = m_cpu.cpuset().set(cpuset);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Add devices with affinity to the hardware pool.
return add_devices_with_affinity(hwloc);
Expand All @@ -313,7 +370,7 @@ qvi_hwpool_s::initialize(
const qvi_hwloc_bitmap_s &
qvi_hwpool_s::cpuset(void) const
{
return m_cpu.cpuset;
return m_cpu.cpuset();
}

const qvi_hwpool_devs_t &
Expand All @@ -330,7 +387,7 @@ qvi_hwpool_s::nobjects(
) {
if (qvi_hwloc_obj_type_is_host_resource(obj_type)) {
return qvi_hwloc_get_nobjs_in_cpuset(
hwloc, obj_type, m_cpu.cpuset.cdata(), result
hwloc, obj_type, m_cpu.cpuset().cdata(), result
);
}
*result = m_devs.count(obj_type);
Expand All @@ -342,7 +399,7 @@ qvi_hwpool_s::add_device(
const qvi_hwpool_dev_s &dev
) {
auto shdev = std::make_shared<qvi_hwpool_dev_s>(dev);
m_devs.insert({dev.type, shdev});
m_devs.insert({dev.m_type, shdev});
return QV_SUCCESS;
}

Expand Down Expand Up @@ -421,6 +478,7 @@ qvi_hwpool_s::unpack(
return rc;
}

#if 0
/**
* Extend namespace std so we can easily add qvi_devinfo_ts to
* unordered_sets.
Expand All @@ -439,6 +497,7 @@ namespace std {
}
};
}
#endif

/*
* vim: ft=cpp ts=4 sts=4 sw=4 expandtab
Expand Down
61 changes: 45 additions & 16 deletions src/qvi-hwpool.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,34 +23,65 @@
* Base hardware pool resource class.
*/
struct qvi_hwpool_res_s {
protected:
/** Resource hint flags. */
qv_scope_create_hints_t hints = QV_SCOPE_CREATE_HINT_NONE;
qv_scope_create_hints_t m_hints = QV_SCOPE_CREATE_HINT_NONE;
};

/**
* Defines a hardware pool CPU. A CPU here may have multiple
* processing units (PUs), which are defined in the CPU's cpuset.
*/
struct qvi_hwpool_cpu_s : qvi_hwpool_res_s {
protected:
/** The cpuset of the CPU's PUs. */
qvi_hwloc_bitmap_s cpuset;
qvi_hwloc_bitmap_s m_cpuset;
public:
/**
* Returns a reference to the
* CPU's resources encoded by a bitmap.
*/
qvi_hwloc_bitmap_s &
cpuset(void);
/**
* Returns a const reference to the
* CPU's resources encoded by a bitmap.
*/
const qvi_hwloc_bitmap_s &
cpuset(void) const;
/** Packs the instance into the provided buffer. */
int
packinto(
qvi_bbuff_t *buff
) const;
/** Unpacks the buffer and creates a new hardware pool instance. */
static int
unpack(
byte_t *buffpos,
size_t *bytes_written,
qvi_hwpool_cpu_s &cpu
);
};

/**
* Defines a hardware pool device. This differs from a qvi_hwloc_device_s
* because we only maintain information relevant for user-facing operations.
*/
struct qvi_hwpool_dev_s : qvi_hwpool_res_s {
/** Hardware pools are our friends. */
friend qvi_hwpool_s;
private:
/** Device type. */
qv_hw_obj_type_t type = QV_HW_OBJ_LAST;
qv_hw_obj_type_t m_type = QV_HW_OBJ_LAST;
/** The bitmap encoding CPU affinity. */
qvi_hwloc_bitmap_s affinity;
qvi_hwloc_bitmap_s m_affinity;
/** Device ID (ordinal). */
int m_id = QVI_HWLOC_DEVICE_INVALID_ID;
/** The PCI bus ID. */
std::string pci_bus_id;
std::string m_pci_bus_id;
/** Universally Unique Identifier. */
std::string uuid;
std::string m_uuid;
public:
/** Default constructor. */
qvi_hwpool_dev_s(void) = default;
/** Constructor using qvi_hwloc_device_s. */
Expand All @@ -75,15 +106,17 @@ struct qvi_hwpool_dev_s : qvi_hwpool_res_s {
char **result
);
/**
* Packs the instance into the provided buffer.
* Returns a const reference to the
* device's affinity encoded by a bitmap.
*/
const qvi_hwloc_bitmap_s &
affinity(void) const;
/** Packs the instance into the provided buffer. */
int
packinto(
qvi_bbuff_t *buff
) const;
/**
* Unpacks the buffer and creates a new hardware pool device instance.
*/
/** Unpacks the buffer and creates a new hardware pool device instance. */
static int
unpack(
byte_t *buffpos,
Expand Down Expand Up @@ -164,16 +197,12 @@ struct qvi_hwpool_s {
*/
int
release_devices(void);
/**
* Packs the instance into the provided buffer.
*/
/** Packs the instance into the provided buffer. */
int
packinto(
qvi_bbuff_t *buff
) const;
/**
* Unpacks the buffer and creates a new hardware pool instance.
*/
/** Unpacks the buffer and creates a new hardware pool instance. */
static int
unpack(
byte_t *buffpos,
Expand Down
Loading

0 comments on commit 3d6ea2a

Please sign in to comment.