Skip to content

Commit

Permalink
B OpenNebula#6596: Fix Host NUMA nodes after VM migration (OpenNebula…
Browse files Browse the repository at this point in the history
…#3226)

* Fix Host NUMA nodes after VM migration
* Move template parsing to HostShareCapacity

Signed-off-by: Kristian Feldsam <feldsam@gmail.com>
  • Loading branch information
paczerny authored and feldsam committed Nov 4, 2024
1 parent 697827e commit e48aedb
Show file tree
Hide file tree
Showing 6 changed files with 93 additions and 22 deletions.
43 changes: 43 additions & 0 deletions include/HostShareCapacity.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#ifndef HOST_SHARE_CAPACITY_H_
#define HOST_SHARE_CAPACITY_H_

#include "Template.h"
#include "Attribute.h"

/* ------------------------------------------------------------------------ */
Expand Down Expand Up @@ -53,6 +54,48 @@ struct HostShareCapacity
VectorAttribute * topology;

std::vector<VectorAttribute *> nodes;

/**
 *  Get the VM capacity from the template
 *    @param vid the VM ID
 *    @param tmpl the VM template. Warning: the HostShareCapacity uses pointers
 *    into tmpl, so it must exist for the lifetime of the HostShareCapacity
 */
void set(int vid, Template& tmpl)
{
    float fcpu;

    pci.clear();
    nodes.clear();

    // Reset topology here so the early-return path below cannot leave a
    // stale pointer behind when this HostShareCapacity object is reused
    topology = nullptr;

    vmid = vid;

    if ((tmpl.get("MEMORY", mem) == false) ||
        (tmpl.get("CPU", fcpu) == false))
    {
        // Incomplete template: request zero capacity
        cpu  = 0;
        mem  = 0;
        disk = 0;

        vcpu = 0;

        return;
    }

    cpu  = (int) (fcpu * 100); //%
    mem  = mem * 1024;         //Kb
    disk = 0;

    tmpl.get("VCPU", vcpu);

    tmpl.get("PCI", pci);

    tmpl.get("NUMA_NODE", nodes);

    topology = tmpl.get("TOPOLOGY");
}
};

#endif /*HOST_SHARE_CAPACITY_H_*/
11 changes: 10 additions & 1 deletion include/VirtualMachine.h
Original file line number Diff line number Diff line change
Expand Up @@ -1030,10 +1030,19 @@ class VirtualMachine : public PoolObjectSQL

/**
* Get the VM physical capacity requirements for the host.
* @param sr the HostShareCapacity to store the capacity request.
* @param sr the HostShareCapacity to store the capacity request. The sr
* uses pointers into the VM template; do not destroy the VM object before sr.
*/
void get_capacity(HostShareCapacity &sr) const;

/**
* Get the VM physical capacity from the previous history
* @param sr the HostShareCapacity to store the capacity request.
* @param tmpl temporary object that holds the parsed template data; do not
* release the tmpl before the HostShareCapacity
*/
void get_previous_capacity(HostShareCapacity &sr, Template &tmpl) const;

/**
* Adds automatic placement requirements: Datastore and Cluster
* @param cluster_ids set of viable clusters for this VM
Expand Down
2 changes: 2 additions & 0 deletions src/dm/DispatchManagerActions.cc
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,8 @@ int DispatchManager::import(unique_ptr<VirtualMachine> vm, const RequestAttribut

vm->set_running_stime(the_time);

vm->set_vm_info();

vmpool->update_history(vm.get());

vmpool->update(vm.get());
Expand Down
8 changes: 8 additions & 0 deletions src/lcm/LifeCycleActions.cc
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ void LifeCycleManager::trigger_deploy(int vid)

vm->set_prolog_stime(thetime);

vm->set_vm_info();

vmpool->update_history(vm.get());

vmpool->update(vm.get());
Expand Down Expand Up @@ -254,6 +256,8 @@ void LifeCycleManager::trigger_migrate(int vid, const RequestAttributes& ra,

vm->set_action(vm_action, uid, gid, req_id);

vm->set_vm_info();

vmpool->update_history(vm.get());

vm->set_previous_action(vm_action, uid, gid, req_id);
Expand Down Expand Up @@ -333,6 +337,8 @@ void LifeCycleManager::trigger_migrate(int vid, const RequestAttributes& ra,

vm->set_prolog_stime(the_time);

vm->set_vm_info();

vmpool->update_history(vm.get());

vmpool->update(vm.get());
Expand Down Expand Up @@ -386,6 +392,8 @@ void LifeCycleManager::trigger_live_migrate(int vid, const RequestAttributes& ra

vm->set_stime(time(0));

vm->set_vm_info();

vmpool->update_history(vm.get());

vm->set_previous_action(VMActions::LIVE_MIGRATE_ACTION, uid, gid,
Expand Down
7 changes: 5 additions & 2 deletions src/lcm/LifeCycleStates.cc
Original file line number Diff line number Diff line change
Expand Up @@ -57,10 +57,11 @@ void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm)

vmpool->update(vm);

vm->get_capacity(sr);

if ( vm->get_hid() != vm->get_previous_hid() )
{
Template tmpl;
vm->get_previous_capacity(sr, tmpl);

hpool->del_capacity(vm->get_previous_hid(), sr);

vm->release_previous_vnc_port();
Expand Down Expand Up @@ -781,6 +782,8 @@ void LifeCycleManager::trigger_prolog_failure(int vid)

hpool->add_capacity(vm->get_hid(), sr);

vm->set_vm_info();

vmpool->insert_history(vm.get());

vmpool->update(vm.get());
Expand Down
44 changes: 25 additions & 19 deletions src/vm/VirtualMachine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1987,39 +1987,45 @@ void VirtualMachine::cp_previous_history()

void VirtualMachine::get_capacity(HostShareCapacity& sr) const
{
    // Capacity is read straight from the live VM template. Warning: sr keeps
    // pointers into obj_template, so the VM object must outlive sr.
    sr.set(oid, *obj_template);
}

/* -------------------------------------------------------------------------- */

void VirtualMachine::get_previous_capacity(HostShareCapacity& sr, Template &tmpl) const
{
    // Nothing to do if the VM has no previous history record
    if (!previous_history)
    {
        return;
    }

    const string& vm_info = previous_history->vm_info;

    ObjectXML xml(vm_info);

    // get_nodes returns copies of the matched nodes; they must be released
    // with ObjectXML::free_nodes on every exit path below
    vector<xmlNodePtr> content;
    xml.get_nodes("/VM/TEMPLATE", content);

    if (content.empty())
    {
        NebulaLog::error("ONE", "Unable to read capacity from previous history");

        return;
    }

    auto rc = tmpl.from_xml_node(content[0]);

    // from_xml_node builds its own attribute copies, so the nodes can be
    // freed here; the original code leaked them on the rc != 0 early return
    ObjectXML::free_nodes(content);

    if (rc != 0)
    {
        NebulaLog::error("ONE", "Unable to parse capacity from previous history");

        return;
    }

    sr.set(oid, tmpl);
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
Expand Down

0 comments on commit e48aedb

Please sign in to comment.