Skip to content

Commit

Permalink
uncomment tests
Browse files — browse the repository at this point in the history
  • Loading branch information
csegarragonz committed Jun 6, 2021
1 parent 8321bfc commit 26d57ac
Show file tree
Hide file tree
Showing 2 changed files with 59 additions and 37 deletions.
35 changes: 26 additions & 9 deletions tests/test/scheduler/test_function_client_server.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,35 +51,53 @@ class ClientServerFixture
}
};

/*
TEST_CASE_METHOD(ClientServerFixture, "Test sending MPI message", "[scheduler]")
{
// Create an MPI world on this host and one on a "remote" host
std::string otherHost = "192.168.9.2";
auto& sch = faabric::scheduler::getScheduler();

// Force the scheduler to initialise a world in the remote host by setting
// a worldSize bigger than the slots available locally
int worldSize = 2;
faabric::HostResources localResources;
localResources.set_slots(1);
localResources.set_usedslots(1);
faabric::HostResources otherResources;
otherResources.set_slots(1);

// Set up a remote host
std::string otherHost = LOCALHOST;
sch.addHostToGlobalSet(otherHost);

// Mock everything to make sure the other host has resources as well
faabric::util::setMockMode(true);
sch.setThisHostResources(localResources);
faabric::scheduler::queueResourceResponse(otherHost, otherResources);

// Create an MPI world on this host and one on a "remote" host
const char* user = "mpi";
const char* func = "hellompi";
int worldId = 123;
faabric::Message msg;
msg.set_user(user);
msg.set_function(func);
msg.set_mpiworldid(worldId);
msg.set_mpiworldsize(2);
msg.set_mpiworldsize(worldSize);
faabric::util::messageFactory(user, func);

scheduler::MpiWorldRegistry& registry = getMpiWorldRegistry();
scheduler::MpiWorld& localWorld =
registry.createWorld(msg, worldId, LOCALHOST);
registry.createWorld(msg, worldId);

scheduler::MpiWorld remoteWorld;
remoteWorld.overrideHost(otherHost);
remoteWorld.initialiseFromState(msg, worldId);
remoteWorld.initialiseFromMsg(msg);

// Register a rank on each
int rankLocal = 0;
int rankRemote = 1;
localWorld.registerRank(rankLocal);
remoteWorld.registerRank(rankRemote);

// Undo the mocking, so we actually send the MPI message
faabric::util::setMockMode(false);

// Create a message
faabric::MPIMessage mpiMsg;
Expand All @@ -104,7 +122,6 @@ TEST_CASE_METHOD(ClientServerFixture, "Test sending MPI message", "[scheduler]")
remoteWorld.destroy();
registry.clear();
}
*/

TEST_CASE_METHOD(ClientServerFixture,
"Test sending flush message",
Expand Down
61 changes: 33 additions & 28 deletions tests/test/scheduler/test_mpi_world.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,45 +91,50 @@ TEST_CASE("Test world loading from msg", "[mpi]")
tearDown({ &worldA, &worldB });
}

/*
TEST_CASE("Test registering a rank", "[mpi]")
TEST_CASE("Test rank allocation", "[mpi]")
{
cleanFaabric();

// Note, we deliberately make the host names different lengths,
// shorter than the buffer
std::string hostA = faabric::util::randomString(MPI_HOST_STATE_LEN - 5);
std::string hostB = faabric::util::randomString(MPI_HOST_STATE_LEN - 10);
auto& sch = faabric::scheduler::getScheduler();

// Create a world
const faabric::Message& msg = faabric::util::messageFactory(user, func);
scheduler::MpiWorld worldA;
worldA.overrideHost(hostA);
worldA.create(msg, worldId, worldSize);
// Force the scheduler to initialise a world in the remote host by setting
// a worldSize bigger than the slots available locally
int worldSize = 2;
faabric::HostResources localResources;
localResources.set_slots(1);
localResources.set_usedslots(1);
faabric::HostResources otherResources;
otherResources.set_slots(1);

// Register a rank to this host and check
int rankA = 5;
worldA.registerRank(5);
const std::string actualHost = worldA.getHostForRank(0);
REQUIRE(actualHost == hostA);
std::string thisHost = faabric::util::getSystemConfig().endpointHost;
std::string otherHost = LOCALHOST;
sch.addHostToGlobalSet(otherHost);

// Create a new instance of the world with a new host ID
scheduler::MpiWorld worldB;
worldB.overrideHost(hostB);
worldB.initialiseFromMsg(msg, worldId);
// Mock everything to make sure the other host has resources as well
faabric::util::setMockMode(true);
sch.setThisHostResources(localResources);
faabric::scheduler::queueResourceResponse(otherHost, otherResources);

int rankB = 4;
worldB.registerRank(4);
// Create a world
faabric::Message msg = faabric::util::messageFactory(user, func);
msg.set_mpiworldid(worldId);
msg.set_mpiworldsize(worldSize);

// Create the local world
scheduler::MpiWorld& localWorld =
getMpiWorldRegistry().createWorld(msg, worldId);

scheduler::MpiWorld remoteWorld;
remoteWorld.overrideHost(otherHost);
remoteWorld.initialiseFromMsg(msg);

// Now check both world instances report the same mappings
REQUIRE(worldA.getHostForRank(rankA) == hostA);
REQUIRE(worldA.getHostForRank(rankB) == hostB);
REQUIRE(worldB.getHostForRank(rankA) == hostA);
REQUIRE(worldB.getHostForRank(rankB) == hostB);
REQUIRE(localWorld.getHostForRank(0) == thisHost);
REQUIRE(localWorld.getHostForRank(1) == otherHost);

tearDown({ &worldA, &worldB });
faabric::util::setMockMode(false);
tearDown({ &localWorld, &remoteWorld });
}
*/

TEST_CASE("Test cartesian communicator", "[mpi]")
{
Expand Down

0 comments on commit 26d57ac

Please sign in to comment.