[Reclaiming buffer] Common code update (sonic-net#1996)
- What I did
Common code update for reclaiming buffer.
1. Load zero_profiles when the dynamic buffer manager starts. The buffer manager does not consume them for now; this is to pass Azure CI.
2. Support removing a buffer pool.
3. Support exposing the maximum numbers of PGs and queues per port.
4. Support converting between a bitmap and a map string (a hedged sketch follows this list).
5. Change the log severity from ERROR to NOTICE when parsing a buffer profile from the buffer profile list fails. Typically this is resolved by retrying; the similar logs for parsing buffer PGs and queues are already at NOTICE.
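The bitmap/map-string conversion helpers are in files not rendered in this diff, so the following is only an illustrative sketch; the names bitmapToMapString and mapStringToBitmap are assumptions, not taken from the commit.

#include <cstdint>
#include <sstream>
#include <string>

// Render the set bits of a bitmap as a comma-separated index list, e.g. 0b1011 -> "0,1,3".
static std::string bitmapToMapString(uint64_t bitmap)
{
    std::ostringstream result;
    bool first = true;
    for (uint64_t index = 0; index < 64; ++index)
    {
        if (bitmap & (1ULL << index))
        {
            if (!first)
                result << ",";
            result << index;
            first = false;
        }
    }
    return result.str();
}

// Parse a comma-separated index list back into a bitmap, e.g. "0,1,3" -> 0b1011.
static uint64_t mapStringToBitmap(const std::string &str)
{
    uint64_t bitmap = 0;
    std::istringstream input(str);
    std::string token;
    while (std::getline(input, token, ','))
    {
        bitmap |= (1ULL << std::stoull(token));
    }
    return bitmap;
}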

- Why I did it
To split a large PR into smaller ones and help pass CI.

- How I verified it
vs tests and sonic-mgmt tests (an example command follows).
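For reference, the swss virtual-switch (vs) tests are normally run with pytest from the sonic-swss tests directory; the module name below is an assumption about which test covers the dynamic buffer manager, not taken from this commit:

cd sonic-swss/tests
sudo pytest -v test_buffer_dynamic.py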

Signed-off-by: Stephen Sun <stephens@nvidia.com>
stephenxs committed Nov 22, 2021
1 parent b91d8ba commit 32d7a69
Showing 10 changed files with 262 additions and 41 deletions.
37 changes: 24 additions & 13 deletions cfgmgr/buffermgrd.cpp
@@ -38,12 +38,13 @@ mutex gDbMutex;

void usage()
{
cout << "Usage: buffermgrd <-l pg_lookup.ini|-a asic_table.json [-p peripheral_table.json]>" << endl;
cout << "Usage: buffermgrd <-l pg_lookup.ini|-a asic_table.json [-p peripheral_table.json] [-z zero_profiles.json]>" << endl;
cout << " -l pg_lookup.ini: PG profile look up table file (mandatory for static mode)" << endl;
cout << " format: csv" << endl;
cout << " values: 'speed, cable, size, xon, xoff, dynamic_threshold, xon_offset'" << endl;
cout << " -a asic_table.json: ASIC-specific parameters definition (mandatory for dynamic mode)" << endl;
cout << " -p peripheral_table.json: Peripheral (eg. gearbox) parameters definition (mandatory for dynamic mode)" << endl;
cout << " -p peripheral_table.json: Peripheral (eg. gearbox) parameters definition (optional for dynamic mode)" << endl;
cout << " -z zero_profiles.json: Zero profiles definition for reclaiming unused buffers (optional for dynamic mode)" << endl;
}

void dump_db_item(KeyOpFieldsValuesTuple &db_item)
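With the new option, a dynamic-mode invocation that also supplies zero profiles could look like the following; the file paths are purely illustrative and not taken from the commit:

buffermgrd -a /usr/share/sonic/asic_table.json -p /usr/share/sonic/peripheral_table.json -z /usr/share/sonic/zero_profiles.json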
@@ -109,13 +110,13 @@ int main(int argc, char **argv)
string pg_lookup_file = "";
string asic_table_file = "";
string peripherial_table_file = "";
string json_file = "";
string zero_profile_file = "";
Logger::linkToDbNative("buffermgrd");
SWSS_LOG_ENTER();

SWSS_LOG_NOTICE("--- Starting buffermgrd ---");

while ((opt = getopt(argc, argv, "l:a:p:h")) != -1 )
while ((opt = getopt(argc, argv, "l:a:p:z:h")) != -1 )
{
switch (opt)
{
@@ -131,6 +132,9 @@ int main(int argc, char **argv)
case 'p':
peripherial_table_file = optarg;
break;
case 'z':
zero_profile_file = optarg;
break;
default: /* '?' */
usage();
return EXIT_FAILURE;
@@ -141,7 +145,9 @@
{
std::vector<Orch *> cfgOrchList;
bool dynamicMode = false;
shared_ptr<vector<KeyOpFieldsValuesTuple>> db_items_ptr;
shared_ptr<vector<KeyOpFieldsValuesTuple>> asic_table_ptr = nullptr;
shared_ptr<vector<KeyOpFieldsValuesTuple>> peripherial_table_ptr = nullptr;
shared_ptr<vector<KeyOpFieldsValuesTuple>> zero_profiles_ptr = nullptr;

DBConnector cfgDb("CONFIG_DB", 0);
DBConnector stateDb("STATE_DB", 0);
@@ -150,18 +156,23 @@
if (!asic_table_file.empty())
{
// Load the json file containing the SWITCH_TABLE
db_items_ptr = load_json(asic_table_file);
if (nullptr != db_items_ptr)
asic_table_ptr = load_json(asic_table_file);
if (nullptr != asic_table_ptr)
{
write_to_state_db(db_items_ptr);
db_items_ptr.reset();
write_to_state_db(asic_table_ptr);

if (!peripherial_table_file.empty())
{
//Load the json file containing the PERIPHERIAL_TABLE
db_items_ptr = load_json(peripherial_table_file);
if (nullptr != db_items_ptr)
write_to_state_db(db_items_ptr);
peripherial_table_ptr = load_json(peripherial_table_file);
if (nullptr != peripherial_table_ptr)
write_to_state_db(peripherial_table_ptr);
}

if (!zero_profile_file.empty())
{
//Load the json file containing the zero profiles
zero_profiles_ptr = load_json(zero_profile_file);
}

dynamicMode = true;
@@ -183,7 +194,7 @@ int main(int argc, char **argv)
TableConnector(&stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE),
TableConnector(&stateDb, STATE_PORT_TABLE_NAME)
};
cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, buffer_table_connectors, db_items_ptr));
cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, buffer_table_connectors, peripherial_table_ptr, zero_profiles_ptr));
}
else if (!pg_lookup_file.empty())
{
2 changes: 1 addition & 1 deletion cfgmgr/buffermgrdyn.cpp
@@ -27,7 +27,7 @@
using namespace std;
using namespace swss;

BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const vector<TableConnector> &tables, shared_ptr<vector<KeyOpFieldsValuesTuple>> gearboxInfo = nullptr) :
BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const vector<TableConnector> &tables, shared_ptr<vector<KeyOpFieldsValuesTuple>> gearboxInfo, shared_ptr<vector<KeyOpFieldsValuesTuple>> zeroProfilesInfo) :
Orch(tables),
m_platform(),
m_applDb(applDb),
2 changes: 1 addition & 1 deletion cfgmgr/buffermgrdyn.h
@@ -128,7 +128,7 @@ typedef std::map<std::string, std::string> gearbox_delay_t;
class BufferMgrDynamic : public Orch
{
public:
BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const std::vector<TableConnector> &tables, std::shared_ptr<std::vector<KeyOpFieldsValuesTuple>> gearboxInfo);
BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const std::vector<TableConnector> &tables, std::shared_ptr<std::vector<KeyOpFieldsValuesTuple>> gearboxInfo, std::shared_ptr<std::vector<KeyOpFieldsValuesTuple>> zeroProfilesInfo);
using Orch::doTask;

private:
66 changes: 48 additions & 18 deletions orchagent/bufferorch.cpp
@@ -210,6 +210,15 @@ bool BufferOrch::isPortReady(const std::string& port_name) const
return result;
}

void BufferOrch::clearBufferPoolWatermarkCounterIdList(const sai_object_id_t object_id)
{
if (m_isBufferPoolWatermarkCounterIdListGenerated)
{
string key = BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP ":" + sai_serialize_object_id(object_id);
m_flexCounterTable->del(key);
}
}

void BufferOrch::generateBufferPoolWatermarkCounterIdList(void)
{
// This function will be called in FlexCounterOrch when field:value tuple "FLEX_COUNTER_STATUS":"enable"
@@ -460,6 +469,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)

if (SAI_NULL_OBJECT_ID != sai_object)
{
clearBufferPoolWatermarkCounterIdList(sai_object);
sai_status = sai_buffer_api->remove_buffer_pool(sai_object);
if (SAI_STATUS_SUCCESS != sai_status)
{
@@ -699,6 +709,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
string op = kfvOp(tuple);
vector<string> tokens;
sai_uint32_t range_low, range_high;
bool need_update_sai = true;

SWSS_LOG_DEBUG("Processing:%s", key.c_str());
tokens = tokenize(key, delimiter);
@@ -736,6 +747,12 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
}
else if (op == DEL_COMMAND)
{
auto &typemap = (*m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]);
if (typemap.find(key) == typemap.end())
{
SWSS_LOG_INFO("%s doesn't not exist, don't need to notfiy SAI", key.c_str());
need_update_sai = false;
}
sai_buffer_profile = SAI_NULL_OBJECT_ID;
SWSS_LOG_NOTICE("Remove buffer queue %s", key.c_str());
removeObject(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key);
@@ -760,7 +777,6 @@
}
for (size_t ind = range_low; ind <= range_high; ind++)
{
sai_object_id_t queue_id;
SWSS_LOG_DEBUG("processing queue:%zd", ind);
if (port.m_queue_ids.size() <= ind)
{
@@ -772,16 +788,20 @@
SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str());
return task_process_status::task_need_retry;
}
queue_id = port.m_queue_ids[ind];
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id);
sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr);
if (sai_status != SAI_STATUS_SUCCESS)
if (need_update_sai)
{
SWSS_LOG_ERROR("Failed to set queue's buffer profile attribute, status:%d", sai_status);
task_process_status handle_status = handleSaiSetStatus(SAI_API_QUEUE, sai_status);
if (handle_status != task_process_status::task_success)
sai_object_id_t queue_id;
queue_id = port.m_queue_ids[ind];
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id);
sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr);
if (sai_status != SAI_STATUS_SUCCESS)
{
return handle_status;
SWSS_LOG_ERROR("Failed to set queue's buffer profile attribute, status:%d", sai_status);
task_process_status handle_status = handleSaiSetStatus(SAI_API_QUEUE, sai_status);
if (handle_status != task_process_status::task_success)
{
return handle_status;
}
}
}
}
@@ -823,6 +843,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
string op = kfvOp(tuple);
vector<string> tokens;
sai_uint32_t range_low, range_high;
bool need_update_sai = true;

SWSS_LOG_DEBUG("processing:%s", key.c_str());
tokens = tokenize(key, delimiter);
Expand Down Expand Up @@ -861,6 +882,12 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
}
else if (op == DEL_COMMAND)
{
auto &typemap = (*m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]);
if (typemap.find(key) == typemap.end())
{
SWSS_LOG_INFO("%s doesn't not exist, don't need to notfiy SAI", key.c_str());
need_update_sai = false;
}
sai_buffer_profile = SAI_NULL_OBJECT_ID;
SWSS_LOG_NOTICE("Remove buffer PG %s", key.c_str());
removeObject(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key);
@@ -886,7 +913,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
}
for (size_t ind = range_low; ind <= range_high; ind++)
{
sai_object_id_t pg_id;
SWSS_LOG_DEBUG("processing pg:%zd", ind);
if (port.m_priority_group_ids.size() <= ind)
{
@@ -901,16 +927,20 @@
}
else
{
pg_id = port.m_priority_group_ids[ind];
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to port:%s pg index:%zd, pg sai_id:0x%" PRIx64, sai_buffer_profile, port_name.c_str(), ind, pg_id);
sai_status_t sai_status = sai_buffer_api->set_ingress_priority_group_attribute(pg_id, &attr);
if (sai_status != SAI_STATUS_SUCCESS)
if (need_update_sai)
{
SWSS_LOG_ERROR("Failed to set port:%s pg:%zd buffer profile attribute, status:%d", port_name.c_str(), ind, sai_status);
task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status);
if (handle_status != task_process_status::task_success)
sai_object_id_t pg_id;
pg_id = port.m_priority_group_ids[ind];
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to port:%s pg index:%zd, pg sai_id:0x%" PRIx64, sai_buffer_profile, port_name.c_str(), ind, pg_id);
sai_status_t sai_status = sai_buffer_api->set_ingress_priority_group_attribute(pg_id, &attr);
if (sai_status != SAI_STATUS_SUCCESS)
{
return handle_status;
SWSS_LOG_ERROR("Failed to set port:%s pg:%zd buffer profile attribute, status:%d", port_name.c_str(), ind, sai_status);
task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status);
if (handle_status != task_process_status::task_success)
{
return handle_status;
}
}
}
}
1 change: 1 addition & 0 deletions orchagent/bufferorch.h
@@ -45,6 +45,7 @@ class BufferOrch : public Orch

void doTask() override;
virtual void doTask(Consumer& consumer);
void clearBufferPoolWatermarkCounterIdList(const sai_object_id_t object_id);
void initTableHandlers();
void initBufferReadyLists(DBConnector *confDb, DBConnector *applDb);
void initBufferReadyList(Table& table, bool isConfigDb);
