Commit 901211f

Revert "[Dynamic buffer calc] Bug fix: Remove PGs from an administratively down port. (sonic-net#1652)" (sonic-net#1676)
This reverts commit 908e0c6.
1 parent 908e0c6 commit 901211f

File tree

4 files changed: +147 -373 lines changed

cfgmgr/buffer_pool_mellanox.lua: +39 -34
@@ -12,7 +12,7 @@ local lossypg_400g = 0
 local result = {}
 local profiles = {}
 
-local total_port = 0
+local count_up_port = 0
 
 local mgmt_pool_size = 256 * 1024
 local egress_mirror_headroom = 10 * 1024
@@ -30,46 +30,56 @@ end
 
 local function iterate_all_items(all_items)
     table.sort(all_items)
+    local prev_port = "None"
     local port
+    local is_up
     local fvpairs
+    local status
+    local admin_down_ports = 0
     for i = 1, #all_items, 1 do
-        -- Count the number of priorities or queues in each BUFFER_PG or BUFFER_QUEUE item
-        -- For example, there are:
-        -- 3 queues in 'BUFFER_QUEUE_TABLE:Ethernet0:0-2'
-        -- 2 priorities in 'BUFFER_PG_TABLE:Ethernet0:3-4'
+        -- Check whether the port on which pg or tc hosts is admin down
         port = string.match(all_items[i], "Ethernet%d+")
         if port ~= nil then
-            local range = string.match(all_items[i], "Ethernet%d+:([^%s]+)$")
-            local profile = redis.call('HGET', all_items[i], 'profile')
-            local index = find_profile(profile)
-            if index == 0 then
-                -- Indicate an error in case the referenced profile hasn't been inserted or has been removed
-                -- It's possible when the orchagent is busy
-                -- The buffermgrd will take care of it and retry later
-                return 1
-            end
-            local size
-            if string.len(range) == 1 then
-                size = 1
-            else
-                size = 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1))
+            if prev_port ~= port then
+                status = redis.call('HGET', 'PORT_TABLE:'..port, 'admin_status')
+                prev_port = port
+                if status == "down" then
+                    is_up = false
+                else
+                    is_up = true
+                end
             end
-            profiles[index][2] = profiles[index][2] + size
-            local speed = redis.call('HGET', 'PORT_TABLE:'..port, 'speed')
-            if speed == '400000' and profile == '[BUFFER_PROFILE_TABLE:ingress_lossy_profile]' then
-                lossypg_400g = lossypg_400g + size
+            if is_up == true then
+                local range = string.match(all_items[i], "Ethernet%d+:([^%s]+)$")
+                local profile = redis.call('HGET', all_items[i], 'profile')
+                local index = find_profile(profile)
+                local size
+                if string.len(range) == 1 then
+                    size = 1
+                else
+                    size = 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1))
+                end
+                profiles[index][2] = profiles[index][2] + size
+                local speed = redis.call('HGET', 'PORT_TABLE:'..port, 'speed')
+                if speed == '400000' and profile == '[BUFFER_PROFILE_TABLE:ingress_lossy_profile]' then
+                    lossypg_400g = lossypg_400g + size
+                end
             end
         end
     end
-    return 0
 end
 
 -- Connect to CONFIG_DB
 redis.call('SELECT', config_db)
 
 local ports_table = redis.call('KEYS', 'PORT|*')
 
-total_port = #ports_table
+for i = 1, #ports_table do
+    local status = redis.call('HGET', ports_table[i], 'admin_status')
+    if status == "up" then
+        count_up_port = count_up_port + 1
+    end
+end
 
 local egress_lossless_pool_size = redis.call('HGET', 'BUFFER_POOL|egress_lossless_pool', 'size')
 
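A note on the size arithmetic restored in the hunk above: it reads only the first and last characters of the key's range suffix, so it assumes single-digit bounds such as '0-2' or '3-4' (the form SONiC buffer keys take). A minimal standalone sketch of that parsing and arithmetic (plain Lua, no redis; the range_size helper is hypothetical, extracted here only for illustration):

```lua
-- Hypothetical helper mirroring the restored range-size arithmetic.
-- Assumes single-digit bounds, e.g. '0-2', as in SONiC buffer keys.
local function range_size(range)
    if string.len(range) == 1 then
        return 1  -- a single priority or queue, e.g. '3'
    end
    -- '0-2' -> 1 + 2 - 0 = 3; only the first and last characters are read
    return 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1))
end

-- Key parsing as in iterate_all_items
local key = 'BUFFER_QUEUE_TABLE:Ethernet0:0-2'
local port = string.match(key, "Ethernet%d+")             -- 'Ethernet0'
local range = string.match(key, "Ethernet%d+:([^%s]+)$")  -- '0-2'

assert(port == 'Ethernet0')
assert(range_size(range) == 3)  -- 3 queues in 'BUFFER_QUEUE_TABLE:Ethernet0:0-2'
assert(range_size('3-4') == 2)  -- 2 priorities in 'BUFFER_PG_TABLE:Ethernet0:3-4'
assert(range_size('3') == 1)
```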
@@ -104,12 +114,8 @@ end
 local all_pgs = redis.call('KEYS', 'BUFFER_PG*')
 local all_tcs = redis.call('KEYS', 'BUFFER_QUEUE*')
 
-local fail_count = 0
-fail_count = fail_count + iterate_all_items(all_pgs)
-fail_count = fail_count + iterate_all_items(all_tcs)
-if fail_count > 0 then
-    return {}
-end
+iterate_all_items(all_pgs)
+iterate_all_items(all_tcs)
 
 local statistics = {}
 
@@ -124,7 +130,7 @@ for i = 1, #profiles, 1 do
         size = size + lossypg_reserved
     end
     if profiles[i][1] == "BUFFER_PROFILE_TABLE:egress_lossy_profile" then
-        profiles[i][2] = total_port
+        profiles[i][2] = count_up_port
     end
     if size ~= 0 then
         if shp_enabled and shp_size == 0 then
@@ -146,7 +152,7 @@ local lossypg_extra_for_400g = (lossypg_reserved_400g - lossypg_reserved) * lossypg_400g
 accumulative_occupied_buffer = accumulative_occupied_buffer + lossypg_extra_for_400g
 
 -- Accumulate sizes for egress mirror and management pool
-local accumulative_egress_mirror_overhead = total_port * egress_mirror_headroom
+local accumulative_egress_mirror_overhead = count_up_port * egress_mirror_headroom
 accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_egress_mirror_overhead + mgmt_pool_size
 
 -- Fetch mmu_size
@@ -234,6 +240,5 @@ table.insert(result, "debug:egress_mirror:" .. accumulative_egress_mirror_overhead)
 table.insert(result, "debug:shp_enabled:" .. tostring(shp_enabled))
 table.insert(result, "debug:shp_size:" .. shp_size)
 table.insert(result, "debug:accumulative xoff:" .. accumulative_xoff)
-table.insert(result, "debug:total port:" .. total_port)
 
 return result
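For context on what this revert restores: buffer accounting is again driven only by admin-up ports (count_up_port) rather than all ports (total_port), both for the egress lossy profile count and for the egress mirror overhead. A self-contained sketch of that accounting with hypothetical port data (plain Lua, no redis):

```lua
-- Hypothetical PORT_TABLE stand-in; admin_status values as stored per port.
local ports = {
    Ethernet0 = 'up',
    Ethernet4 = 'down',  -- admin-down ports no longer contribute
    Ethernet8 = 'up',
}

local egress_mirror_headroom = 10 * 1024  -- same constant as in the script

-- Count only admin-up ports, as the restored loop over 'PORT|*' does
local count_up_port = 0
for _, admin_status in pairs(ports) do
    if admin_status == 'up' then
        count_up_port = count_up_port + 1
    end
end

-- Mirror overhead now scales with up ports (2), not all ports (3)
local accumulative_egress_mirror_overhead = count_up_port * egress_mirror_headroom
assert(count_up_port == 2)
assert(accumulative_egress_mirror_overhead == 20480)
```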
