@@ -85,9 +85,24 @@ RingBuf_Fini(RingBuf *buf)
     PyMem_Free(items);
 }

-static void
-coalesce_items(RingBuf *buf, PyObject **new_items, Py_ssize_t new_capacity)
+// Resize the underlying items array of buf to the new capacity and arrange
+// the items contiguously in the new items array.
+//
+// Returns -1 on allocation failure or 0 on success.
+static int
+resize_ringbuf(RingBuf *buf, Py_ssize_t capacity)
 {
+    Py_ssize_t new_capacity = Py_MAX(INITIAL_RING_BUF_CAPACITY, capacity);
+    if (new_capacity == buf->items_cap) {
+        return 0;
+    }
+    assert(buf->num_items <= new_capacity);
+
+    PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
+    if (new_items == NULL) {
+        return -1;
+    }
+
     // Copy the "tail" of the old items array. This corresponds to "head" of
     // the abstract ring buffer.
     Py_ssize_t tail_size = Py_MIN(buf->num_items, buf->items_cap - buf->get_idx);
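
(Note on the new helper's clamp: Py_MAX(INITIAL_RING_BUF_CAPACITY, capacity) puts a floor under every resize request, and the early return makes an unchanged capacity a no-op. For illustration only, if INITIAL_RING_BUF_CAPACITY were 8, a shrink request for capacity 2 would be clamped to 8, and a second such request against an 8-slot buffer would return immediately without reallocating.)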
@@ -107,26 +122,8 @@ coalesce_items(RingBuf *buf, PyObject **new_items, Py_ssize_t new_capacity)
     buf->items_cap = new_capacity;
     buf->get_idx = 0;
     buf->put_idx = buf->num_items;
-}
-
-static void
-shrink_ringbuf(RingBuf *buf)
-{
-    Py_ssize_t new_capacity =
-        Py_MAX(INITIAL_RING_BUF_CAPACITY, buf->items_cap / 2);
-    assert(new_capacity >= buf->num_items);
-    if (new_capacity == buf->items_cap) {
-        return;
-    }

-    PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
-    if (new_items == NULL) {
-        // It's safe to ignore the failure; shrinking is an optimization and
-        // isn't required for correctness.
-        return;
-    }
-
-    coalesce_items(buf, new_items, new_capacity);
+    return 0;
 }

 // Returns an owned reference
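
(The copying code that falls between the two hunks above, of which only the first and last lines appear as context, lays the live items out contiguously in two steps: first the run from get_idx to the end of the old array, then whatever wrapped around to its front. A minimal standalone sketch of that technique with plain ints instead of PyObject pointers; the demo_ring and demo_resize names are made up for illustration and are not part of this patch:

#include <stdlib.h>
#include <string.h>

typedef struct {
    int *items;          // stand-in for PyObject **items
    size_t items_cap;
    size_t num_items;
    size_t get_idx;      // index of the oldest item
    size_t put_idx;      // index where the next item goes
} demo_ring;

// Reallocate to new_cap and lay the live items out contiguously from
// index 0: copy the "tail" (get_idx to the end of the old array) first,
// then whatever wrapped around to the front.
// Returns 0 on success or -1 on allocation failure.
static int
demo_resize(demo_ring *buf, size_t new_cap)
{
    int *new_items = calloc(new_cap, sizeof(int));
    if (new_items == NULL) {
        return -1;
    }
    size_t tail = buf->num_items < buf->items_cap - buf->get_idx
                      ? buf->num_items
                      : buf->items_cap - buf->get_idx;
    memcpy(new_items, buf->items + buf->get_idx, tail * sizeof(int));
    memcpy(new_items + tail, buf->items, (buf->num_items - tail) * sizeof(int));
    free(buf->items);
    buf->items = new_items;
    buf->items_cap = new_cap;
    buf->get_idx = 0;                // items now start at index 0 ...
    buf->put_idx = buf->num_items;   // ... and end just before put_idx
    return 0;
}

This mirrors the field updates shown as context at the top of the hunk above.)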
@@ -135,9 +132,14 @@ RingBuf_Get(RingBuf *buf)
 {
     assert(buf->num_items > 0);

-    if (buf->num_items < (buf->items_cap / 2)) {
-        // Items is less than 50% occupied, shrink it
-        shrink_ringbuf(buf);
+    if (buf->num_items < (buf->items_cap / 4)) {
+        // Items is less than 25% occupied, shrink it by 50%. This allows for
+        // growth without immediately needing to resize the underlying items
+        // array.
+        //
+        // It's safe to ignore allocation failures here; shrinking is an
+        // optimization that isn't required for correctness.
+        resize_ringbuf(buf, buf->items_cap / 2);
     }

     PyObject *item = buf->items[buf->get_idx];
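
(As a worked example of the new thresholds, with made-up numbers: at items_cap == 64 the old code shrank as soon as num_items dropped below 32, down to a capacity of 32, leaving the buffer nearly full and ready to grow right back on the next put. The new code waits until num_items drops below 16 and also shrinks to 32, so the buffer stays under 50% occupied after the shrink and can absorb roughly sixteen more puts before the next resize.)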
@@ -147,27 +149,15 @@ RingBuf_Get(RingBuf *buf)
     return item;
 }

-static int
-grow_ringbuf(RingBuf *buf)
-{
-    Py_ssize_t new_capacity =
-        Py_MAX(INITIAL_RING_BUF_CAPACITY, buf->items_cap * 2);
-    PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
-    if (new_items == NULL) {
-        PyErr_NoMemory();
-        return -1;
-    }
-    coalesce_items(buf, new_items, new_capacity);
-    return 0;
-}
-
 // Returns 0 on success or -1 if the buffer failed to grow.
 // Steals a reference to item.
 static int
 RingBuf_Put(RingBuf *buf, PyObject *item)
 {
     if (buf->num_items == buf->items_cap) {
-        if (grow_ringbuf(buf) < 0) {
+        // Buffer is full, grow it.
+        if (resize_ringbuf(buf, buf->items_cap * 2) < 0) {
+            PyErr_NoMemory();
             return -1;
         }
     }
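
(Taken together, the two call sites now split responsibilities: Put doubles capacity when the buffer is full and turns a failed resize into MemoryError, while Get opportunistically halves capacity and is free to ignore failures. A rough sketch of those call sites, continuing the hypothetical demo_ring/demo_resize sketch shown after the resize hunk above; illustrative only, not the patch itself:

// Grow-on-full: doubling the capacity gives amortized O(1) appends.
static int
demo_put(demo_ring *buf, int item)
{
    if (buf->num_items == buf->items_cap) {
        size_t new_cap = buf->items_cap ? buf->items_cap * 2 : 8;  // 8 is arbitrary
        if (demo_resize(buf, new_cap) < 0) {
            return -1;              // the real code raises MemoryError here
        }
    }
    buf->items[buf->put_idx] = item;
    buf->put_idx = (buf->put_idx + 1) % buf->items_cap;
    buf->num_items++;
    return 0;
}

// Shrink-on-get: only below 25% occupancy, and only down to half capacity,
// so an ignored allocation failure never affects correctness.
static int
demo_get(demo_ring *buf)            // caller must ensure the buffer is non-empty
{
    if (buf->num_items < buf->items_cap / 4) {
        (void)demo_resize(buf, buf->items_cap / 2);
    }
    int item = buf->items[buf->get_idx];
    buf->get_idx = (buf->get_idx + 1) % buf->items_cap;
    buf->num_items--;
    return item;
})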