Skip to content

Commit

Permalink
Introduce correct support for compacting GC. (#73)
Browse files Browse the repository at this point in the history
* Improve size computation.
  • Loading branch information
ioquatix authored Aug 21, 2023
1 parent 7a14eb7 commit 5ef599a
Show file tree
Hide file tree
Showing 6 changed files with 221 additions and 39 deletions.
16 changes: 16 additions & 0 deletions ext/io/event/selector/array.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,12 @@ inline static void IO_Event_Array_allocate(struct IO_Event_Array *array, size_t
array->element_size = element_size;
}

// Estimate heap memory retained by the array, for reporting via `dsize`.
// Upper bound: assumes every slot of the pointer table has an allocated element.
inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *array)
{
	size_t per_slot = sizeof(void*) + array->element_size;
	
	return array->count * per_slot;
}

inline static void IO_Event_Array_free(struct IO_Event_Array *array)
{
for (size_t i = 0; i < array->limit; i += 1) {
Expand Down Expand Up @@ -117,3 +123,13 @@ inline static void* IO_Event_Array_push(struct IO_Event_Array *array)
{
return IO_Event_Array_lookup(array, array->limit);
}

inline static void IO_Event_Array_each(struct IO_Event_Array *array, void (*callback)(void*))
{
for (size_t i = 0; i < array->limit; i += 1) {
void *element = array->base[i];
if (element) {
callback(element);
}
}
}
92 changes: 74 additions & 18 deletions ext/io/event/selector/epoll.c
Original file line number Diff line number Diff line change
Expand Up @@ -63,10 +63,75 @@ struct IO_Event_Selector_EPoll
struct IO_Event_Array descriptors;
};

// This represents zero or more fibers waiting for a specific descriptor.
struct IO_Event_Selector_EPoll_Descriptor
{
	// Intrusive head of the list of waiting fibers; each node is cast back to a
	// `IO_Event_Selector_EPoll_Waiting` by the GC mark/compact callbacks.
	struct IO_Event_List list;
	
	// The last IO object that was used to register events.
	// Marked movable and relocated by the compaction callbacks.
	VALUE io;
	
	// The union of all events we are waiting for:
	enum IO_Event waiting_events;
	
	// The union of events we are registered for:
	enum IO_Event registered_events;
};

static
void IO_Event_Selector_EPoll_Waiting_mark(struct IO_Event_List *_waiting)
{
struct IO_Event_Selector_EPoll_Waiting *waiting = (void*)_waiting;

if (waiting->fiber) {
rb_gc_mark_movable(waiting->fiber);
}
}

// GC mark callback for one descriptor: mark its IO and every waiting fiber.
static
void IO_Event_Selector_EPoll_Descriptor_mark(void *_descriptor)
{
	struct IO_Event_Selector_EPoll_Descriptor *descriptor = _descriptor;
	
	// Keep the registered IO object alive (movable, compaction-friendly):
	rb_gc_mark_movable(descriptor->io);
	
	// Mark the fiber of every waiter attached to this descriptor:
	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_EPoll_Waiting_mark);
}

// Top-level GC mark callback for the wrapped EPoll selector.
static
void IO_Event_Selector_EPoll_Type_mark(void *_selector)
{
	struct IO_Event_Selector_EPoll *selector = _selector;
	struct IO_Event_Array *descriptors = &selector->descriptors;
	
	// Mark the shared backend state (loop and ready queue):
	IO_Event_Selector_mark(&selector->backend);
	
	// Mark everything reachable from each registered descriptor:
	IO_Event_Array_each(descriptors, IO_Event_Selector_EPoll_Descriptor_mark);
}

static
void IO_Event_Selector_EPoll_Waiting_compact(struct IO_Event_List *_waiting)
{
struct IO_Event_Selector_EPoll_Waiting *waiting = (void*)_waiting;

if (waiting->fiber) {
waiting->fiber = rb_gc_location(waiting->fiber);
}
}

// GC compaction callback for one descriptor: update its IO and waiter fibers.
static
void IO_Event_Selector_EPoll_Descriptor_compact(void *_descriptor)
{
	struct IO_Event_Selector_EPoll_Descriptor *descriptor = _descriptor;
	
	// Refresh the IO object's address after compaction:
	descriptor->io = rb_gc_location(descriptor->io);
	
	// Refresh every waiter's fiber pointer:
	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_EPoll_Waiting_compact);
}

// Top-level GC compaction callback for the wrapped EPoll selector.
static
void IO_Event_Selector_EPoll_Type_compact(void *_selector)
{
	struct IO_Event_Selector_EPoll *selector = _selector;
	struct IO_Event_Array *descriptors = &selector->descriptors;
	
	// Update the shared backend state (loop and ready queue):
	IO_Event_Selector_compact(&selector->backend);
	
	// Update all VALUEs stored in the descriptor table:
	IO_Event_Array_each(descriptors, IO_Event_Selector_EPoll_Descriptor_compact);
}

static
Expand All @@ -79,7 +144,7 @@ void close_internal(struct IO_Event_Selector_EPoll *selector)
IO_Event_Interrupt_close(&selector->interrupt);
}
}

static
void IO_Event_Selector_EPoll_Type_free(void *_selector)
{
struct IO_Event_Selector_EPoll *selector = _selector;
Expand All @@ -91,37 +156,28 @@ void IO_Event_Selector_EPoll_Type_free(void *_selector)
free(selector);
}

size_t IO_Event_Selector_EPoll_Type_size(const void *selector)
static
size_t IO_Event_Selector_EPoll_Type_size(const void *_selector)
{
return sizeof(struct IO_Event_Selector_EPoll);
const struct IO_Event_Selector_EPoll *selector = _selector;

return sizeof(struct IO_Event_Selector_EPoll)
+ IO_Event_Array_memory_size(&selector->descriptors)
;
}

// Ruby type registration for the EPoll selector: wires the GC callbacks
// defined above into the wrapped object.
static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
	.wrap_struct_name = "IO_Event::Backend::EPoll",
	.function = {
		// Mark reachable VALUEs without pinning them (enables compaction):
		.dmark = IO_Event_Selector_EPoll_Type_mark,
		// Update stored VALUEs after objects move during compaction:
		.dcompact = IO_Event_Selector_EPoll_Type_compact,
		.dfree = IO_Event_Selector_EPoll_Type_free,
		// Report retained memory (struct + descriptor table) to ObjectSpace:
		.dsize = IO_Event_Selector_EPoll_Type_size,
	},
	.data = NULL,
	// Safe to free immediately during GC (dfree takes no Ruby objects):
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

// This represents zero or more fibers waiting for a specific descriptor.
struct IO_Event_Selector_EPoll_Descriptor
{
struct IO_Event_List list;

// The last IO object that was used to register events.
VALUE io;

// The union of all events we are waiting for:
enum IO_Event waiting_events;

// The union of events we are registered for:
enum IO_Event registered_events;
};

inline static
struct IO_Event_Selector_EPoll_Descriptor * IO_Event_Selector_EPoll_Descriptor_lookup(struct IO_Event_Selector_EPoll *selector, int descriptor)
{
Expand Down
87 changes: 70 additions & 17 deletions ext/io/event/selector/kqueue.c
Original file line number Diff line number Diff line change
Expand Up @@ -74,10 +74,71 @@ struct IO_Event_Selector_KQueue
struct IO_Event_Array descriptors;
};

// This represents zero or more fibers waiting for a specific descriptor.
struct IO_Event_Selector_KQueue_Descriptor
{
	// Intrusive head of the list of waiting fibers; each node is cast back to a
	// `IO_Event_Selector_KQueue_Waiting` by the GC mark/compact callbacks.
	struct IO_Event_List list;
	
	// The union of all events we are waiting for:
	enum IO_Event waiting_events;
	
	// The union of events we are registered for:
	enum IO_Event registered_events;
	
	// The events that are currently ready:
	enum IO_Event ready_events;
};

static
void IO_Event_Selector_KQueue_Waiting_mark(struct IO_Event_List *_waiting)
{
struct IO_Event_Selector_KQueue_Waiting *waiting = (void*)_waiting;

if (waiting->fiber) {
rb_gc_mark_movable(waiting->fiber);
}
}

// GC mark callback for one descriptor: mark every fiber waiting on it.
static
void IO_Event_Selector_KQueue_Descriptor_mark(void *_descriptor)
{
	struct IO_Event_Selector_KQueue_Descriptor *descriptor = _descriptor;
	struct IO_Event_List *waiters = &descriptor->list;
	
	IO_Event_List_immutable_each(waiters, IO_Event_Selector_KQueue_Waiting_mark);
}

// Top-level GC mark callback for the wrapped KQueue selector.
static
void IO_Event_Selector_KQueue_Type_mark(void *_selector)
{
	struct IO_Event_Selector_KQueue *selector = _selector;
	struct IO_Event_Array *descriptors = &selector->descriptors;
	
	// Mark the shared backend state (loop and ready queue):
	IO_Event_Selector_mark(&selector->backend);
	
	// Mark everything reachable from each registered descriptor:
	IO_Event_Array_each(descriptors, IO_Event_Selector_KQueue_Descriptor_mark);
}

// GC compaction callback for a waiting-list node: refresh the fiber's address.
static
void IO_Event_Selector_KQueue_Waiting_compact(struct IO_Event_List *_waiting)
{
	struct IO_Event_Selector_KQueue_Waiting *waiting = (void*)_waiting;
	
	if (waiting->fiber) {
		// BUG FIX: `rb_gc_location` only *returns* the object's new address; it
		// does not update the argument in place. The previous code discarded the
		// result, leaving `waiting->fiber` dangling after the fiber moved during
		// compaction. Assign it back, matching the EPoll implementation.
		waiting->fiber = rb_gc_location(waiting->fiber);
	}
}

// GC compaction callback for one descriptor: update every waiter's fiber.
static
void IO_Event_Selector_KQueue_Descriptor_compact(void *_descriptor)
{
	struct IO_Event_Selector_KQueue_Descriptor *descriptor = _descriptor;
	struct IO_Event_List *waiters = &descriptor->list;
	
	IO_Event_List_immutable_each(waiters, IO_Event_Selector_KQueue_Waiting_compact);
}

// Top-level GC compaction callback for the wrapped KQueue selector.
static
void IO_Event_Selector_KQueue_Type_compact(void *_selector)
{
	struct IO_Event_Selector_KQueue *selector = _selector;
	struct IO_Event_Array *descriptors = &selector->descriptors;
	
	// Update the shared backend state (loop and ready queue):
	IO_Event_Selector_compact(&selector->backend);
	
	// Update all VALUEs stored in the descriptor table:
	IO_Event_Array_each(descriptors, IO_Event_Selector_KQueue_Descriptor_compact);
}

static
Expand All @@ -89,6 +150,7 @@ void close_internal(struct IO_Event_Selector_KQueue *selector)
}
}

static
void IO_Event_Selector_KQueue_Type_free(void *_selector)
{
struct IO_Event_Selector_KQueue *selector = _selector;
Expand All @@ -100,37 +162,28 @@ void IO_Event_Selector_KQueue_Type_free(void *_selector)
free(selector);
}

size_t IO_Event_Selector_KQueue_Type_size(const void *selector)
static
size_t IO_Event_Selector_KQueue_Type_size(const void *_selector)
{
return sizeof(struct IO_Event_Selector_KQueue);
const struct IO_Event_Selector_KQueue *selector = _selector;

return sizeof(struct IO_Event_Selector_KQueue)
+ IO_Event_Array_memory_size(&selector->descriptors)
;
}

// Ruby type registration for the KQueue selector: wires the GC callbacks
// defined above into the wrapped object.
static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
	.wrap_struct_name = "IO_Event::Backend::KQueue",
	.function = {
		// Mark reachable VALUEs without pinning them (enables compaction):
		.dmark = IO_Event_Selector_KQueue_Type_mark,
		// Update stored VALUEs after objects move during compaction:
		.dcompact = IO_Event_Selector_KQueue_Type_compact,
		.dfree = IO_Event_Selector_KQueue_Type_free,
		// Report retained memory (struct + descriptor table) to ObjectSpace:
		.dsize = IO_Event_Selector_KQueue_Type_size,
	},
	.data = NULL,
	// Safe to free immediately during GC (dfree takes no Ruby objects):
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

// This represents zero or more fibers waiting for a specific descriptor.
struct IO_Event_Selector_KQueue_Descriptor
{
struct IO_Event_List list;

// The union of all events we are waiting for:
enum IO_Event waiting_events;

// The union of events we are registered for:
enum IO_Event registered_events;

// The events that are currently ready:
enum IO_Event ready_events;
};

inline static
struct IO_Event_Selector_KQueue_Descriptor * IO_Event_Selector_KQueue_Descriptor_lookup(struct IO_Event_Selector_KQueue *selector, uintptr_t descriptor)
{
Expand Down
11 changes: 11 additions & 0 deletions ext/io/event/selector/list.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,3 +69,14 @@ inline static int IO_Event_List_empty(struct IO_Event_List *list)
{
return list->head == list->tail;
}

// Visit every node of the circular list, stopping when we arrive back at the
// head sentinel. "Immutable": the callback must not insert or remove nodes.
inline static void IO_Event_List_immutable_each(struct IO_Event_List *list, void (*callback)(struct IO_Event_List *node))
{
	for (struct IO_Event_List *node = list->tail; node != list; node = node->tail) {
		callback(node);
	}
}
15 changes: 13 additions & 2 deletions ext/io/event/selector/selector.h
Original file line number Diff line number Diff line change
Expand Up @@ -109,11 +109,22 @@ void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE loop)

static inline
void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
rb_gc_mark(backend->loop);
rb_gc_mark_movable(backend->loop);

struct IO_Event_Selector_Queue *ready = backend->ready;
while (ready) {
rb_gc_mark(ready->fiber);
rb_gc_mark_movable(ready->fiber);
ready = ready->behind;
}
}

static inline
void IO_Event_Selector_compact(struct IO_Event_Selector *backend) {
backend->loop = rb_gc_location(backend->loop);

struct IO_Event_Selector_Queue *ready = backend->ready;
while (ready) {
ready->fiber = rb_gc_location(ready->fiber);
ready = ready->behind;
}
}
Expand Down
39 changes: 37 additions & 2 deletions ext/io/event/selector/uring.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,38 @@ struct IO_Event_Selector_URing_Completion
struct IO_Event_Selector_URing_Waiting *waiting;
};

static
void IO_Event_Selector_URing_Completion_mark(void *_completion)
{
struct IO_Event_Selector_URing_Completion *completion = _completion;

if (completion->waiting) {
rb_gc_mark_movable(completion->waiting->fiber);
}
}

// Top-level GC mark callback for the wrapped URing selector.
// Made `static` for internal linkage, consistent with the other GC callbacks
// in this file and with the EPoll/KQueue backends (it is only referenced via
// the rb_data_type_t table below).
static
void IO_Event_Selector_URing_Type_mark(void *_selector)
{
	struct IO_Event_Selector_URing *selector = _selector;
	
	// Mark the shared backend state (loop and ready queue):
	IO_Event_Selector_mark(&selector->backend);
	
	// Mark the fiber held by each in-flight completion:
	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_mark);
}

static
void IO_Event_Selector_URing_Completion_compact(void *_completion)
{
struct IO_Event_Selector_URing_Completion *completion = _completion;

if (completion->waiting) {
completion->waiting->fiber = rb_gc_location(completion->waiting->fiber);
}
}

// Top-level GC compaction callback for the wrapped URing selector.
// Made `static` for internal linkage, consistent with the other GC callbacks
// in this file and with the EPoll/KQueue backends (it is only referenced via
// the rb_data_type_t table below).
static
void IO_Event_Selector_URing_Type_compact(void *_selector)
{
	struct IO_Event_Selector_URing *selector = _selector;
	
	// Update the shared backend state (loop and ready queue):
	IO_Event_Selector_compact(&selector->backend);
	
	// Update the fiber stored by each in-flight completion:
	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_compact);
}

static
Expand All @@ -90,6 +118,7 @@ void close_internal(struct IO_Event_Selector_URing *selector)
}
}

static
void IO_Event_Selector_URing_Type_free(void *_selector)
{
struct IO_Event_Selector_URing *selector = _selector;
Expand All @@ -101,15 +130,21 @@ void IO_Event_Selector_URing_Type_free(void *_selector)
free(selector);
}

size_t IO_Event_Selector_URing_Type_size(const void *selector)
static
size_t IO_Event_Selector_URing_Type_size(const void *_selector)
{
return sizeof(struct IO_Event_Selector_URing);
const struct IO_Event_Selector_URing *selector = _selector;

return sizeof(struct IO_Event_Selector_URing)
+ IO_Event_Array_memory_size(&selector->completions)
;
}

static const rb_data_type_t IO_Event_Selector_URing_Type = {
.wrap_struct_name = "IO_Event::Backend::URing",
.function = {
.dmark = IO_Event_Selector_URing_Type_mark,
.dcompact = IO_Event_Selector_URing_Type_compact,
.dfree = IO_Event_Selector_URing_Type_free,
.dsize = IO_Event_Selector_URing_Type_size,
},
Expand Down

0 comments on commit 5ef599a

Please sign in to comment.