Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Memory #216

Merged
merged 20 commits into from
Jul 22, 2020
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions examples/flow_tracker/flow_tracker.c
Original file line number Diff line number Diff line change
Expand Up @@ -360,6 +360,8 @@ main(int argc, char *argv[]) {
onvm_nflib_run(nf_local_ctx);

onvm_nflib_stop(nf_local_ctx);
onvm_ft_free(state_info->ft);
rte_free(state_info);
printf("If we reach here, program is ending!\n");
return 0;
}
2 changes: 2 additions & 0 deletions examples/load_balancer/load_balancer.c
Original file line number Diff line number Diff line change
Expand Up @@ -617,6 +617,8 @@ main(int argc, char *argv[]) {
onvm_nflib_run(nf_local_ctx);

onvm_nflib_stop(nf_local_ctx);
onvm_ft_free(lb->ft);
rte_free(lb);
printf("If we reach here, program is ending\n");
return 0;
}
13 changes: 7 additions & 6 deletions onvm/lib/cJSON.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
#include <stdlib.h>
#include <limits.h>
#include <ctype.h>
#include <rte_memcpy.h>

#ifdef ENABLE_LOCALES
#include <locale.h>
Expand Down Expand Up @@ -172,7 +173,7 @@ static unsigned char* cJSON_strdup(const unsigned char* string, const internal_h
{
return NULL;
}
memcpy(copy, string, length);
rte_memcpy(copy, string, length);

return copy;
}
Expand Down Expand Up @@ -457,7 +458,7 @@ static unsigned char* ensure(printbuffer * const p, size_t needed)
}
if (newbuffer)
{
memcpy(newbuffer, p->buffer, p->offset + 1);
rte_memcpy(newbuffer, p->buffer, p->offset + 1);
}
p->hooks.deallocate(p->buffer);
}
Expand Down Expand Up @@ -897,7 +898,7 @@ static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffe
if (escape_characters == 0)
{
output[0] = '\"';
memcpy(output + 1, input, output_length);
rte_memcpy(output + 1, input, output_length);
output[output_length + 1] = '\"';
output[output_length + 2] = '\0';

Expand Down Expand Up @@ -1135,7 +1136,7 @@ static unsigned char *print(const cJSON * const item, cJSON_bool format, const i
{
goto fail;
}
memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1));
rte_memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1));
printed[buffer->offset] = '\0'; /* just to be sure */

/* free the buffer */
Expand Down Expand Up @@ -1329,7 +1330,7 @@ static cJSON_bool print_value(const cJSON * const item, printbuffer * const outp
{
return false;
}
memcpy(output, item->valuestring, raw_length);
rte_memcpy(output, item->valuestring, raw_length);
return true;
}

Expand Down Expand Up @@ -1848,7 +1849,7 @@ static cJSON *create_reference(const cJSON *item, const internal_hooks * const h
return NULL;
}

memcpy(reference, item, sizeof(cJSON));
rte_memcpy(reference, item, sizeof(cJSON));
reference->string = NULL;
reference->type |= cJSON_IsReference;
reference->next = reference->prev = NULL;
Expand Down
134 changes: 102 additions & 32 deletions onvm/onvm_mgr/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -207,9 +207,6 @@ rx_thread_main(void *arg) {
}

RTE_LOG(INFO, APP, "Core %d: RX thread done\n", rte_lcore_id());

free(rx_mgr->nf_rx_bufs);
free(rx_mgr);
return 0;
}

Expand Down Expand Up @@ -254,11 +251,6 @@ tx_thread_main(void *arg) {
}

RTE_LOG(INFO, APP, "Core %d: TX thread done\n", rte_lcore_id());

free(tx_mgr->tx_thread_info->port_tx_bufs);
free(tx_mgr->tx_thread_info);
free(tx_mgr->nf_rx_bufs);
free(tx_mgr);
return 0;
}

Expand Down Expand Up @@ -310,8 +302,47 @@ wakeup_thread_main(void *arg) {
return 0;
}

/*
 * Free all memory allocated by main(): per-lcore TX/RX queue managers and,
 * when core sharing is enabled, the wakeup thread contexts.
 *
 * Safe to call on a partially-initialized state after an allocation failure:
 * main() fills each array in order, so the first NULL slot marks the end of
 * the valid entries and iteration stops there.  Each slot pointer is checked
 * for NULL *before* any of its members are dereferenced.  rte_free(NULL) is
 * a no-op, so member pointers need no individual guards.
 */
static void
onvm_main_free(unsigned tx_lcores, unsigned rx_lcores, struct queue_mgr *tx_mgr[],
               struct queue_mgr *rx_mgr[], struct wakeup_thread_context *wakeup_ctx[]) {
        unsigned i;
        for (i = 0; i < tx_lcores; i++) {
                /* Check the slot before touching its members (was a NULL deref). */
                if (tx_mgr[i] == NULL) {
                        break;
                }
                if (tx_mgr[i]->tx_thread_info != NULL) {
                        rte_free(tx_mgr[i]->tx_thread_info->port_tx_bufs);
                        rte_free(tx_mgr[i]->tx_thread_info);
                }
                rte_free(tx_mgr[i]->nf_rx_bufs);
                rte_free(tx_mgr[i]);
        }
        for (i = 0; i < rx_lcores; i++) {
                if (rx_mgr[i] == NULL) {
                        break;
                }
                rte_free(rx_mgr[i]->nf_rx_bufs);
                rte_free(rx_mgr[i]);
        }
        if (ONVM_NF_SHARE_CORES) {
                for (i = 0; i < ONVM_NUM_WAKEUP_THREADS; i++) {
                        if (wakeup_ctx[i] == NULL) {
                                break;
                        }
                        rte_free(wakeup_ctx[i]);
                }
        }
}
/*******************************Main function*********************************/

int
main(int argc, char *argv[]) {
unsigned cur_lcore, rx_lcores, tx_lcores, wakeup_lcores;
Expand Down Expand Up @@ -373,57 +404,96 @@ main(int argc, char *argv[]) {
signal(SIGINT, handle_signal);
signal(SIGTERM, handle_signal);

struct queue_mgr *tx_mgr[tx_lcores];
struct queue_mgr *rx_mgr[rx_lcores];
struct wakeup_thread_context *wakeup_ctx[ONVM_NUM_WAKEUP_THREADS];

for (i = 0; i < tx_lcores; i++) {
struct queue_mgr *tx_mgr = calloc(1, sizeof(struct queue_mgr));
tx_mgr->mgr_type_t = MGR;
tx_mgr->id = i;
tx_mgr->tx_thread_info = calloc(1, sizeof(struct tx_thread_info));
tx_mgr->tx_thread_info->port_tx_bufs = calloc(RTE_MAX_ETHPORTS, sizeof(struct packet_buf));
tx_mgr->nf_rx_bufs = calloc(MAX_NFS, sizeof(struct packet_buf));
tx_mgr->tx_thread_info->first_nf = RTE_MIN(i * nfs_per_tx + 1, (unsigned)MAX_NFS);
tx_mgr->tx_thread_info->last_nf = RTE_MIN((i + 1) * nfs_per_tx + 1, (unsigned)MAX_NFS);
tx_mgr[i] = rte_calloc(NULL, 1, sizeof(struct queue_mgr), 0);
if (tx_mgr[i] == NULL) {
RTE_LOG(ERR, APP, "Can't allocate queue_mgr struct\n");
onvm_main_free(tx_lcores, rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
tx_mgr[i]->mgr_type_t = MGR;
tx_mgr[i]->id = i;
tx_mgr[i]->tx_thread_info = rte_calloc(NULL, 1, sizeof(struct tx_thread_info), 0);
if (tx_mgr[i]->tx_thread_info == NULL) {
RTE_LOG(ERR, APP, "Can't allocate tx_thread_info struct\n");
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
tx_mgr[i]->tx_thread_info->port_tx_bufs =
rte_calloc(NULL, RTE_MAX_ETHPORTS, sizeof(struct packet_buf), 0);
if (tx_mgr[i]->tx_thread_info->port_tx_bufs == NULL) {
RTE_LOG(ERR, APP, "Can't allocate packet_buf struct\n");
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
tx_mgr[i]->nf_rx_bufs = rte_calloc(NULL, MAX_NFS, sizeof(struct packet_buf), 0);
if (tx_mgr[i]->nf_rx_bufs == NULL) {
RTE_LOG(ERR, APP, "Can't allocate packet_buf struct\n");
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think that calling onvm_main_free after every check may be redundant, could we maybe set a flag and move that call out of the loop so that it's called only once? We should fail on any unsuccessful allocation anyway.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think calling onvm_main_free followed by RTE_EXIT would be best? If not I could also see using a flag or a goto label as another option.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@bdevierno1 you probably want to just set a flag and break out of the loop. Then just log that we couldn't allocate the structs required to continue.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

^

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just made a change as per your suggestions. I ended up using a goto label, as using a break would only exit the current for loop: if I used a break in the first loop, execution would still enter the next loop.

return -1;
}
tx_mgr[i]->tx_thread_info->first_nf = RTE_MIN(i * nfs_per_tx + 1, (unsigned)MAX_NFS);
tx_mgr[i]->tx_thread_info->last_nf = RTE_MIN((i + 1) * nfs_per_tx + 1, (unsigned)MAX_NFS);
cur_lcore = rte_get_next_lcore(cur_lcore, 1, 1);
if (rte_eal_remote_launch(tx_thread_main, (void *)tx_mgr, cur_lcore) == -EBUSY) {
if (rte_eal_remote_launch(tx_thread_main, (void *)tx_mgr[i], cur_lcore) == -EBUSY) {
RTE_LOG(ERR, APP, "Core %d is already busy, can't use for nf %d TX\n", cur_lcore,
tx_mgr->tx_thread_info->first_nf);
tx_mgr[i]->tx_thread_info->first_nf);
return -1;
}
}

/* Launch RX thread main function for each RX queue on cores */
for (i = 0; i < rx_lcores; i++) {
struct queue_mgr *rx_mgr = calloc(1, sizeof(struct queue_mgr));
rx_mgr->mgr_type_t = MGR;
rx_mgr->id = i;
rx_mgr->tx_thread_info = NULL;
rx_mgr->nf_rx_bufs = calloc(MAX_NFS, sizeof(struct packet_buf));
rx_mgr[i] = rte_calloc(NULL, 1, sizeof(struct queue_mgr), 0);
if (rx_mgr[i] == NULL) {
RTE_LOG(ERR, APP, "Can't allocate queue_mgr struct\n");
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
rx_mgr[i]->mgr_type_t = MGR;
rx_mgr[i]->id = i;
rx_mgr[i]->tx_thread_info = NULL;
rx_mgr[i]->nf_rx_bufs = rte_calloc(NULL, MAX_NFS, sizeof(struct packet_buf), 0);
if (rx_mgr[i] -> nf_rx_bufs == NULL) {
RTE_LOG(ERR, APP, "Can't allocate packet_buf struct\n");
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
cur_lcore = rte_get_next_lcore(cur_lcore, 1, 1);
if (rte_eal_remote_launch(rx_thread_main, (void *)rx_mgr, cur_lcore) == -EBUSY) {
if (rte_eal_remote_launch(rx_thread_main, (void *)rx_mgr[i], cur_lcore) == -EBUSY) {
RTE_LOG(ERR, APP, "Core %d is already busy, can't use for RX queue id %d\n", cur_lcore,
rx_mgr->id);
rx_mgr[i]->id);
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
}

if (ONVM_NF_SHARE_CORES) {
nfs_per_wakeup_thread = ceil((unsigned)MAX_NFS / wakeup_lcores);
for (i = 0; i < ONVM_NUM_WAKEUP_THREADS; i++) {
struct wakeup_thread_context *wakeup_ctx = calloc(1, sizeof(struct wakeup_thread_context));
if (wakeup_ctx == NULL) {
wakeup_ctx[i] = rte_calloc(NULL, 1, sizeof(struct wakeup_thread_context), 0);
if (wakeup_ctx[i] == NULL) {
RTE_LOG(ERR, APP, "Can't allocate wakeup info struct\n");
onvm_main_free(tx_lcores, rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
wakeup_ctx->first_nf = RTE_MIN(i * nfs_per_wakeup_thread + 1, (unsigned)MAX_NFS);
wakeup_ctx->last_nf = RTE_MIN((i + 1) * nfs_per_wakeup_thread + 1, (unsigned)MAX_NFS);
wakeup_ctx[i]->first_nf = RTE_MIN(i * nfs_per_wakeup_thread + 1, (unsigned)MAX_NFS);
wakeup_ctx[i]->last_nf = RTE_MIN((i + 1) * nfs_per_wakeup_thread + 1, (unsigned)MAX_NFS);
cur_lcore = rte_get_next_lcore(cur_lcore, 1, 1);
if (rte_eal_remote_launch(wakeup_thread_main, (void*)wakeup_ctx, cur_lcore) == -EBUSY) {
if (rte_eal_remote_launch(wakeup_thread_main, (void*)wakeup_ctx[i], cur_lcore) == -EBUSY) {
RTE_LOG(ERR, APP, "Core %d is already busy, can't use for nf %d wakeup thread\n",
cur_lcore, wakeup_ctx->first_nf);
cur_lcore, wakeup_ctx[i]->first_nf);
onvm_main_free(tx_lcores, rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return -1;
}
}
}
/* Master thread handles statistics and NF management */
master_thread_main();
onvm_main_free(tx_lcores,rx_lcores, tx_mgr, rx_mgr, wakeup_ctx);
return 0;
}
6 changes: 4 additions & 2 deletions onvm/onvm_mgr/onvm_stats.c
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ void
onvm_stats_gen_event_info(const char *msg, uint8_t type, void *data) {
struct onvm_event *event;

event = (struct onvm_event *)malloc(sizeof(struct onvm_event));
event = (struct onvm_event *)rte_malloc("onvm stats gen event info", sizeof(struct onvm_event), 0);
if (event == NULL) {
perror("Couldn't allocate event");
return;
Expand All @@ -246,7 +246,7 @@ void
onvm_stats_gen_event_nf_info(const char *msg, struct onvm_nf *nf) {
struct onvm_event *event;

event = (struct onvm_event *)malloc(sizeof(struct onvm_event));
event = (struct onvm_event *)rte_malloc("onvm stats gen event nf info", sizeof(struct onvm_event), 0);
if (event == NULL) {
perror("Couldn't allocate event");
return;
Expand All @@ -264,6 +264,7 @@ onvm_stats_gen_event_nf_info(const char *msg, struct onvm_nf *nf) {
static void
onvm_stats_add_event(struct onvm_event *event_info) {
if (event_info == NULL || stats_destination != ONVM_STATS_WEB) {
rte_free(event_info);
return;
}
char event_time_buf[20];
Expand Down Expand Up @@ -305,6 +306,7 @@ onvm_stats_add_event(struct onvm_event *event_info) {

cJSON_AddItemToObject(new_event, "source", source);
cJSON_AddItemToArray(onvm_json_events_arr, new_event);
rte_free(event_info);
}

static void
Expand Down
Loading