-
Notifications
You must be signed in to change notification settings - Fork 678
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Implement shadow queues to store pending WQEs. Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
- Loading branch information
Konstantin Taranov
committed
Jul 1, 2024
1 parent
7314f71
commit d47badd
Showing
2 changed files
with
146 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,145 @@ | ||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ | ||
/* | ||
* Copyright (c) 2024, Microsoft Corporation. All rights reserved. | ||
*/ | ||
|
||
#ifndef _SHADOW_QUEUE_H_ | ||
#define _SHADOW_QUEUE_H_ | ||
|
||
#include <stdio.h> | ||
#include <linux/types.h> | ||
#include <endian.h> | ||
#include <infiniband/verbs.h> | ||
#include <sys/mman.h> | ||
#include <util/util.h> | ||
|
||
/*
 * Metadata kept for every posted WQE so a completion (CQE) can later be
 * reported to the caller.  Embedded at the start of each shadow-queue entry.
 */
struct shadow_wqe_header {
	uint8_t opcode;		// see enum ibv_wc_opcode
	uint8_t flags;		// see enum ibv_wc_flags
	uint16_t posted_wqe_size_in_bu;	// posted WQE size in basic units — TODO confirm unit size
	uint32_t unmasked_queue_offset;	// presumably the un-wrapped offset in the HW queue; verify against caller
	uint64_t wr_id;		// caller-supplied work-request id, echoed back on completion
};

/* Shadow entry for a WQE posted to an RC send queue. */
struct rc_sq_shadow_wqe {
	struct shadow_wqe_header header;
	uint32_t end_psn;	// NOTE(review): presumably last PSN of this WQE — confirm
	uint32_t read_posted_wqe_size_in_bu;	// presumably size of the RDMA-read response WQE, in basic units
};

/* Shadow entry for a WQE posted to an RC receive queue. */
struct rc_rq_shadow_wqe {
	struct shadow_wqe_header header;
	uint32_t byte_len;	// bytes received, reported in the work completion
	uint32_t imm_or_rkey;	// immediate data or invalidated rkey — which one presumably depends on opcode
};

/*
 * Ring buffer of pending WQEs.  The three indices increase monotonically and
 * are masked with (length - 1) on access, so length must be a power of two
 * (create_shadow_queue enforces this).  The accessors below maintain
 * cons_idx <= next_to_complete_idx <= prod_idx:
 * entries are produced at prod_idx, completed at next_to_complete_idx,
 * and finally consumed (reported to the user) at cons_idx.
 */
struct shadow_queue {
	uint64_t prod_idx;		// next slot to fill with a posted WQE
	uint64_t cons_idx;		// next completed entry to hand back to the user
	uint64_t next_to_complete_idx;	// next posted entry awaiting hardware completion
	uint32_t length;		// capacity in entries; always a power of two
	uint32_t stride;		// bytes per entry; 8-byte aligned
	void *buffer;			// mmap'ed backing storage, stride * length bytes
};

static inline void reset_shadow_queue(struct shadow_queue *queue) | ||
{ | ||
queue->prod_idx = 0; | ||
queue->cons_idx = 0; | ||
queue->next_to_complete_idx = 0; | ||
} | ||
|
||
static inline int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride) | ||
{ | ||
length = roundup_pow_of_two(length); | ||
stride = align(stride, 8); | ||
|
||
void *buffer = mmap(NULL, stride * length, PROT_READ | PROT_WRITE, | ||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | ||
|
||
if (buffer == MAP_FAILED) | ||
return -1; | ||
|
||
queue->length = length; | ||
queue->stride = stride; | ||
reset_shadow_queue(queue); | ||
queue->buffer = buffer; | ||
return 0; | ||
} | ||
|
||
static inline void destroy_shadow_queue(struct shadow_queue *queue) | ||
{ | ||
if (queue->buffer) { | ||
munmap(queue->buffer, queue->stride * queue->length); | ||
queue->buffer = NULL; | ||
} | ||
} | ||
|
||
static inline struct shadow_wqe_header * | ||
shadow_queue_get_element(const struct shadow_queue *queue, uint64_t unmasked_index) | ||
{ | ||
uint32_t index = unmasked_index & (queue->length - 1); | ||
|
||
return (struct shadow_wqe_header *)((uint8_t *)queue->buffer + index * queue->stride); | ||
} | ||
|
||
static inline bool shadow_queue_full(struct shadow_queue *queue) | ||
{ | ||
return (queue->prod_idx - queue->cons_idx) >= queue->length; | ||
} | ||
|
||
static inline struct shadow_wqe_header * | ||
shadow_queue_producer_entry(struct shadow_queue *queue) | ||
{ | ||
return shadow_queue_get_element(queue, queue->prod_idx); | ||
} | ||
|
||
static inline void shadow_queue_advance_producer(struct shadow_queue *queue) | ||
{ | ||
queue->prod_idx++; | ||
} | ||
|
||
static inline void shadow_queue_retreat_producer(struct shadow_queue *queue) | ||
{ | ||
queue->prod_idx--; | ||
} | ||
|
||
static inline void shadow_queue_advance_consumer(struct shadow_queue *queue) | ||
{ | ||
queue->cons_idx++; | ||
} | ||
|
||
static inline bool shadow_queue_empty(struct shadow_queue *queue) | ||
{ | ||
return queue->prod_idx == queue->cons_idx; | ||
} | ||
|
||
static inline uint32_t shadow_queue_get_pending_wqe_count(struct shadow_queue *queue) | ||
{ | ||
return (uint32_t)(queue->prod_idx - queue->next_to_complete_idx); | ||
} | ||
|
||
static inline struct shadow_wqe_header * | ||
shadow_queue_get_next_to_consume(const struct shadow_queue *queue) | ||
{ | ||
if (queue->cons_idx == queue->next_to_complete_idx) | ||
return NULL; | ||
|
||
return shadow_queue_get_element(queue, queue->cons_idx); | ||
} | ||
|
||
static inline struct shadow_wqe_header * | ||
shadow_queue_get_next_to_complete(struct shadow_queue *queue) | ||
{ | ||
if (queue->next_to_complete_idx == queue->prod_idx) | ||
return NULL; | ||
|
||
return shadow_queue_get_element(queue, queue->next_to_complete_idx); | ||
} | ||
|
||
static inline void shadow_queue_advance_next_to_complete(struct shadow_queue *queue) | ||
{ | ||
queue->next_to_complete_idx++; | ||
} | ||
|
||
#endif //_SHADOW_QUEUE_H_ |