Skip to content

Commit aef04fc

Browse files
esposito authored and kevmw committed
thread-pool: avoid passing the pool parameter every time
thread_pool_submit_aio() is always called on a pool taken from qemu_get_current_aio_context(), and that is the only intended use: each pool runs only in the same thread that is submitting work to it, it can't run anywhere else. Therefore simplify the thread_pool_submit* API and remove the ThreadPool function parameter. Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> Message-Id: <20230203131731.851116-5-eesposit@redhat.com> Reviewed-by: Kevin Wolf <kwolf@redhat.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
1 parent 0fdb731 commit aef04fc

File tree

12 files changed

+27
-44
lines changed

12 files changed

+27
-44
lines changed

backends/tpm/tpm_backend.c

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -100,16 +100,14 @@ bool tpm_backend_had_startup_error(TPMBackend *s)
100100

101101
void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
102102
{
103-
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
104-
105103
if (s->cmd != NULL) {
106104
error_report("There is a TPM request pending");
107105
return;
108106
}
109107

110108
s->cmd = cmd;
111109
object_ref(OBJECT(s));
112-
thread_pool_submit_aio(pool, tpm_backend_worker_thread, s,
110+
thread_pool_submit_aio(tpm_backend_worker_thread, s,
113111
tpm_backend_request_completed, s);
114112
}
115113

block/file-posix.c

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -2042,9 +2042,7 @@ static int handle_aiocb_truncate(void *opaque)
20422042

20432043
static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
20442044
{
2045-
/* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
2046-
ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
2047-
return thread_pool_submit_co(pool, func, arg);
2045+
return thread_pool_submit_co(func, arg);
20482046
}
20492047

20502048
/*

block/file-win32.c

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -153,7 +153,6 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
153153
BlockCompletionFunc *cb, void *opaque, int type)
154154
{
155155
RawWin32AIOData *acb = g_new(RawWin32AIOData, 1);
156-
ThreadPool *pool;
157156

158157
acb->bs = bs;
159158
acb->hfile = hfile;
@@ -168,8 +167,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
168167
acb->aio_offset = offset;
169168

170169
trace_file_paio_submit(acb, opaque, offset, count, type);
171-
pool = aio_get_thread_pool(qemu_get_current_aio_context());
172-
return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
170+
return thread_pool_submit_aio(aio_worker, acb, cb, opaque);
173171
}
174172

175173
int qemu_ftruncate64(int fd, int64_t length)

block/qcow2-threads.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -43,7 +43,6 @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
4343
{
4444
int ret;
4545
BDRVQcow2State *s = bs->opaque;
46-
ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
4746

4847
qemu_co_mutex_lock(&s->lock);
4948
while (s->nb_threads >= QCOW2_MAX_THREADS) {
@@ -52,7 +51,7 @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
5251
s->nb_threads++;
5352
qemu_co_mutex_unlock(&s->lock);
5453

55-
ret = thread_pool_submit_co(pool, func, arg);
54+
ret = thread_pool_submit_co(func, arg);
5655

5756
qemu_co_mutex_lock(&s->lock);
5857
s->nb_threads--;

hw/9pfs/coth.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -41,6 +41,5 @@ static int coroutine_enter_func(void *arg)
4141
void co_run_in_worker_bh(void *opaque)
4242
{
4343
Coroutine *co = opaque;
44-
thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()),
45-
coroutine_enter_func, co, coroutine_enter_cb, co);
44+
thread_pool_submit_aio(coroutine_enter_func, co, coroutine_enter_cb, co);
4645
}

hw/ppc/spapr_nvdimm.c

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -496,7 +496,6 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
496496
{
497497
SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque;
498498
SpaprNVDIMMDeviceFlushState *state;
499-
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
500499
HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem);
501500
bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
502501
bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm),
@@ -517,7 +516,7 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
517516
}
518517

519518
QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
520-
thread_pool_submit_aio(pool, flush_worker_cb, state,
519+
thread_pool_submit_aio(flush_worker_cb, state,
521520
spapr_nvdimm_flush_completion_cb, state);
522521
}
523522

@@ -664,7 +663,6 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
664663
PCDIMMDevice *dimm;
665664
HostMemoryBackend *backend = NULL;
666665
SpaprNVDIMMDeviceFlushState *state;
667-
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
668666
int fd;
669667

670668
if (!drc || !drc->dev ||
@@ -699,7 +697,7 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
699697

700698
state->drcidx = drc_index;
701699

702-
thread_pool_submit_aio(pool, flush_worker_cb, state,
700+
thread_pool_submit_aio(flush_worker_cb, state,
703701
spapr_nvdimm_flush_completion_cb, state);
704702

705703
continue_token = state->continue_token;

hw/virtio/virtio-pmem.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -70,7 +70,6 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
7070
VirtIODeviceRequest *req_data;
7171
VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
7272
HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev);
73-
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
7473

7574
trace_virtio_pmem_flush_request();
7675
req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
@@ -88,7 +87,7 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
8887
req_data->fd = memory_region_get_fd(&backend->mr);
8988
req_data->pmem = pmem;
9089
req_data->vdev = vdev;
91-
thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data);
90+
thread_pool_submit_aio(worker_cb, req_data, done_cb, req_data);
9291
}
9392

9493
static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config)

include/block/thread-pool.h

Lines changed: 4 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -33,12 +33,10 @@ void thread_pool_free(ThreadPool *pool);
3333
* thread_pool_submit* API: submit I/O requests in the thread's
3434
* current AioContext.
3535
*/
36-
BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
37-
ThreadPoolFunc *func, void *arg,
38-
BlockCompletionFunc *cb, void *opaque);
39-
int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
40-
ThreadPoolFunc *func, void *arg);
41-
void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
36+
BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
37+
BlockCompletionFunc *cb, void *opaque);
38+
int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
39+
void thread_pool_submit(ThreadPoolFunc *func, void *arg);
4240

4341
void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
4442

scsi/pr-manager.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -51,7 +51,6 @@ static int pr_manager_worker(void *opaque)
5151
int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd,
5252
struct sg_io_hdr *hdr)
5353
{
54-
ThreadPool *pool = aio_get_thread_pool(ctx);
5554
PRManagerData data = {
5655
.pr_mgr = pr_mgr,
5756
.fd = fd,
@@ -62,7 +61,7 @@ int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd,
6261

6362
/* The matching object_unref is in pr_manager_worker. */
6463
object_ref(OBJECT(pr_mgr));
65-
return thread_pool_submit_co(pool, pr_manager_worker, &data);
64+
return thread_pool_submit_co(pr_manager_worker, &data);
6665
}
6766

6867
bool pr_manager_is_connected(PRManager *pr_mgr)

scsi/qemu-pr-helper.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,6 @@ static int do_sgio_worker(void *opaque)
180180
static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
181181
uint8_t *buf, int *sz, int dir)
182182
{
183-
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
184183
int r;
185184

186185
PRHelperSGIOData data = {
@@ -192,7 +191,7 @@ static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
192191
.dir = dir,
193192
};
194193

195-
r = thread_pool_submit_co(pool, do_sgio_worker, &data);
194+
r = thread_pool_submit_co(do_sgio_worker, &data);
196195
*sz = data.sz;
197196
return r;
198197
}

0 commit comments

Comments (0)