mirror of
https://github.com/torvalds/linux.git
synced 2026-01-25 07:47:50 +00:00
RDMA/core: RDMA/mlx5: replace use of system_unbound_wq with system_dfl_wq
Currently, if a user enqueues a work item using schedule_delayed_work(), the wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again makes use of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. system_unbound_wq should be the default workqueue, so as not to enforce locality constraints on random work whenever that is not required. system_dfl_wq is added to encourage its use whenever unbound work should be used. The old system_unbound_wq will be kept for a few release cycles. Suggested-by: Tejun Heo <tj@kernel.org> Signed-off-by: Marco Crivellari <marco.crivellari@suse.com> Link: https://patch.msgid.link/20251101163121.78400-2-marco.crivellari@suse.com Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
committed by
Leon Romanovsky
parent
da58d4223b
commit
f673fb3449
@@ -366,7 +366,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
|
||||
if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
|
||||
xa_lock(&ctx_table);
|
||||
if (xa_load(&ctx_table, ctx->id) == ctx)
|
||||
queue_work(system_unbound_wq, &ctx->close_work);
|
||||
queue_work(system_dfl_wq, &ctx->close_work);
|
||||
xa_unlock(&ctx_table);
|
||||
}
|
||||
return 0;
|
||||
|
||||
@@ -265,7 +265,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
|
||||
|
||||
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
|
||||
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
|
||||
queue_work(system_unbound_wq, &mr->odp_destroy.work);
|
||||
queue_work(system_dfl_wq, &mr->odp_destroy.work);
|
||||
}
|
||||
|
||||
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
|
||||
@@ -2093,6 +2093,6 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
|
||||
destroy_prefetch_work(work);
|
||||
return rc;
|
||||
}
|
||||
queue_work(system_unbound_wq, &work->work);
|
||||
queue_work(system_dfl_wq, &work->work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user