| field | value | date |
|---|---|---|
| author | Marco Crivellari <marco.crivellari@suse.com> | 2025-09-18 16:24:26 +0200 |
| committer | Jakub Kicinski <kuba@kernel.org> | 2025-09-22 17:40:30 -0700 |
| commit | 5fd8bb982e10f29e856ef71072609af5ce55d281 (patch) | |
| tree | 3fe1eced876cffb4938404f42c5e4b6f997889f9 /net | |
| parent | 9870d350e45a5724ee25f77aa0b6d053c9b766db (diff) | |
| download | net-5fd8bb982e10f29e856ef71072609af5ce55d281.tar.gz | |
net: replace use of system_wq with system_percpu_wq
Currently, if a user enqueues a work item using schedule_delayed_work(), the wq used is "system_wq" (a per-CPU wq), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again makes use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue, so as not to enforce locality constraints on random work whenever they are not required.

Add system_dfl_wq to encourage its use when unbound work should be used. The old system_unbound_wq will be kept for a few release cycles.
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://patch.msgid.link/20250918142427.309519-3-marco.crivellari@suse.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
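For reference, the asymmetry the commit message describes comes from how the schedule_*() helpers are defined. The sketch below is simplified from the pre-rename wrappers in include/linux/workqueue.h; it is illustrative, not the verbatim kernel source:

```c
/* Simplified sketch (not verbatim kernel source): the schedule_*()
 * helpers hard-code the per-CPU system_wq, while the queue_*()
 * helpers let the caller pick the workqueue and only default the
 * CPU to WORK_CPU_UNBOUND. */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);	/* per-CPU wq */
}

static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
```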
Diffstat (limited to 'net')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | net/bridge/br_cfm.c | 6 |
| -rw-r--r-- | net/bridge/br_mrp.c | 8 |
| -rw-r--r-- | net/ceph/mon_client.c | 2 |
| -rw-r--r-- | net/core/skmsg.c | 2 |
| -rw-r--r-- | net/devlink/core.c | 2 |
| -rw-r--r-- | net/ipv4/inet_fragment.c | 2 |
| -rw-r--r-- | net/netfilter/nf_conntrack_ecache.c | 2 |
| -rw-r--r-- | net/openvswitch/dp_notify.c | 2 |
| -rw-r--r-- | net/rfkill/input.c | 2 |
| -rw-r--r-- | net/smc/smc_core.c | 2 |
| -rw-r--r-- | net/vmw_vsock/af_vsock.c | 2 |
11 files changed, 16 insertions, 16 deletions
```diff
diff --git a/net/bridge/br_cfm.c b/net/bridge/br_cfm.c
index a3c755d0a09de9..c2c1c7d44c615f 100644
--- a/net/bridge/br_cfm.c
+++ b/net/bridge/br_cfm.c
@@ -134,7 +134,7 @@ static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
 	 * of the configured CC 'expected_interval'
 	 * in order to detect CCM defect after 3.25 interval.
 	 */
-	queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
+	queue_delayed_work(system_percpu_wq, &peer_mep->ccm_rx_dwork,
 			   usecs_to_jiffies(interval_us / 4));
 }
 
@@ -285,7 +285,7 @@ static void ccm_tx_work_expired(struct work_struct *work)
 	ccm_frame_tx(skb);
 
 	interval_us = interval_to_us(mep->cc_config.exp_interval);
-	queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
+	queue_delayed_work(system_percpu_wq, &mep->ccm_tx_dwork,
 			   usecs_to_jiffies(interval_us));
 }
 
@@ -809,7 +809,7 @@ int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
 	 * to send first frame immediately
 	 */
 	mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
-	queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);
+	queue_delayed_work(system_percpu_wq, &mep->ccm_tx_dwork, 0);
 
 save:
 	mep->cc_ccm_tx_info = *tx_info;
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index fd2de35ffb3cf8..3c36fa24bc05a5 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -341,7 +341,7 @@ static void br_mrp_test_work_expired(struct work_struct *work)
 out:
 	rcu_read_unlock();
 
-	queue_delayed_work(system_wq, &mrp->test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->test_work,
 			   usecs_to_jiffies(mrp->test_interval));
 }
 
@@ -418,7 +418,7 @@ static void br_mrp_in_test_work_expired(struct work_struct *work)
 out:
 	rcu_read_unlock();
 
-	queue_delayed_work(system_wq, &mrp->in_test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->in_test_work,
 			   usecs_to_jiffies(mrp->in_test_interval));
 }
 
@@ -725,7 +725,7 @@ int br_mrp_start_test(struct net_bridge *br,
 	mrp->test_max_miss = test->max_miss;
 	mrp->test_monitor = test->monitor;
 	mrp->test_count_miss = 0;
-	queue_delayed_work(system_wq, &mrp->test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->test_work,
 			   usecs_to_jiffies(test->interval));
 
 	return 0;
@@ -865,7 +865,7 @@ int br_mrp_start_in_test(struct net_bridge *br,
 	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
 	mrp->in_test_max_miss = in_test->max_miss;
 	mrp->in_test_count_miss = 0;
-	queue_delayed_work(system_wq, &mrp->in_test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->in_test_work,
 			   usecs_to_jiffies(in_test->interval));
 
 	return 0;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index ab66b599ac4792..c227ececa9254c 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -314,7 +314,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
 		delay = CEPH_MONC_PING_INTERVAL;
 
 	dout("__schedule_delayed after %lu\n", delay);
-	mod_delayed_work(system_wq, &monc->delayed_work,
+	mod_delayed_work(system_percpu_wq, &monc->delayed_work,
 			 round_jiffies_relative(delay));
 }
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 83c78379932e23..2ac7731e1e0a74 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -876,7 +876,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 	sk_psock_stop(psock);
 
 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
-	queue_rcu_work(system_wq, &psock->rwork);
+	queue_rcu_work(system_percpu_wq, &psock->rwork);
 }
 EXPORT_SYMBOL_GPL(sk_psock_drop);
diff --git a/net/devlink/core.c b/net/devlink/core.c
index 7203c39532fcc3..58093f49c0905e 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -320,7 +320,7 @@ static void devlink_release(struct work_struct *work)
 void devlink_put(struct devlink *devlink)
 {
 	if (refcount_dec_and_test(&devlink->refcount))
-		queue_rcu_work(system_wq, &devlink->rwork);
+		queue_rcu_work(system_percpu_wq, &devlink->rwork);
 }
 
 struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 470ab17ceb51be..025895eb6ec597 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -183,7 +183,7 @@ static void fqdir_work_fn(struct work_struct *work)
 	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
 
 	if (llist_add(&fqdir->free_list, &fqdir_free_list))
-		queue_delayed_work(system_wq, &fqdir_free_work, HZ);
+		queue_delayed_work(system_percpu_wq, &fqdir_free_work, HZ);
 }
 
 int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index af68c64acaab70..81baf20826046e 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -301,7 +301,7 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
 		net->ct.ecache_dwork_pending = true;
 	} else if (state == NFCT_ECACHE_DESTROY_SENT) {
 		if (!hlist_nulls_empty(&cnet->ecache.dying_list))
-			mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
+			mod_delayed_work(system_percpu_wq, &cnet->ecache.dwork, 0);
 		else
 			net->ct.ecache_dwork_pending = false;
 	}
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 7af0cde8b293c8..a2af90ee99af68 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -75,7 +75,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
 
 		/* schedule vport destroy, dev_put and genl notification */
 		ovs_net = net_generic(dev_net(dev), ovs_net_id);
-		queue_work(system_wq, &ovs_net->dp_notify_work);
+		queue_work(system_percpu_wq, &ovs_net->dp_notify_work);
 	}
 
 	return NOTIFY_DONE;
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 598d0a61bda775..53d286b108439e 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -159,7 +159,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
 	rfkill_op_pending = true;
 	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
 		/* bypass the limiter for EPO */
-		mod_delayed_work(system_wq, &rfkill_op_work, 0);
+		mod_delayed_work(system_percpu_wq, &rfkill_op_work, 0);
 		rfkill_last_scheduled = jiffies;
 	} else
 		rfkill_schedule_ratelimited();
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2a559a98541c75..e216d237865b03 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -85,7 +85,7 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 	 * otherwise there is a risk of out-of-sync link groups.
 	 */
 	if (!lgr->freeing) {
-		mod_delayed_work(system_wq, &lgr->free_work,
+		mod_delayed_work(system_percpu_wq, &lgr->free_work,
 				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
 						SMC_LGR_FREE_DELAY_CLNT :
 						SMC_LGR_FREE_DELAY_SERV);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 0538948d5fd90b..4c2db6cca5579b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1649,7 +1649,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
 			 * reschedule it, then ungrab the socket refcount to
 			 * keep it balanced.
 			 */
-			if (mod_delayed_work(system_wq, &vsk->connect_work,
+			if (mod_delayed_work(system_percpu_wq, &vsk->connect_work,
 					     timeout))
 				sock_put(sk);
```
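As a usage note, here is a minimal, hypothetical caller sketch (not part of this patch; my_work_fn, my_work, my_dwork, and example_enqueue are made up for illustration) showing how the names from this series are meant to be chosen:

```c
#include <linux/workqueue.h>

/* Hypothetical work handler, for illustration only. */
static void my_work_fn(struct work_struct *work)
{
	/* ... deferred processing ... */
}

static DECLARE_WORK(my_work, my_work_fn);
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void example_enqueue(void)
{
	/* Per-CPU placement is genuinely wanted: use the renamed
	 * per-CPU default, matching the conversions in this patch. */
	queue_work(system_percpu_wq, &my_work);

	/* No locality requirement: prefer the unbound default
	 * (system_dfl_wq) that this series introduces. */
	queue_delayed_work(system_dfl_wq, &my_dwork, HZ);
}
```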
