| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-10-09 10:33:50 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-10-09 10:33:50 -0700 |
| commit | f6db358deaeafd9830c8bcba8f76f55f8a14b059 (patch) | |
| tree | f747bf49eaf73cfd913c382a2c61ecfcd4444624 /mm | |
| parent | ec714e371f22f716a04e6ecb2a24988c92b26911 (diff) | |
| parent | f7dfa0f31b13ee5f2ba598cdfcab9a831ed8a6b8 (diff) | |
Merge tag 'slab-for-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:
- Fixes for several corner cases in error paths and debugging options,
related to the new kmalloc_nolock() functionality (Kuniyuki Iwashima,
Ran Xiaokai)
* tag 'slab-for-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
slub: Don't call lockdep_unregister_key() for immature kmem_cache.
slab: Fix using this_cpu_ptr() in preemptible context
slab: Add allow_spin check to eliminate kmemleak warnings
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/slub.c | 17 |
1 file changed, 13 insertions(+), 4 deletions(-)
```diff
diff --git a/mm/slub.c b/mm/slub.c
index 584a5ff1828b16..135c408e051528 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2152,7 +2152,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		return 0;
 	}
 
-	kmemleak_not_leak(vec);
+	if (allow_spin)
+		kmemleak_not_leak(vec);
 	return 0;
 }
 
@@ -6431,17 +6432,24 @@ static void free_deferred_objects(struct irq_work *work)
 
 static void defer_free(struct kmem_cache *s, void *head)
 {
-	struct defer_free *df = this_cpu_ptr(&defer_free_objects);
+	struct defer_free *df;
+
+	guard(preempt)();
+	df = this_cpu_ptr(&defer_free_objects);
 
 	if (llist_add(head + s->offset, &df->objects))
 		irq_work_queue(&df->work);
 }
 
 static void defer_deactivate_slab(struct slab *slab, void *flush_freelist)
 {
-	struct defer_free *df = this_cpu_ptr(&defer_free_objects);
+	struct defer_free *df;
 
 	slab->flush_freelist = flush_freelist;
+
+	guard(preempt)();
+
+	df = this_cpu_ptr(&defer_free_objects);
 	if (llist_add(&slab->llnode, &df->slabs))
 		irq_work_queue(&df->work);
 }
@@ -7693,7 +7701,8 @@ void __kmem_cache_release(struct kmem_cache *s)
 	pcs_destroy(s);
 #ifndef CONFIG_SLUB_TINY
 #ifdef CONFIG_PREEMPT_RT
-	lockdep_unregister_key(&s->lock_key);
+	if (s->cpu_slab)
+		lockdep_unregister_key(&s->lock_key);
 #endif
 	free_percpu(s->cpu_slab);
 #endif
```
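The defer_free() and defer_deactivate_slab() hunks apply the same rule: this_cpu_ptr() must not be used while preemption is enabled, because the task can migrate to another CPU between fetching the per-CPU pointer and using it (CONFIG_DEBUG_PREEMPT warns about exactly that). The scope-based guard(preempt)() helper keeps preemption disabled until the end of the enclosing scope. The sketch below is an illustrative example of that pattern, not upstream code; the names pending_work, pending_per_cpu, queue_pending and free_pending are invented for the example.

```c
/*
 * Illustrative sketch of the guard(preempt)() + this_cpu_ptr() pattern.
 * Not part of mm/slub.c; all names here are made up for the example.
 */
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

struct pending_work {
	struct llist_head objects;	/* lock-free list of deferred items */
	struct irq_work work;		/* drains the list from IRQ-work context */
};

static void free_pending(struct irq_work *work);

static DEFINE_PER_CPU(struct pending_work, pending_per_cpu) = {
	.work = IRQ_WORK_INIT(free_pending),
};

/* IRQ-work callback: detach the whole list and process each entry. */
static void free_pending(struct irq_work *work)
{
	struct pending_work *pw = container_of(work, struct pending_work, work);
	struct llist_node *head, *pos, *t;

	head = llist_del_all(&pw->objects);
	llist_for_each_safe(pos, t, head) {
		/* free or otherwise process each deferred item here */
	}
}

/* May be called from contexts where preemption is still enabled. */
static void queue_pending(struct llist_node *node)
{
	struct pending_work *pw;

	/*
	 * Pin the task to the current CPU for the rest of this scope so
	 * that the pointer returned by this_cpu_ptr() and the irq_work
	 * queued below refer to the same CPU's structure.
	 */
	guard(preempt)();

	pw = this_cpu_ptr(&pending_per_cpu);
	if (llist_add(node, &pw->objects))
		irq_work_queue(&pw->work);
}
```

The same effect could be obtained with get_cpu_ptr()/put_cpu_ptr(); the scope-based guard simply makes it impossible to miss the re-enable on an early return.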
