 mm/khugepaged.c | 58 +++++++++++++++++++++-------------------------------------
 1 file changed, 21 insertions(+), 37 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9ed1af2b5c3884..52786ffef80a1a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -104,14 +104,6 @@ struct collapse_control {
};
/**
- * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
- * @slot: hash lookup from mm to mm_slot
- */
-struct khugepaged_mm_slot {
- struct mm_slot slot;
-};
-
-/**
* struct khugepaged_scan - cursor for scanning
* @mm_head: the head of the mm list to scan
* @mm_slot: the current mm_slot we are scanning
@@ -121,7 +113,7 @@ struct khugepaged_mm_slot {
*/
struct khugepaged_scan {
struct list_head mm_head;
- struct khugepaged_mm_slot *mm_slot;
+ struct mm_slot *mm_slot;
unsigned long address;
};
@@ -384,7 +376,10 @@ int hugepage_madvise(struct vm_area_struct *vma,
int __init khugepaged_init(void)
{
- mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
+ mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
+ sizeof(struct mm_slot),
+ __alignof__(struct mm_slot),
+ 0, NULL);
if (!mm_slot_cache)
return -ENOMEM;
@@ -438,7 +433,6 @@ static bool hugepage_pmd_enabled(void)
void __khugepaged_enter(struct mm_struct *mm)
{
- struct khugepaged_mm_slot *mm_slot;
struct mm_slot *slot;
int wakeup;
@@ -447,12 +441,10 @@ void __khugepaged_enter(struct mm_struct *mm)
if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
return;
- mm_slot = mm_slot_alloc(mm_slot_cache);
- if (!mm_slot)
+ slot = mm_slot_alloc(mm_slot_cache);
+ if (!slot)
return;
- slot = &mm_slot->slot;
-
spin_lock(&khugepaged_mm_lock);
mm_slot_insert(mm_slots_hash, mm, slot);
/*
@@ -480,14 +472,12 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
void __khugepaged_exit(struct mm_struct *mm)
{
- struct khugepaged_mm_slot *mm_slot;
struct mm_slot *slot;
int free = 0;
spin_lock(&khugepaged_mm_lock);
slot = mm_slot_lookup(mm_slots_hash, mm);
- mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
- if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+ if (slot && khugepaged_scan.mm_slot != slot) {
hash_del(&slot->hash);
list_del(&slot->mm_node);
free = 1;
@@ -496,9 +486,9 @@ void __khugepaged_exit(struct mm_struct *mm)
if (free) {
mm_flags_clear(MMF_VM_HUGEPAGE, mm);
- mm_slot_free(mm_slot_cache, mm_slot);
+ mm_slot_free(mm_slot_cache, slot);
mmdrop(mm);
- } else if (mm_slot) {
+ } else if (slot) {
/*
* This is required to serialize against
* hpage_collapse_test_exit() (which is guaranteed to run
@@ -1432,9 +1422,8 @@ out:
return result;
}
-static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
+static void collect_mm_slot(struct mm_slot *slot)
{
- struct mm_slot *slot = &mm_slot->slot;
struct mm_struct *mm = slot->mm;
lockdep_assert_held(&khugepaged_mm_lock);
@@ -1451,7 +1440,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
*/
/* khugepaged_mm_lock actually not necessary for the below */
- mm_slot_free(mm_slot_cache, mm_slot);
+ mm_slot_free(mm_slot_cache, slot);
mmdrop(mm);
}
}
@@ -2394,7 +2383,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
__acquires(&khugepaged_mm_lock)
{
struct vma_iterator vmi;
- struct khugepaged_mm_slot *mm_slot;
struct mm_slot *slot;
struct mm_struct *mm;
struct vm_area_struct *vma;
@@ -2405,14 +2393,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
*result = SCAN_FAIL;
if (khugepaged_scan.mm_slot) {
- mm_slot = khugepaged_scan.mm_slot;
- slot = &mm_slot->slot;
+ slot = khugepaged_scan.mm_slot;
} else {
slot = list_first_entry(&khugepaged_scan.mm_head,
struct mm_slot, mm_node);
- mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
khugepaged_scan.address = 0;
- khugepaged_scan.mm_slot = mm_slot;
+ khugepaged_scan.mm_slot = slot;
}
spin_unlock(&khugepaged_mm_lock);
@@ -2510,7 +2496,7 @@ breakouterloop:
breakouterloop_mmap_lock:
spin_lock(&khugepaged_mm_lock);
- VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+ VM_BUG_ON(khugepaged_scan.mm_slot != slot);
/*
* Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm.
@@ -2522,16 +2508,14 @@ breakouterloop_mmap_lock:
* mm_slot not pointing to the exiting mm.
*/
if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
- slot = list_next_entry(slot, mm_node);
- khugepaged_scan.mm_slot =
- mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
+ khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
khugepaged_scan.address = 0;
} else {
khugepaged_scan.mm_slot = NULL;
khugepaged_full_scans++;
}
- collect_mm_slot(mm_slot);
+ collect_mm_slot(slot);
}
return progress;
@@ -2618,7 +2602,7 @@ static void khugepaged_wait_work(void)
static int khugepaged(void *none)
{
- struct khugepaged_mm_slot *mm_slot;
+ struct mm_slot *slot;
set_freezable();
set_user_nice(current, MAX_NICE);
@@ -2629,10 +2613,10 @@ static int khugepaged(void *none)
}
spin_lock(&khugepaged_mm_lock);
- mm_slot = khugepaged_scan.mm_slot;
+ slot = khugepaged_scan.mm_slot;
khugepaged_scan.mm_slot = NULL;
- if (mm_slot)
- collect_mm_slot(mm_slot);
+ if (slot)
+ collect_mm_slot(slot);
spin_unlock(&khugepaged_mm_lock);
return 0;
}
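
The conversion above works because struct khugepaged_mm_slot carried no state of its own beyond the embedded struct mm_slot, so every mm_slot_entry() lookup was pure container_of() indirection. As a rough, self-contained userspace sketch of the idiom being removed (the demo_* names are hypothetical stand-ins, not kernel code; only the container_of() pattern mirrors mm_slot_entry()):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct demo_slot {		/* stands in for struct mm_slot */
		int mm_id;
	};

	struct demo_wrapper {		/* stands in for struct khugepaged_mm_slot */
		struct demo_slot slot;	/* sole member: the wrapper adds nothing */
	};

	int main(void)
	{
		/* Old style: allocate the wrapper, then reach the slot inside it. */
		struct demo_wrapper *w = malloc(sizeof(*w));
		if (!w)
			return 1;
		w->slot.mm_id = 1;
		/* mm_slot_entry() equivalent: recover the wrapper from the slot. */
		struct demo_wrapper *back =
			container_of(&w->slot, struct demo_wrapper, slot);
		printf("wrapper round-trip: %d\n", back->slot.mm_id);
		free(w);

		/* New style: the wrapper carried no extra fields, so allocate
		 * and use the generic slot directly, dropping the
		 * container_of() hop entirely. */
		struct demo_slot *s = malloc(sizeof(*s));
		if (!s)
			return 1;
		s->mm_id = 2;
		printf("direct slot: %d\n", s->mm_id);
		free(s);
		return 0;
	}

Once the wrapper type is gone, only the second half of the sketch remains: the patch sizes the kmem cache for struct mm_slot itself and passes the slot pointer straight through mm_slot_alloc(), mm_slot_insert(), mm_slot_lookup(), and mm_slot_free().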