// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call to
 * a function that will free the appropriate resources. The callback must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
                            void (*func)(struct rcu_head *rcu))
{
        call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
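
/*
 * Example (illustrative sketch only; the my_* names and struct layout below
 * are hypothetical, not part of this file): a driver whose ->release callback
 * must stay quick and non-blocking can defer freeing its per-mm context
 * through mmu_notifier_call_srcu():
 *
 *      struct my_ctx {
 *              struct mmu_notifier     mn;
 *              struct rcu_head         rcu;
 *      };
 *
 *      static void my_free_ctx(struct rcu_head *rcu)
 *      {
 *              struct my_ctx *ctx = container_of(rcu, struct my_ctx, rcu);
 *
 *              kfree(ctx);
 *      }
 *
 *      static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *      {
 *              struct my_ctx *ctx = container_of(mn, struct my_ctx, mn);
 *
 *              my_zap_secondary_mappings(ctx);     (quick, must not block)
 *              mmu_notifier_call_srcu(&ctx->rcu, my_free_ctx);
 *      }
 *
 * The kfree() then only runs after the SRCU grace period, i.e. once every
 * notifier callback that might still dereference ctx has finished.
 */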

/*
 * This function can't run concurrently with mmu_notifier_register()
 * because mm->mm_users > 0 during mmu_notifier_register() and exit_mmap()
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister() with
 * the mmu_notifier_mm->lock in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap() holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier *mn;
        int id;

        /*
         * SRCU here will block mmu_notifier_unregister until
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
                 * existing sptes and stop the driver from establishing any more
                 * sptes before all the pages in the mm are freed.
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);

        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
                mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                                 struct mmu_notifier,
                                 hlist);
                /*
                 * We arrived before mmu_notifier_unregister so
                 * mmu_notifier_unregister will do nothing other than to wait
                 * for ->release to finish and for mmu_notifier_unregister to
                 * return.
                 */
                hlist_del_init_rcu(&mn->hlist);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);
        srcu_read_unlock(&srcu, id);

        /*
         * synchronize_srcu here prevents mmu_notifier_release from returning to
         * exit_mmap (which would proceed with freeing all pages in the mm)
         * until the ->release method returns, if it was invoked by
         * mmu_notifier_unregister.
         *
         * The mmu_notifier_mm can't go away from under us because one mm_count
         * is held by exit_mmap.
         */
        synchronize_srcu(&srcu);
}
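
/*
 * For orientation, a rough sketch of the ordering the comments above rely on
 * (the exact exit_mmap() sequence lives in mm/mmap.c and varies between
 * kernel versions): the release path is expected to run before any pages are
 * freed, roughly
 *
 *      exit_mmap(mm)
 *              mmu_notifier_release(mm)        ->release runs for every notifier
 *              unmap_vmas(...)                 only now are the pages torn down
 *              free_pgtables(...)
 *
 * which is why ->release is the driver's last chance to flush its secondary
 * TLBs/sptes before the pages of the mm go away.
 */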

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_flush_young)
                        young |= mn->ops->clear_flush_young(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}
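
/*
 * Example (illustrative sketch only; the my_* names are hypothetical): a
 * driver that mirrors PTEs into a secondary MMU would typically implement
 * ->clear_flush_young by clearing and flushing the accessed state of its own
 * mappings in [start, end) and reporting whether any of them were young:
 *
 *      static int my_clear_flush_young(struct mmu_notifier *mn,
 *                                      struct mm_struct *mm,
 *                                      unsigned long start,
 *                                      unsigned long end)
 *      {
 *              struct my_ctx *ctx = container_of(mn, struct my_ctx, mn);
 *
 *              return my_test_and_clear_young(ctx, start, end);
 *      }
 *
 * The return values of all registered notifiers are OR-ed together above, so
 * returning 1 marks the range as recently referenced for reclaim purposes.
 */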

int __mmu_notifier_clear_young(struct mm_struct *mm,
                               unsigned long start,
                               unsigned long end)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_young)
                        young |= mn->ops->clear_young(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
                              unsigned long address)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->test_young) {
                        young = mn->ops->test_young(mn, mm, address);
                        if (young)
                                break;
                }
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
        }
        srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        struct mmu_notifier *mn;
        int ret = 0;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start) {
                        int _ret = mn->ops->invalidate_range_start(mn, range);
                        if (_ret) {
                                pr_info("%pS callback failed with %d in %sblockable context.\n",
                                        mn->ops->invalidate_range_start, _ret,
                                        !mmu_notifier_range_blockable(range) ? "non-" : "");
                                ret = _ret;
                        }
                }
        }
        srcu_read_unlock(&srcu, id);

        return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
                                         bool only_end)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
                /*
                 * Call invalidate_range here too, to avoid the need for the
                 * subsystem to register an invalidate_range_end call-back
                 * when it already has invalidate_range. Usually a subsystem
                 * registers either invalidate_range_start()/end() or
                 * invalidate_range(), so this adds no overhead beyond the
                 * pointer check.
                 *
                 * We skip the call to invalidate_range() when we know it is
                 * safe, i.e. the call site used
                 * mmu_notifier_invalidate_range_only_end(), which is safe
                 * when a call to invalidate_range() already happened under
                 * the page table lock.
                 */
                if (!only_end && mn->ops->invalidate_range)
                        mn->ops->invalidate_range(mn, range->mm,
                                                  range->start,
                                                  range->end);
                if (mn->ops->invalidate_range_end)
                        mn->ops->invalidate_range_end(mn, range);
        }
        srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
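
/*
 * Example (illustrative sketch only; the exact mmu_notifier_range_init()
 * arguments differ between kernel versions): code that unmaps or changes
 * primary PTEs is expected to bracket the operation like this:
 *
 *      struct mmu_notifier_range range;
 *
 *      mmu_notifier_range_init(&range, ...);   (describes [start, end) on mm)
 *      mmu_notifier_invalidate_range_start(&range);
 *      ... clear or modify the primary PTEs and flush the CPU TLBs ...
 *      mmu_notifier_invalidate_range_end(&range);
 *
 * Callers that already issued mmu_notifier_invalidate_range() under the page
 * table lock can use mmu_notifier_invalidate_range_only_end() instead, which
 * is what the only_end argument above implements.
 */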

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range)
                        mn->ops->invalidate_range(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    int take_mmap_sem)
{
        struct mmu_notifier_mm *mmu_notifier_mm;
        int ret;

        BUG_ON(atomic_read(&mm->mm_users) <= 0);

        ret = -ENOMEM;
        mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
        if (unlikely(!mmu_notifier_mm))
                goto out;

        if (take_mmap_sem)
                down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
                goto out_clean;

        if (!mm_has_notifiers(mm)) {
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);

                mm->mmu_notifier_mm = mmu_notifier_mm;
                mmu_notifier_mm = NULL;
        }
        mmgrab(mm);

        /*
         * Serialize the update against mmu_notifier_unregister. A
         * side note: mmu_notifier_release can't run concurrently with
         * us because we hold the mm_users pin (either implicitly as
         * current->mm or explicitly with get_task_mm() or similar).
         * We can't race against any other mmu notifier method either
         * thanks to mm_take_all_locks().
         */
        spin_lock(&mm->mmu_notifier_mm->lock);
        hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);

        mm_drop_all_locks(mm);
out_clean:
        if (take_mmap_sem)
                up_write(&mm->mmap_sem);
        kfree(mmu_notifier_mm);
out:
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return ret;
}

/*
 * The caller must not hold the mmap_sem or any other VM related lock when
 * calling this registration function. It must also ensure mm_users can't go
 * down to zero while this runs, to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
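
/*
 * Example (illustrative sketch only; the my_* names are hypothetical): a
 * driver typically embeds the struct mmu_notifier in its own per-mm context,
 * points it at a static ops table and registers it against current->mm:
 *
 *      static const struct mmu_notifier_ops my_mn_ops = {
 *              .release                = my_release,
 *              .invalidate_range_start = my_invalidate_range_start,
 *              .invalidate_range_end   = my_invalidate_range_end,
 *      };
 *
 *      ctx->mn.ops = &my_mn_ops;
 *      ret = mmu_notifier_register(&ctx->mn, current->mm);
 *      if (ret)
 *              goto err_free_ctx;
 *
 * mmu_notifier_register() takes the mmap_sem itself, so the caller must not
 * hold it; use __mmu_notifier_register() below when it is already held for
 * write.
 */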

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* This is called after the last mmu_notifier_unregister() has returned. */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
        BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
        kfree(mm->mmu_notifier_mm);
        mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        if (!hlist_unhashed(&mn->hlist)) {
                /*
                 * SRCU here will force exit_mmap to wait for ->release to
                 * finish before freeing the pages.
                 */
                int id;

                id = srcu_read_lock(&srcu);
                /*
                 * exit_mmap will block in mmu_notifier_release to guarantee
                 * that ->release is called before freeing the pages.
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
                srcu_read_unlock(&srcu, id);

                spin_lock(&mm->mmu_notifier_mm->lock);
                /*
                 * Cannot use list_del_rcu() since __mmu_notifier_release
                 * can delete it before we hold the lock.
                 */
                hlist_del_init_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);
        }

        /*
         * Wait for any running method to finish, of course including
         * ->release if it was run by mmu_notifier_release instead of us.
         */
        synchronize_srcu(&srcu);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
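
/*
 * Example (illustrative sketch only, continuing the hypothetical my_* driver
 * from the mmu_notifier_register() example above): teardown normally drops
 * all secondary mappings (sptes) first and then unregisters, after which no
 * callback can still be running and the context may be freed:
 *
 *      my_zap_secondary_mappings(ctx);
 *      mmu_notifier_unregister(&ctx->mn, ctx->mm);
 *      kfree(ctx);
 *
 * Thanks to the mm_count pin taken at registration time this is safe to call
 * before or after exit_mmap() has run for ctx->mm.
 */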

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
{
        spin_lock(&mm->mmu_notifier_mm->lock);
        /*
         * Cannot use list_del_rcu() since __mmu_notifier_release
         * can delete it before we hold the lock.
         */
        hlist_del_init_rcu(&mn->hlist);
        spin_unlock(&mm->mmu_notifier_mm->lock);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);
        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
        if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
                return false;
        /* Return true if the vma still has the read flag set. */
        return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);