Lines matching full:pte

29 #include <asm/pte-walk.h>
45 * reasonably "normal" PTEs. We currently require a PTE to be present
46 * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
49 static inline int pte_looks_normal(pte_t pte, unsigned long addr) in pte_looks_normal() argument
52 if (pte_present(pte) && !pte_special(pte)) { in pte_looks_normal()
53 if (pte_ci(pte)) in pte_looks_normal()
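These matches appear to come from the powerpc page-table helpers (arch/powerpc/mm/pgtable.c in the kernel tree); the sketches below only model the logic visible in the fragments and are not the kernel implementation. The fragments at lines 45-53 outline pte_looks_normal(): a PTE qualifies for i/d-cache maintenance only if it is present, not _PAGE_SPECIAL, not cache-inhibited, and (per the truncated comment) maps userspace. A minimal userspace model of that predicate, with made-up flag bits standing in for the real powerpc definitions:

    #include <stdbool.h>

    typedef unsigned long pte_t;

    #define _PAGE_PRESENT (1UL << 0)   /* assumed bit positions, not the */
    #define _PAGE_SPECIAL (1UL << 1)   /* real powerpc layout            */
    #define _PAGE_CI      (1UL << 2)   /* cache-inhibited                */

    static bool pte_present(pte_t p) { return p & _PAGE_PRESENT; }
    static bool pte_special(pte_t p) { return p & _PAGE_SPECIAL; }
    static bool pte_ci(pte_t p)      { return p & _PAGE_CI; }

    /* Stand-in: treat the top half of the address space as kernel. */
    static bool is_kernel_addr(unsigned long addr) { return addr >> 63; }

    int pte_looks_normal(pte_t pte, unsigned long addr)
    {
        if (pte_present(pte) && !pte_special(pte)) {
            if (pte_ci(pte))
                return 0;               /* uncached: no cache flushing games */
            if (!is_kernel_addr(addr))
                return 1;               /* normal userspace mapping */
        }
        return 0;                       /* kernel, special, or not present */
    }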
61 static struct folio *maybe_pte_to_folio(pte_t pte) in maybe_pte_to_folio() argument
63 unsigned long pfn = pte_pfn(pte); in maybe_pte_to_folio()
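maybe_pte_to_folio() (line 61) evidently resolves a PTE to its backing folio only when the pfn looks sane; pfn_valid(), pfn_to_page(), PageReserved() and page_folio() are the usual kernel helpers for that, stubbed here with a toy flat memory map:

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long pte_t;
    #define MAX_PFN 1024UL

    struct folio;
    struct page  { bool reserved; struct folio *folio; };
    struct folio { struct page *head; };

    static struct page mem_map[MAX_PFN];        /* toy flat memory map */

    static unsigned long pte_pfn(pte_t p) { return p >> 12; }  /* assumed layout */
    static bool pfn_valid(unsigned long pfn) { return pfn < MAX_PFN; }

    /* NULL unless the PTE points at an ordinary, non-reserved page. */
    struct folio *maybe_pte_to_folio(pte_t pte)
    {
        unsigned long pfn = pte_pfn(pte);

        if (!pfn_valid(pfn))
            return NULL;
        if (mem_map[pfn].reserved)
            return NULL;
        return mem_map[pfn].folio;  /* page_folio(pfn_to_page(pfn)) in the kernel */
    }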
82 static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) in set_pte_filter_hash() argument
84 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); in set_pte_filter_hash()
85 if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || in set_pte_filter_hash()
87 struct folio *folio = maybe_pte_to_folio(pte); in set_pte_filter_hash()
89 return pte; in set_pte_filter_hash()
95 return pte; in set_pte_filter_hash()
100 static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; } in set_pte_filter_hash() argument
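Lines 82-95 are the hash-MMU (book3s) variant: clear the software-managed _PAGE_HPTEFLAGS, and if the CPU cannot keep its instruction cache coherent itself, flush the folio once and remember that; line 100 is the no-op stub for the other configurations. A hedged model of that control flow (the mask value, feature test and flush below are stand-ins):

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long pte_t;
    #define _PAGE_HPTEFLAGS 0x3f0UL   /* assumed mask of hash-fault bookkeeping bits */

    struct folio { bool dcache_clean; };

    static bool coherent_icache;      /* CPU_FTR_COHERENT_ICACHE stand-in */
    static struct folio the_folio;
    static bool pte_looks_normal(pte_t p, unsigned long a) { (void)a; return p & 1; }
    static struct folio *maybe_pte_to_folio(pte_t p) { (void)p; return &the_folio; }
    static void flush_dcache_icache_folio(struct folio *f) { (void)f; }

    pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
    {
        pte &= ~_PAGE_HPTEFLAGS;      /* drop bits the hash fault handler owns */

        if (pte_looks_normal(pte, addr) && !coherent_icache) {
            struct folio *folio = maybe_pte_to_folio(pte);

            if (!folio)
                return pte;
            if (!folio->dcache_clean) {
                /* Flush once per folio; later mappings skip this. */
                flush_dcache_icache_folio(folio);
                folio->dcache_clean = true;
            }
        }
        return pte;
    }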
110 static inline pte_t set_pte_filter(pte_t pte, unsigned long addr) in set_pte_filter() argument
115 return pte; in set_pte_filter()
118 return set_pte_filter_hash(pte, addr); in set_pte_filter()
121 if (!pte_exec(pte) || !pte_looks_normal(pte, addr)) in set_pte_filter()
122 return pte; in set_pte_filter()
125 folio = maybe_pte_to_folio(pte); in set_pte_filter()
127 return pte; in set_pte_filter()
131 return pte; in set_pte_filter()
137 return pte; in set_pte_filter()
141 return pte_exprotect(pte); in set_pte_filter()
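set_pte_filter() (lines 110-141) is the dispatch point plus the embedded-MMU lazy-exec trick: radix and hash MMUs take the early exits at lines 115 and 118, and otherwise an executable PTE over a not-yet-flushed folio is either flushed now (if this is an exec fault) or installed with exec permission stripped, so a later exec fault can restore it. A self-contained sketch of that decision tree, reusing the same stand-in types as above:

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long pte_t;
    #define _PAGE_EXEC (1UL << 3)     /* assumed bit */

    struct folio { bool dcache_clean; };

    static bool pte_exec(pte_t p)         { return p & _PAGE_EXEC; }
    static pte_t pte_exprotect(pte_t p)   { return p & ~_PAGE_EXEC; }
    static bool pte_looks_normal(pte_t p) { return p & 1; }   /* stand-in */

    static bool exec_fault;              /* is_exec_fault() stand-in */
    static struct folio the_folio;
    static struct folio *maybe_pte_to_folio(pte_t p) { (void)p; return &the_folio; }
    static void flush_dcache_icache_folio(struct folio *f) { (void)f; }

    pte_t set_pte_filter(pte_t pte)
    {
        struct folio *folio;

        /* No exec permission, or not a "normal" page: nothing to do
         * (the guard at line 121). */
        if (!pte_exec(pte) || !pte_looks_normal(pte))
            return pte;

        folio = maybe_pte_to_folio(pte);
        if (!folio)
            return pte;

        if (folio->dcache_clean)         /* flushed once already: keep exec */
            return pte;

        if (exec_fault) {                /* flush now and remember it */
            flush_dcache_icache_folio(folio);
            folio->dcache_clean = true;
            return pte;
        }

        /* Otherwise install the PTE non-executable; the exec fault that
         * eventually arrives restores it (line 141's pte_exprotect()). */
        return pte_exprotect(pte);
    }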
144 static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, in set_access_flags_filter() argument
150 return pte; in set_access_flags_filter()
153 return pte; in set_access_flags_filter()
160 if (dirty || pte_exec(pte) || !is_exec_fault()) in set_access_flags_filter()
161 return pte; in set_access_flags_filter()
169 return pte; in set_access_flags_filter()
173 folio = maybe_pte_to_folio(pte); in set_access_flags_filter()
186 return pte_mkexec(pte); in set_access_flags_filter()
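set_access_flags_filter() (lines 144-186) is the restore side of the same scheme: it only acts when the access is an exec fault against a PTE that is neither dirty nor already executable (the guard at line 160), cleans the folio if needed, and hands back pte_mkexec(pte). A compact model, with the vma argument and the debug checks of the real function omitted:

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long pte_t;
    #define _PAGE_EXEC (1UL << 3)     /* assumed bit, as above */

    struct folio { bool dcache_clean; };

    static bool pte_exec(pte_t p)    { return p & _PAGE_EXEC; }
    static pte_t pte_mkexec(pte_t p) { return p | _PAGE_EXEC; }

    static bool exec_fault;           /* is_exec_fault() stand-in */
    static struct folio the_folio;
    static struct folio *maybe_pte_to_folio(pte_t p) { (void)p; return &the_folio; }
    static void flush_dcache_icache_folio(struct folio *f) { (void)f; }

    pte_t set_access_flags_filter(pte_t pte, bool dirty)
    {
        struct folio *folio;

        /* Only lost-exec recovery is interesting here; everything else
         * passes through untouched. */
        if (dirty || pte_exec(pte) || !exec_fault)
            return pte;

        folio = maybe_pte_to_folio(pte);
        if (folio && !folio->dcache_clean) {
            flush_dcache_icache_folio(folio);
            folio->dcache_clean = true;
        }
        return pte_mkexec(pte);       /* give the exec permission back */
    }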
190 * set_pte stores a linux PTE into the linux page table.
193 pte_t pte, unsigned int nr) in set_ptes() argument
198 * is called. Filter the pte value and use the filtered value in set_ptes()
201 pte = set_pte_filter(pte, addr); in set_ptes()
217 /* Perform the setting of the PTE */ in set_ptes()
218 __set_pte_at(mm, addr, ptep, pte, 0); in set_ptes()
223 pte = pte_next_pfn(pte); in set_ptes()
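set_ptes() (lines 193-223) filters the value once, then installs nr consecutive entries, advancing the pfn embedded in the PTE for each page. A runnable toy of that loop; PFN_PTE_SHIFT here is an assumption about where the pfn sits in the value:

    #include <stdio.h>

    typedef unsigned long pte_t;
    #define PFN_PTE_SHIFT 12          /* assumed: pfn stored above the flag bits */

    static pte_t page_table[16];      /* toy software page table */

    /* Advance the pfn field by one page, keeping the permission bits. */
    static pte_t pte_next_pfn(pte_t pte)
    {
        return pte + (1UL << PFN_PTE_SHIFT);
    }

    static void set_ptes(pte_t *ptep, pte_t pte, unsigned int nr)
    {
        for (;;) {
            *ptep = pte;              /* __set_pte_at() in the kernel (line 218) */
            if (--nr == 0)
                break;
            ptep++;
            pte = pte_next_pfn(pte);  /* next page of the same folio (line 223) */
        }
    }

    int main(void)
    {
        set_ptes(page_table, (0x1000UL << PFN_PTE_SHIFT) | 0x7, 4);
        for (int i = 0; i < 4; i++)
            printf("pte[%d] = %#lx\n", i, page_table[i]);
        return 0;
    }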
237 * This is called when relaxing access to a PTE. It's also called in the page
260 pte_t pte, int dirty) in huge_ptep_set_access_flags() argument
268 ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags()
273 pte = set_access_flags_filter(pte, vma, dirty); in huge_ptep_set_access_flags()
274 changed = !pte_same(*(ptep), pte); in huge_ptep_set_access_flags()
293 __ptep_set_access_flags(vma, ptep, pte, addr, psize); in huge_ptep_set_access_flags()
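huge_ptep_set_access_flags() (lines 260-293) shows the usual compare-before-write pattern: run the filter, and only call __ptep_set_access_flags() when the filtered value actually differs from the current entry, so an unchanged PTE costs no MMU update. The skeleton of that pattern:

    #include <stdbool.h>

    typedef unsigned long pte_t;

    static bool pte_same(pte_t a, pte_t b) { return a == b; }

    /* Returns whether anything was written; the caller uses this to
     * decide whether a TLB/MMU update is needed. */
    bool set_access_flags(pte_t *ptep, pte_t pte)
    {
        bool changed = !pte_same(*ptep, pte);

        if (changed)
            *ptep = pte;   /* __ptep_set_access_flags() in the kernel */
        return changed;
    }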
302 /* We need the same lock to protect the PMD table and the two PTE tables. */
324 pte_t pte, unsigned long sz) in set_huge_pte_at() argument
328 pte = set_pte_filter(pte, addr); in set_huge_pte_at()
334 __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte)); in set_huge_pte_at()
335 __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M); in set_huge_pte_at()
337 __set_huge_pte_at(pmdp, ptep, pte_val(pte)); in set_huge_pte_at()
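The 8xx branch at lines 334-335 seeds two 4M page tables for one 8M huge page: the second table gets pte_val(pte) + SZ_4M, which works because on this layout the physical page number occupies the high PTE bits, so plain addition moves the mapping forward by 4M without disturbing the flag bits. The arithmetic, with made-up flag bits:

    #include <stdio.h>

    #define SZ_4M 0x400000UL

    int main(void)
    {
        /* 8M huge mapping at physical 0x10000000 with (made-up) flag
         * bits 0x1f1 in the low bits of the PTE. */
        unsigned long pte = 0x10000000UL | 0x1f1UL;

        printf("first 4M half:  %#lx\n", pte);          /* 0x100001f1 */
        printf("second 4M half: %#lx\n", pte + SZ_4M);  /* 0x104001f1 */
        return 0;
    }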
342 pte_t pte, unsigned long sz) in set_huge_pte_at() argument
347 pte = set_pte_filter(pte, addr); in set_huge_pte_at()
367 __set_pte_at(mm, addr, ptep, pte, 0); in set_huge_pte_at()
368 pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT)); in set_huge_pte_at()
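The generic variant at lines 367-368 installs one entry per page-directory slot of size pdsize and bumps the pfn field by pdsize / PAGE_SIZE base pages each iteration, which is exactly what (pdsize / PAGE_SIZE) << PFN_PTE_SHIFT adds. A worked example with assumed constants:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PFN_PTE_SHIFT 12          /* assumed equal to PAGE_SHIFT */

    int main(void)
    {
        unsigned long pdsize = 2UL << 20;  /* say one slot covers 2M */
        unsigned long pte    = (0x40000UL << PFN_PTE_SHIFT) | 0x7;

        /* Three iterations of the loop ending at line 368: each entry
         * starts 512 base pages (2M / 4K) after the previous one. */
        for (int i = 0; i < 3; i++) {
            printf("entry %d: pte = %#lx (pfn %#lx)\n",
                   i, pte, pte >> PFN_PTE_SHIFT);
            pte += (pdsize / PAGE_SIZE) << PFN_PTE_SHIFT;
        }
        return 0;
    }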
381 pte_t *pte; in assert_pte_locked() local
401 pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl); in assert_pte_locked()
402 BUG_ON(!pte); in assert_pte_locked()
404 pte_unmap(pte); in assert_pte_locked()
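assert_pte_locked() (lines 381-404) walks down to the PTE via the read-only, no-lock map variant, demands that the mapping exists (the BUG_ON at line 402), checks that the covering lock is already held, and unmaps. A toy of that map/assert/unmap bracket, with a pthread mutex standing in for the split PTE lock:

    #include <assert.h>
    #include <pthread.h>

    typedef unsigned long pte_t;

    struct pte_table {
        pthread_mutex_t ptl;          /* split-PTE-lock stand-in */
        pte_t ptes[512];
    };

    /* Like pte_offset_map_ro_nolock(): return the entry plus the lock
     * that covers it, without taking that lock. */
    static pte_t *pte_offset_map_ro_nolock(struct pte_table *t,
                                           unsigned long addr,
                                           pthread_mutex_t **ptlp)
    {
        *ptlp = &t->ptl;
        return &t->ptes[(addr >> 12) & 511];
    }

    void assert_pte_locked(struct pte_table *t, unsigned long addr)
    {
        pthread_mutex_t *ptl;
        pte_t *pte = pte_offset_map_ro_nolock(t, addr, &ptl);

        assert(pte != NULL);                      /* BUG_ON(!pte) */
        assert(pthread_mutex_trylock(ptl) != 0);  /* must already be held */
        /* pte_unmap() would drop the temporary mapping here. */
        (void)pte;
    }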
421 * (3) leaf pte for huge page _PAGE_PTE set
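The final fragment (line 421) is one case from the comment enumerating what a pgd/pmd value can hold; case (3) says a directory slot whose value has _PAGE_PTE set is itself a leaf PTE for a huge page rather than a pointer to a lower table. A one-line predicate capturing that convention (the bit position is the book3s64 value, quoted from memory, so treat it as an assumption):

    #include <stdbool.h>

    #define _PAGE_PTE 0x4000000000000000UL   /* book3s64 position, assumed */

    /* A page-directory value with _PAGE_PTE set maps a huge page
     * directly instead of pointing at a next-level table. */
    static inline bool pgd_or_pmd_is_huge_leaf(unsigned long val)
    {
        return (val & _PAGE_PTE) != 0;
    }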