/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner ([email protected])
 *               Martin Schwidefsky ([email protected])
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994 Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define CRST_ALLOC_ORDER 2

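/*
 * crst_table_alloc()/crst_table_free() manage the four-page
 * (CRST_ALLOC_ORDER) region and segment tables, while
 * page_table_alloc()/page_table_free() manage pte-level page tables.
 * The _pgste variants hand out page tables that carry page-status-table
 * extensions (PGSTEs).
 */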
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_pgste(struct ptdesc *ptdesc);
extern int page_table_allocate_pgste;

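/* Fill all _CRST_ENTRIES slots of a crst table with the given empty entry. */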
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);

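/*
 * If a requested mapping ends above the current ASCE limit but still fits
 * below TASK_SIZE, add page table levels via crst_table_upgrade().
 * Returns addr on success, otherwise the negative error code from
 * crst_table_upgrade() cast to unsigned long.
 */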
static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
					     unsigned long len)
{
	int rc;

	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

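/*
 * p4d/pud/pmd tables are ordinary crst tables: allocate one, mark every
 * entry empty for the respective level and run the generic constructor.
 */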
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pagetable_p4d_ctor(virt_to_ptdesc(table));

	return (p4d_t *) table;
}

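/*
 * When a level is folded its entries live in the next-higher table and no
 * separate crst table was allocated, so the free is a no-op; the same
 * applies to pud_free() and pmd_free() below.
 */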
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (mm_p4d_folded(mm))
		return;

	pagetable_dtor(virt_to_ptdesc(p4d));
	crst_table_free(mm, (unsigned long *) p4d);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _REGION3_ENTRY_EMPTY);
	pagetable_pud_ctor(virt_to_ptdesc(table));

	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (mm_pud_folded(mm))
		return;

	pagetable_dtor(virt_to_ptdesc(pud));
	crst_table_free(mm, (unsigned long *) pud);
}

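/*
 * Unlike the p4d/pud constructors, pagetable_pmd_ctor() can fail, so the
 * freshly allocated crst table must be released again on error.
 */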
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pagetable_pmd_ctor(virt_to_ptdesc(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pagetable_dtor(virt_to_ptdesc(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

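/*
 * The populate helpers link the next-lower table into an entry by
 * combining its physical address with the region-table entry bits of the
 * respective level.
 */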
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
}

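/* Allocate/free the crst table backing the top level of an address space. */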
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	pagetable_pgd_ctor(virt_to_ptdesc(table));

	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pagetable_dtor(virt_to_ptdesc(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

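/* Link a pte table into a segment (pmd) entry; the kernel variant is identical. */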
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

/* The arch uses its own pte_free_defer() implementation in arch/s390/mm/pgalloc.c */
#define pte_free_defer pte_free_defer
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);

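/*
 * Routines for the kernel's own (vmem) page tables, and for building and
 * releasing stand-alone base ASCEs covering a given page range.
 */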
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */