/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _REGION_H_
#define _REGION_H_

#include <sys/types.h>
#include <stddef.h>
#include <stdbool.h>
#include <commonlib/bsd/helpers.h>
#include <commonlib/mem_pool.h>

/*
 * Region support.
 *
 * Regions are intended to abstract away the access mechanisms for blocks of
 * data. This could be SPI, eMMC, or a memory region as the backing store.
 * They are accessed through a region_device. Subregions can be made by
 * chaining together multiple region_devices.
 */

struct region_device;

/*
 * Returns NULL on error; otherwise returns a buffer with the contents of
 * the requested data of the given size at the given offset.
 */
void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size);

/* Unmap a previously mapped area. Returns 0 on success, < 0 on error. */
int rdev_munmap(const struct region_device *rd, void *mapping);
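
/*
 * Example usage of the mmap/munmap pair (an illustrative sketch; `boot_dev`
 * is a hypothetical region_device assumed to be initialized elsewhere):
 *
 *	void *p = rdev_mmap(&boot_dev, 0x100, 0x40);
 *	if (p == NULL)
 *		return -1;
 *	// ... consume the 0x40 bytes at p ...
 *	rdev_munmap(&boot_dev, p);
 */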

/*
 * Returns < 0 on error; otherwise returns the number of bytes read at the
 * provided offset, filling in the buffer passed.
 */
ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
		size_t size);

/*
 * Returns < 0 on error; otherwise returns the number of bytes written at the
 * provided offset from the buffer passed.
 */
ssize_t rdev_writeat(const struct region_device *rd, const void *b,
		size_t offset, size_t size);
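
/*
 * Example usage of the read/write pair (a sketch; `boot_dev` is a
 * hypothetical region_device and the offsets are arbitrary):
 *
 *	uint8_t buf[64];
 *	if (rdev_readat(&boot_dev, buf, 0, sizeof(buf)) != sizeof(buf))
 *		return -1;
 *	if (rdev_writeat(&boot_dev, buf, 0x1000, sizeof(buf)) != sizeof(buf))
 *		return -1;
 */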

/*
 * Returns < 0 on error; otherwise returns the number of bytes erased.
 * If the eraseat op is not defined, size is returned, indicating that
 * the operation was successful.
 */
ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
		size_t size);
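
/*
 * Example usage (a sketch; `flash_dev` is a hypothetical region_device backed
 * by storage that requires erasing, and 4 KiB is an assumed erase-block size):
 *
 *	if (rdev_eraseat(&flash_dev, 0, 4 * KiB) != 4 * KiB)
 *		return -1;
 */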

/****************************************
 *  Implementation of a region device   *
 ****************************************/

/*
 * Create a child region of the parent provided the sub-region is within
 * the parent's region. Returns < 0 on error; otherwise 0 on success. Note
 * that the child device only calls through the parent's operations.
 */
int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size);
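
/*
 * Example usage (a sketch; `parent_dev` is a hypothetical region_device and
 * the offset/size describe an arbitrary sub-region within it):
 *
 *	struct region_device child;
 *	if (rdev_chain(&child, &parent_dev, 0x1000, 0x800) < 0)
 *		return -1;
 *	// child now spans [0x1000, 0x1800) of parent_dev and routes all
 *	// operations through the parent's ops.
 */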

/* The operations supported by a region_device. */
struct region_device_ops {
	void *(*mmap)(const struct region_device *, size_t, size_t);
	int (*munmap)(const struct region_device *, void *);
	ssize_t (*readat)(const struct region_device *, void *, size_t, size_t);
	ssize_t (*writeat)(const struct region_device *, const void *, size_t,
		size_t);
	ssize_t (*eraseat)(const struct region_device *, size_t, size_t);
};

struct region {
	size_t offset;
	size_t size;
};

struct region_device {
	const struct region_device *root;
	const struct region_device_ops *ops;
	struct region region;
};

#define REGION_DEV_INIT(ops_, offset_, size_)		\
	{						\
		.root = NULL,				\
		.ops = (ops_),				\
		.region = {				\
			.offset = (offset_),		\
			.size = (size_),		\
		},					\
	}
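
/*
 * Example usage (a sketch; `my_flash_ops` is a hypothetical
 * region_device_ops table and 16 MiB an assumed device size):
 *
 *	static const struct region_device flash_dev =
 *		REGION_DEV_INIT(&my_flash_ops, 0, 16 * MiB);
 */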

/* Helper to dynamically initialize a region device. */
void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size);

/* Return 1 if child is a subregion of parent, else 0. */
int region_is_subregion(const struct region *p, const struct region *c);

static inline size_t region_offset(const struct region *r)
{
	return r->offset;
}

static inline size_t region_sz(const struct region *r)
{
	return r->size;
}

static inline size_t region_end(const struct region *r)
{
	return region_offset(r) + region_sz(r);
}

static inline bool region_overlap(const struct region *r1, const struct region *r2)
{
	return (region_end(r1) > region_offset(r2)) &&
	       (region_offset(r1) < region_end(r2));
}
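
/*
 * Regions are treated as half-open intervals [offset, offset + size), so two
 * regions that merely touch do not overlap. For example:
 *
 *	const struct region a = { .offset = 0x0,   .size = 0x100 };
 *	const struct region b = { .offset = 0x100, .size = 0x100 };
 *	region_overlap(&a, &b);	// false: a ends exactly where b begins
 */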

static inline const struct region *region_device_region(
					const struct region_device *rdev)
{
	return &rdev->region;
}

static inline size_t region_device_sz(const struct region_device *rdev)
{
	return region_sz(region_device_region(rdev));
}

static inline size_t region_device_offset(const struct region_device *rdev)
{
	return region_offset(region_device_region(rdev));
}

static inline size_t region_device_end(const struct region_device *rdev)
{
	return region_end(region_device_region(rdev));
}

/* Memory map entire region device. Same semantics as rdev_mmap() above. */
static inline void *rdev_mmap_full(const struct region_device *rd)
{
	return rdev_mmap(rd, 0, region_device_sz(rd));
}

static inline int rdev_chain_full(struct region_device *child,
				const struct region_device *parent)
{
	/* Chain full size of parent. */
	return rdev_chain(child, parent, 0, region_device_sz(parent));
}

/*
 * Returns < 0 on error; otherwise returns the number of bytes read, starting
 * at offset 0 and filling in the buffer passed.
 *
 * You must ensure the buffer is large enough to hold the full region_device.
 */
static inline ssize_t rdev_read_full(const struct region_device *rd, void *b)
{
	return rdev_readat(rd, b, 0, region_device_sz(rd));
}
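
/*
 * Example usage (a sketch; `boot_dev` is a hypothetical region_device, the
 * buffer is sized from the device itself to satisfy the requirement above,
 * and a heap allocator is assumed to be available):
 *
 *	void *buf = malloc(region_device_sz(&boot_dev));
 *	if (buf == NULL || rdev_read_full(&boot_dev, buf) < 0)
 *		return -1;
 */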

/*
 * Compute relative offset of the child (c) w.r.t. the parent (p). Returns < 0
 * when child is not within the parent's region.
 */
ssize_t rdev_relative_offset(const struct region_device *p,
		const struct region_device *c);

/* Helper functions to create an rdev that represents memory. */
int rdev_chain_mem(struct region_device *child, const void *base, size_t size);
int rdev_chain_mem_rw(struct region_device *child, void *base, size_t size);
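
/*
 * Example usage (a sketch; the scratch buffer stands in for any memory-backed
 * block of data):
 *
 *	static uint8_t scratch[256];
 *	struct region_device mem_dev;
 *	if (rdev_chain_mem_rw(&mem_dev, scratch, sizeof(scratch)) < 0)
 *		return -1;
 */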

struct mem_region_device {
	char *base;
	struct region_device rdev;
};

/* Initialize at runtime a mem_region_device. Should only be used for mappings
   that need to fit right up to the edge of the physical address space. Most use
   cases will want to use rdev_chain_mem() instead. */
void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
		size_t size);

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
		size_t size);

extern const struct region_device_ops mem_rdev_ro_ops;

extern const struct region_device_ops mem_rdev_rw_ops;

/* Statically initialize mem_region_device. Should normally only be used for
   const globals. Most use cases will want to use rdev_chain_mem() instead. */
#define MEM_REGION_DEV_INIT(base_, size_, ops_)			\
	{							\
		.base = (void *)(base_),			\
		.rdev = REGION_DEV_INIT((ops_), 0, (size_)),	\
	}

#define MEM_REGION_DEV_RO_INIT(base_, size_)			\
	MEM_REGION_DEV_INIT(base_, size_, &mem_rdev_ro_ops)

#define MEM_REGION_DEV_RW_INIT(base_, size_)			\
	MEM_REGION_DEV_INIT(base_, size_, &mem_rdev_rw_ops)
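
/*
 * Example usage (a sketch; the base address is an assumed, platform-specific
 * memory-mapped location):
 *
 *	static const struct mem_region_device boot_mdev =
 *		MEM_REGION_DEV_RO_INIT(0xff000000, 16 * MiB);
 */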

struct mmap_helper_region_device {
	struct mem_pool *pool;
	struct region_device rdev;
};

#define MMAP_HELPER_DEV_INIT(ops_, offset_, size_, mpool_)		\
	{								\
		.rdev = REGION_DEV_INIT((ops_), (offset_), (size_)),	\
		.pool = (mpool_),					\
	}

void *mmap_helper_rdev_mmap(const struct region_device *, size_t, size_t);
int mmap_helper_rdev_munmap(const struct region_device *, void *);

/*
 * A translated region device provides the ability to publish a region device in one address
 * space and use an access mechanism within another address space. The sub-region is the window
 * within the first address space, and the request is modified prior to accessing the second
 * address space provided by access_dev.
 *
 * Each xlate_region_device can support multiple translation windows, described using the
 * xlate_window structure. The windows need not be contiguous in either address space. However,
 * this poses restrictions on the operations being performed, i.e. callers cannot perform
 * operations across multiple windows of a translated region device. It would be possible to
 * support readat/writeat/eraseat by translating them into multiple calls, one to the access
 * device of each window. However, mmap support is tricky because the caller expects the
 * memory-mapped region to be contiguous in both address spaces. Thus, to keep the semantics
 * consistent for all region ops, xlate_region_device does not support any operation across a
 * window boundary.
 *
 * Note: The platform is expected to ensure that the fmap description does not place any
 * section (that will be operated on using the translated region device) across multiple
 * windows.
 */
struct xlate_window {
	const struct region_device *access_dev;
	struct region sub_region;
};

struct xlate_region_device {
	size_t window_count;
	const struct xlate_window *window_arr;
	struct region_device rdev;
};

extern const struct region_device_ops xlate_rdev_ro_ops;

extern const struct region_device_ops xlate_rdev_rw_ops;

#define XLATE_REGION_DEV_INIT(window_arr_, parent_sz_, ops_)		\
	{								\
		.window_count = ARRAY_SIZE(window_arr_),		\
		.window_arr = window_arr_,				\
		.rdev = REGION_DEV_INIT((ops_), 0, (parent_sz_)),	\
	}

#define XLATE_REGION_DEV_RO_INIT(window_arr_, parent_sz_)		\
	XLATE_REGION_DEV_INIT(window_arr_, parent_sz_, &xlate_rdev_ro_ops)

#define XLATE_REGION_DEV_RW_INIT(window_arr_, parent_sz_)		\
	XLATE_REGION_DEV_INIT(window_arr_, parent_sz_, &xlate_rdev_rw_ops)

/* Helpers to dynamically initialize an xlate region device. */
void xlate_region_device_ro_init(struct xlate_region_device *xdev,
		size_t window_count, const struct xlate_window *window_arr,
		size_t parent_size);

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
		size_t window_count, const struct xlate_window *window_arr,
		size_t parent_size);

void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
		size_t sub_region_offset, size_t sub_region_size);
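
/*
 * Example usage (a sketch; `spi_dev` is a hypothetical access device, and the
 * single window exposes the first 4 MiB of a 16 MiB address space through it):
 *
 *	struct xlate_window window;
 *	struct xlate_region_device xdev;
 *	xlate_window_init(&window, &spi_dev, 0, 4 * MiB);
 *	xlate_region_device_ro_init(&xdev, 1, &window, 16 * MiB);
 */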

/* This type can be used for incoherent access where the read and write
 * operations are backed by separate drivers. An example is x86 systems
 * with memory-mapped media for reading but a SPI flash driver for
 * writing. One needs to ensure using this object is appropriate in context. */
struct incoherent_rdev {
	struct region_device rdev;
	const struct region_device *read;
	const struct region_device *write;
};

/* Initialize an incoherent_rdev based on the region as well as the read and
 * write rdevs. The read and write rdevs should match the passed-in region
 * in size. If not, the initialization will fail, returning NULL. Otherwise
 * the function will return a pointer to the containing region_device to
 * be used for region operations. Therefore, the lifetime of the returned
 * pointer matches the lifetime of the incoherent_rdev object. Likewise,
 * the lifetimes of the read and write rdevs need to match the lifetime of
 * the incoherent_rdev object. */
const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
		const struct region *r,
		const struct region_device *read,
		const struct region_device *write);
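
/*
 * Example usage (a sketch; `mmap_dev` and `spi_dev` are hypothetical read and
 * write rdevs that both span the same 16 MiB region):
 *
 *	static struct incoherent_rdev irdev_store;
 *	const struct region r = { .offset = 0, .size = 16 * MiB };
 *	const struct region_device *rdev =
 *		incoherent_rdev_init(&irdev_store, &r, &mmap_dev, &spi_dev);
 *	if (rdev == NULL)
 *		return -1;
 */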

#endif /* _REGION_H_ */