/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <stdint.h>
#include <string.h>

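/* Check whether region c lies entirely within region p and does not wrap
 * (its end is not below its offset). Returns 1 when both conditions hold. */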
int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}

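/* Translate the inner region's offset into the outer region's coordinate
 * space, then verify the translated request still fits within outer. */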
static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

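/* Return the root of a chained region device, i.e. the device that actually
 * provides the ops. A device with no root pointer is its own root. */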
static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

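/* Return the offset of child device c relative to parent device p, or -1 if
 * the two do not share a root device or c does not lie within p. */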
ssize_t rdev_relative_offset(const struct region_device *p,
			     const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

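/* The generic accessors below follow a common pattern: validate the request
 * against this device's region, translate it into root-device coordinates,
 * and dispatch to the root device's corresponding operation. */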
void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}

ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
		    size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
		     size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
		     size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If no eraseat op is provided, treat the erase as a successful
	 * no-op. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}

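/* Initialize child as a view into parent covering [offset, offset + size).
 * The child records the parent's root device, with its region expressed in
 * root-device coordinates. Returns 0 on success, -1 if the requested range
 * does not fit inside parent. */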
int rdev_chain(struct region_device *child, const struct region_device *parent,
	       size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of root region device. Note the offsets are relative
	 * to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}

static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
			       size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
			       size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}

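/* Initialize a root region device with the given ops covering
 * [offset, offset + size). */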
void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

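/* A translating (xlate) region device forwards each request to the access
 * device of whichever window fully contains it, after rebasing the offset to
 * that window. Requests not contained in a single window fail. */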
static void xlate_region_device_init(struct xlate_region_device *xdev,
		const struct region_device_ops *ops,
		size_t window_count, const struct xlate_window *window_arr,
		size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->window_count = window_count;
	xdev->window_arr = window_arr;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
		size_t window_count, const struct xlate_window *window_arr,
		size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, window_count, window_arr,
			parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
		size_t window_count, const struct xlate_window *window_arr,
		size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, window_count, window_arr,
			parent_size);
}

void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
		       size_t sub_region_offset, size_t sub_region_size)
{
	window->access_dev = access_dev;
	window->sub_region.offset = sub_region_offset;
	window->sub_region.size = sub_region_size;
}

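/* Memory-backed region device: requests are served directly from the base
 * pointer with plain offset arithmetic; erase fills the range with zeros. */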
static void *mdev_mmap(const struct region_device *rd, size_t offset,
		       size_t size __always_unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __always_unused,
		       void *mapping __always_unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
			   size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
			    size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
			    size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

static const struct mem_region_device mem_rdev = MEM_REGION_DEV_RO_INIT(0, ~(size_t)0);
static const struct mem_region_device mem_rdev_rw = MEM_REGION_DEV_RW_INIT(0, ~(size_t)0);

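/* Chain a region device over an arbitrary memory range by windowing into the
 * whole-address-space memory devices defined above. */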
int rdev_chain_mem(struct region_device *child, const void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev.rdev, (uintptr_t)base, size);
}

int rdev_chain_mem_rw(struct region_device *child, void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev_rw.rdev, (uintptr_t)base, size);
}

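/* mmap helpers for devices that cannot map their backing store directly:
 * mmap allocates a buffer from the device's memory pool and reads the
 * requested range into it; munmap returns the buffer to the pool. */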
void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
			    size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(mdev->pool, mapping);

	return 0;
}

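/* Return the window that fully contains the requested region, or NULL if no
 * single window does. */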
static const struct xlate_window *xlate_find_window(const struct xlate_region_device *xldev,
						    const struct region *req)
{
	size_t i;
	const struct xlate_window *xlwindow;

	for (i = 0; i < xldev->window_count; i++) {
		xlwindow = &xldev->window_arr[i];
		if (region_is_subregion(&xlwindow->sub_region, req))
			return xlwindow;
	}

	return NULL;
}

static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return NULL;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_mmap(xlwindow->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd __always_unused,
			void *mapping __always_unused)
{
	/*
	 * xlate_region_device does not keep track of the access device that was used to service
	 * a mmap request. So, munmap does not do anything. If munmap functionality is required,
	 * then xlate_region_device will have to be updated to accept some pre-allocated space
	 * from the caller to keep track of the mapping requests. Since xlate_region_device is
	 * only used for memory-mapped boot media on the backend right now, skipping munmap is
	 * fine.
	 */
	return 0;
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
			    size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_readat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
			     size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_writeat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
			     size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_eraseat(xlwindow->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

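/* An incoherent region device pairs two backends covering the same region:
 * reads and mmaps are serviced by the 'read' device, while writes and erases
 * go to the 'write' device. */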
static void *incoherent_mmap(const struct region_device *rd, size_t offset,
			     size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
				 size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
				  size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
				  size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
						 const struct region *r,
						 const struct region_device *read,
						 const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the generic
	 * rdev operations can be called on the read or write implementation
	 * without any unnecessary translation because the offsets all start
	 * at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}