// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 */
/*
 * This file contains functions for buffer object structure management.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_bo.h"

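/*
 * Initialize @bo as a free buffer object of @pgnr pages starting at the
 * device's ISP virtual start address: zero the structure, set up its mutex
 * and list head, and record the page count and address range.
 */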
static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{
	check_bodev_null_return(bdev, -EINVAL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
			 "hmm_bo_device not inited yet.\n");
	/* prevent zero size buffer object */
	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return -EINVAL;
	}

	memset(bo, 0, sizeof(*bo));
	mutex_init(&bo->mutex);

	/* init the bo->list HEAD as an element of entire_bo_list */
	INIT_LIST_HEAD(&bo->list);

	bo->bdev = bdev;
	bo->vmap_addr = NULL;
	bo->status = HMM_BO_FREE;
	bo->start = bdev->start;
	bo->pgnr = pgnr;
	bo->end = bo->start + pgnr_to_size(pgnr);
	bo->prev = NULL;
	bo->next = NULL;

	return 0;
}

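/*
 * Best-fit search of the free rbtree (keyed by page count): find a free bo
 * with at least @pgnr pages, preferring an exact match, remove it from the
 * tree (or from the per-node list of equally sized bos) and return it.
 * Returns NULL when no free bo is large enough.
 */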
static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
	struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
	    (this->pgnr > pgnr && !this->node.rb_left)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on the free rbtree have 'prev' set to NULL.
	 * 1. if 'this->next' is NULL, 'this' is a plain rbtree node:
	 *    erase it, rebalance the rbtree and return 'this'.
	 */
	if (!this->next) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* 2. otherwise 'this' heads a list of equally sized bos: unlink and
	 *    return 'this->next'; the rbtree itself stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;
	return temp_bo;
}

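/*
 * Look up the bo whose ISP virtual address equals @start in an rbtree keyed
 * by start address (used on the allocated rbtree). Returns NULL when no
 * exact match exists.
 */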
static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
	ia_css_ptr start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}

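/*
 * Like __bo_search_by_addr(), but return the bo whose [start, end) range
 * contains @start rather than requiring an exact start-address match.
 */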
static struct hmm_buffer_object *__bo_search_by_addr_in_range(
	struct rb_root *root, unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}

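/*
 * Insert @bo into the free rbtree, which is keyed by page count. Equally
 * sized bos are not inserted as separate tree nodes but are chained onto the
 * existing node via their next/prev pointers. Marks the bo HMM_BO_FREE.
 */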
static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

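/*
 * Insert @bo into the allocated rbtree, keyed by ISP virtual start address,
 * initialize its refcount and mark it HMM_BO_ALLOCED.
 */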
static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

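/*
 * Split @pgnr pages off the front of free bo @bo: a new bo covering the
 * first @pgnr pages is created and linked into entire_bo_list just before
 * @bo, while @bo keeps the remaining pages. Returns the new bo, or NULL on
 * allocation failure.
 */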
static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
	struct hmm_buffer_object *bo,
	unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}

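/*
 * Remove @bo from the free bo bookkeeping without touching its neighbours'
 * address ranges. The bo is either an rbtree node (possibly heading a list
 * of equally sized bos) or a member of such a list; see the four cases in
 * the comments below.
 */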
static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take off a known bo from the free rbtree:
	 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
	 *    and does not have a linked list after it; to take off this bo,
	 *    we just need to erase bo directly and rebalance the free rbtree
	 */
	if (!bo->prev && !bo->next) {
		rb_erase(&bo->node, &bdev->free_rbtree);
	/* 2. when bo->next != NULL && bo->prev == NULL, bo is an rbtree node
	 *    and heads a linked list; to take off this bo we need to erase bo
	 *    first, then insert bo->next into the free rbtree and rebalance
	 *    the free rbtree
	 */
	} else if (!bo->prev && bo->next) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
	/* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
	 *    node, bo is the last element of the linked list after the rbtree
	 *    node; to take off this bo, we just need to set the "prev/next"
	 *    pointers to NULL, the free rbtree stays unchanged
	 */
	} else if (bo->prev && !bo->next) {
		bo->prev->next = NULL;
		bo->prev = NULL;
	/* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
	 *    node, bo is in the middle of the linked list after the rbtree
	 *    node; to take off this bo, we just set the "prev/next" pointers
	 *    to NULL, the free rbtree stays unchanged
	 */
	} else if (bo->prev && bo->next) {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}

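/*
 * Merge two adjacent bos from entire_bo_list: @next_bo absorbs @bo's range,
 * @bo is unlinked from the list and returned to the bo cache. Returns the
 * surviving bo.
 */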
static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
	struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}

/*
 * hmm_bo_device functions.
 */
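/*
 * Set up a bo device managing the ISP virtual address range
 * [vaddr_start, vaddr_start + size): initialize the ISP MMU, create the bo
 * slab cache, and seed the free rbtree with one bo covering the whole range.
 */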
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	bdev->flag = HMM_BO_DEVICE_INITED;

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
					   sizeof(struct hmm_buffer_object), 0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
		kmem_cache_destroy(bdev->bo_cache);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		kmem_cache_destroy(bdev->bo_cache);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	return 0;
}

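/*
 * Allocate @pgnr pages of ISP virtual address space from the free rbtree.
 * A larger free bo is split so that exactly @pgnr pages are handed out and
 * the remainder goes back onto the free rbtree. Returns NULL when no free
 * bo is big enough.
 */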
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}

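/*
 * Return a bo's ISP virtual address range to the free pool: unbind, free
 * pages and vunmap anything still outstanding, merge with adjacent free
 * neighbours in entire_bo_list and insert the result into the free rbtree.
 * A bo that is still mmapped is left untouched.
 */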
void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIX ME:
	 *
	 * how to destroy the bo when it is still MMAPED?
	 *
	 * ideally, this will not happen as hmm_bo_release
	 * will only be called when kref reaches 0, and in mmap
	 * operation the hmm_bo_ref will eventually be called.
	 * so, if this happens, something went wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}

void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos even if they are in use;
	 * they all get merged back into one big free bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}

int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}

int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}

struct hmm_buffer_object *hmm_bo_device_search_start(
	struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_in_range(
	struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s can not find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
	struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* skip bos which have no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}

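/* Free the first nr_pages order-0 pages of page_array back to the kernel. */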
static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		__free_pages(page_array[i], 0);
}

static void free_private_bo_pages(struct hmm_buffer_object *bo)
{
	set_pages_array_wb(bo->pages, bo->pgnr);
	free_pages_bulk_array(bo->pgnr, bo->pages);
}

/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo)
{
	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
	int ret;

	ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
	if (ret != bo->pgnr) {
		free_pages_bulk_array(ret, bo->pages);
		dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
		return -ENOMEM;
	}

	ret = set_pages_array_uc(bo->pages, bo->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "set pages uncacheable failed.\n");
		free_pages_bulk_array(bo->pgnr, bo->pages);
		return ret;
	}

	return 0;
}

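/*
 * Fill bo->pages with the pages backing an existing vmalloc() area; no new
 * pages are allocated, the bo just references the vmalloc pages.
 */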
static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr)
{
	void *vaddr = vmalloc_addr;
	int i;

	for (i = 0; i < bo->pgnr; i++) {
		bo->pages[i] = vmalloc_to_page(vaddr);
		if (!bo->pages[i]) {
			dev_err(atomisp_dev, "Error could not get page %d of vmalloc buf\n", i);
			return -ENOMEM;
		}
		vaddr += PAGE_SIZE;
	}

	return 0;
}

/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages come from. Currently we have two types
 * of memory: HMM_BO_PRIVATE and HMM_BO_VMALLOC.
 *
 * vmalloc_addr is only valid when type is HMM_BO_VMALLOC.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type,
		       void *vmalloc_addr)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);
	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!bo->pages)) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo);
	} else if (type == HMM_BO_VMALLOC) {
		ret = alloc_vmalloc_pages(bo, vmalloc_addr);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;

	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	kfree(bo->pages);
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
	return -EINVAL;
}

/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_bo_pages(bo);
	else if (bo->type == HMM_BO_VMALLOC)
		; /* No-op, nothing to do */
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");

	kfree(bo->pages);
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
}

int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}

/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret =
		    isp_mmu_map(&bdev->mmu, virt,
				page_to_phys(bo->pages[i]), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush TLB here.
	 *
	 * theoretically, we do not need to flush the TLB as we did not change
	 * any existing address mappings, but for Silicon Hive's MMU it is
	 * really a bug here. I guess when fetching PTEs (page table entries)
	 * into the TLB, its MMU will fetch additional INVALID PTEs
	 * automatically for performance reasons. E.g., we only set up 1 page
	 * address mapping, meaning updating 1 PTE, but the MMU fetches 4 PTEs
	 * at a time, so the additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages with related virtual address space */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or page not allocated.\n");
	return -EINVAL;
}

/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush TLB as the address mapping has been removed and
	 * related TLBs should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or page not allocated or not bound yet.\n");
}

int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);

	ret = bo->status & HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return ret;
}

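/*
 * Map the bo's pages into a kernel virtual mapping via vmap(), either cached
 * or uncached. An existing mapping of the requested type is reused; a
 * mapping of the other type is torn down and recreated.
 */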
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* the cached status needs to be changed, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}

void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}

void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}

void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}

static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}

static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}

static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}

static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};

/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * the vma's virtual address space size and the buffer object's
	 * size must be the same.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->pages[i]);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
	return -EINVAL;
}