1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
4  *
5  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
6  *
7  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
8  */
9 
10 #ifndef	__HMM_BO_H__
11 #define	__HMM_BO_H__
12 
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/list.h>
16 #include <linux/spinlock.h>
17 #include <linux/mutex.h>
18 #include "mmu/isp_mmu.h"
19 #include "hmm/hmm_common.h"
20 #include "ia_css_types.h"
21 
22 #define	check_bodev_null_return(bdev, exp)	\
23 		check_null_return(bdev, exp, \
24 			"NULL hmm_bo_device.\n")
25 
26 #define	check_bodev_null_return_void(bdev)	\
27 		check_null_return_void(bdev, \
28 			"NULL hmm_bo_device.\n")
29 
#define	check_bo_status_yes_goto(bo, _status, label) \
	var_not_equal_goto((bo->status & (_status)), (_status), \
			label, \
			"HMM buffer status does not contain %s.\n", \
			#_status)

#define	check_bo_status_no_goto(bo, _status, label) \
	var_equal_goto((bo->status & (_status)), (_status), \
			label, \
			"HMM buffer status contains %s.\n", \
			#_status)
41 
42 #define rbtree_node_to_hmm_bo(root_node)	\
43 	container_of((root_node), struct hmm_buffer_object, node)
44 
45 #define	list_to_hmm_bo(list_ptr)	\
46 	list_entry((list_ptr), struct hmm_buffer_object, list)
47 
#define	kref_to_hmm_bo(kref_ptr)	\
	container_of((kref_ptr), struct hmm_buffer_object, kref)
50 
51 #define	check_bo_null_return(bo, exp)	\
52 	check_null_return(bo, exp, "NULL hmm buffer object.\n")
53 
54 #define	check_bo_null_return_void(bo)	\
55 	check_null_return_void(bo, "NULL hmm buffer object.\n")
56 
57 #define	ISP_VM_START	0x0
58 #define	ISP_VM_SIZE	(0x7FFFFFFF)	/* 2G address space */
59 #define	ISP_PTR_NULL	NULL
60 
61 #define	HMM_BO_DEVICE_INITED	0x1
62 
63 enum hmm_bo_type {
64 	HMM_BO_PRIVATE,
65 	HMM_BO_VMALLOC,
66 	HMM_BO_LAST,
67 };
68 
69 #define	HMM_BO_MASK		0x1
70 #define	HMM_BO_FREE		0x0
71 #define	HMM_BO_ALLOCED	0x1
72 #define	HMM_BO_PAGE_ALLOCED	0x2
73 #define	HMM_BO_BINDED		0x4
74 #define	HMM_BO_MMAPED		0x8
75 #define	HMM_BO_VMAPED		0x10
76 #define	HMM_BO_VMAPED_CACHED	0x20
77 #define	HMM_BO_ACTIVE		0x1000
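
/*
 * Example of how the status bits combine with the check macros above:
 * require a reserved vm region and backing pages before binding, and
 * refuse to bind twice. This is a hedged sketch, not code taken from
 * the driver; the function and label names are made up for illustration.
 *
 *	static int example_can_bind(struct hmm_buffer_object *bo)
 *	{
 *		int ret = -EINVAL;
 *
 *		check_bo_status_yes_goto(bo,
 *					 HMM_BO_ALLOCED | HMM_BO_PAGE_ALLOCED,
 *					 status_err);
 *		check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err);
 *		ret = 0;
 *	status_err:
 *		return ret;
 *	}
 */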
78 
79 struct hmm_bo_device {
80 	struct isp_mmu		mmu;
81 
	/* start/pgnr/size describe the ISP virtual address space of this device */
83 	unsigned int start;
84 	unsigned int pgnr;
85 	unsigned int size;
86 
87 	/* list lock is used to protect the entire_bo_list */
88 	spinlock_t	list_lock;
89 	int flag;
90 
	/* linked list of all buffer objects */
	struct list_head entire_bo_list;
	/* rbtree maintaining all allocated vm regions */
	struct rb_root allocated_rbtree;
	/* rbtree maintaining all free vm regions (each node has a distinct pgnr) */
	struct rb_root free_rbtree;
97 	struct mutex rbtree_mutex;
98 	struct kmem_cache *bo_cache;
99 };
100 
101 struct hmm_buffer_object {
102 	struct hmm_bo_device	*bdev;
103 	struct list_head	list;
104 	struct kref	kref;
105 
106 	struct page **pages;
107 
108 	/* mutex protecting this BO */
109 	struct mutex		mutex;
110 	enum hmm_bo_type	type;
111 	int		mmap_count;
112 	int		status;
113 	void		*vmap_addr; /* kernel virtual address by vmap */
114 
115 	struct rb_node	node;
116 	unsigned int	start;
117 	unsigned int	end;
118 	unsigned int	pgnr;
119 	/*
120 	 * When insert a bo which has the same pgnr with an existed
121 	 * bo node in the free_rbtree, using "prev & next" pointer
122 	 * to maintain a bo linked list instead of insert this bo
123 	 * into free_rbtree directly, it will make sure each node
124 	 * in free_rbtree has different pgnr.
125 	 * "prev & next" default is NULL.
126 	 */
127 	struct hmm_buffer_object	*prev;
128 	struct hmm_buffer_object	*next;
129 };
130 
131 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
132 				       unsigned int pgnr);
133 
134 void hmm_bo_release(struct hmm_buffer_object *bo);
135 
136 int hmm_bo_device_init(struct hmm_bo_device *bdev,
137 		       struct isp_mmu_client *mmu_driver,
138 		       unsigned int vaddr_start, unsigned int size);
139 
140 /*
141  * clean up all hmm_bo_device related things.
142  */
143 void hmm_bo_device_exit(struct hmm_bo_device *bdev);
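
/*
 * Example device lifecycle (a hedged sketch assuming a non-zero return
 * from hmm_bo_device_init means failure; "my_mmu_client" stands in for
 * whatever struct isp_mmu_client the platform MMU code provides):
 *
 *	struct hmm_bo_device bdev;
 *
 *	if (hmm_bo_device_init(&bdev, &my_mmu_client,
 *			       ISP_VM_START, ISP_VM_SIZE))
 *		return -ENOMEM;
 *	...
 *	hmm_bo_device_exit(&bdev);
 */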
144 
145 /*
146  * whether the bo device is inited or not.
147  */
148 int hmm_bo_device_inited(struct hmm_bo_device *bdev);
149 
150 /*
151  * increase buffer object reference.
152  */
153 void hmm_bo_ref(struct hmm_buffer_object *bo);
154 
155 /*
156  * decrease buffer object reference. if reference reaches 0,
157  * release function of the buffer object will be called.
158  *
159  * this call is also used to release hmm_buffer_object or its
160  * upper level object with it embedded in. you need to call
161  * this function when it is no longer used.
162  *
163  * Note:
164  *
165  * user dont need to care about internal resource release of
166  * the buffer object in the release callback, it will be
167  * handled internally.
168  *
169  * this call will only release internal resource of the buffer
170  * object but will not free the buffer object itself, as the
171  * buffer object can be both pre-allocated statically or
172  * dynamically allocated. so user need to deal with the release
173  * of the buffer object itself manually. below example shows
174  * the normal case of using the buffer object.
175  *
176  *	struct hmm_buffer_object *bo = hmm_bo_create(bdev, pgnr);
177  *	......
178  *	hmm_bo_unref(bo);
179  *
180  * or:
181  *
182  *	struct hmm_buffer_object bo;
183  *
184  *	hmm_bo_init(bdev, &bo, pgnr, NULL);
185  *	...
186  *	hmm_bo_unref(&bo);
187  */
188 void hmm_bo_unref(struct hmm_buffer_object *bo);
189 
190 int hmm_bo_allocated(struct hmm_buffer_object *bo);
191 
192 /*
193  * Allocate/Free physical pages for the bo. Type indicates if the
194  * pages will be allocated by using video driver (for share buffer)
195  * or by ISP driver itself.
196  */
197 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
198 		       enum hmm_bo_type type,
199 		       void *vmalloc_addr);
200 void hmm_bo_free_pages(struct hmm_buffer_object *bo);
201 int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
202 
203 /*
204  * bind/unbind the physical pages to a virtual address space.
205  */
206 int hmm_bo_bind(struct hmm_buffer_object *bo);
207 void hmm_bo_unbind(struct hmm_buffer_object *bo);
208 int hmm_bo_binded(struct hmm_buffer_object *bo);
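
/*
 * Example allocation flow combining the calls above, in the usual
 * order: reserve ISP virtual space, back it with pages, then map the
 * pages into the ISP MMU. A hedged sketch assuming 0 means success for
 * the int-returning calls; error unwinding is abbreviated.
 *
 *	struct hmm_buffer_object *bo;
 *
 *	bo = hmm_bo_alloc(bdev, pgnr);
 *	if (!bo)
 *		return -ENOMEM;
 *	if (hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, NULL))
 *		goto err_unref;
 *	if (hmm_bo_bind(bo))
 *		goto err_free_pages;
 *	...
 *	hmm_bo_unbind(bo);
 *	hmm_bo_free_pages(bo);
 *	hmm_bo_unref(bo);
 */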
209 
210 /*
211  * vmap buffer object's pages to contiguous kernel virtual address.
212  * if the buffer has been vmaped, return the virtual address directly.
213  */
214 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);
215 
216 /*
217  * flush the cache for the vmapped buffer object's pages,
218  * if the buffer has not been vmapped, return directly.
219  */
220 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);
221 
222 /*
223  * vunmap buffer object's kernel virtual address.
224  */
225 void hmm_bo_vunmap(struct hmm_buffer_object *bo);
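
/*
 * Example of CPU access through the vmap interface (a hedged sketch;
 * it assumes the bo already has pages allocated and that flushing after
 * the CPU write is what makes the data visible to the ISP):
 *
 *	void *va = hmm_bo_vmap(bo, true);
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, data, len);
 *	hmm_bo_flush_vmap(bo);
 *	hmm_bo_vunmap(bo);
 */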
226 
227 /*
228  * mmap the bo's physical pages to specific vma.
229  *
230  * vma's address space size must be the same as bo's size,
231  * otherwise it will return -EINVAL.
232  *
233  * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
234  */
235 int hmm_bo_mmap(struct vm_area_struct *vma,
236 		struct hmm_buffer_object *bo);
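
/*
 * Example use from a file_operations .mmap handler (a hedged sketch;
 * interpreting vma->vm_pgoff as an ISP virtual address is only one
 * possible driver convention, not something this interface mandates,
 * and "bdev" is assumed to be reachable from the file's private data):
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo;
 *
 *		bo = hmm_bo_device_search_start(bdev,
 *						vma->vm_pgoff << PAGE_SHIFT);
 *		if (!bo)
 *			return -EINVAL;
 *		return hmm_bo_mmap(vma, bo);
 *	}
 */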
237 
238 /*
239  * find the buffer object by its virtual address vaddr.
240  * return NULL if no such buffer object found.
241  */
242 struct hmm_buffer_object *hmm_bo_device_search_start(
243     struct hmm_bo_device *bdev, ia_css_ptr vaddr);
244 
245 /*
246  * find the buffer object by its virtual address.
247  * it does not need to be the start address of one bo,
248  * it can be an address within the range of one bo.
249  * return NULL if no such buffer object found.
250  */
251 struct hmm_buffer_object *hmm_bo_device_search_in_range(
252     struct hmm_bo_device *bdev, ia_css_ptr vaddr);
253 
254 /*
255  * find the buffer object with kernel virtual address vaddr.
256  * return NULL if no such buffer object found.
257  */
258 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
259     struct hmm_bo_device *bdev, const void *vaddr);
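
/*
 * Example: translating an ISP virtual address back to its backing page
 * (a hedged sketch; "isp_vaddr" is any address that falls inside a
 * bo whose pages have been allocated):
 *
 *	struct hmm_buffer_object *bo;
 *	struct page *page;
 *
 *	bo = hmm_bo_device_search_in_range(bdev, isp_vaddr);
 *	if (!bo)
 *		return -EINVAL;
 *	page = bo->pages[(isp_vaddr - bo->start) >> PAGE_SHIFT];
 */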
260 
#endif /* __HMM_BO_H__ */
262