1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Support for Medifield PNW Camera Imaging ISP subsystem.
4 *
5 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
6 *
7 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
8 */
9 /*
10 * ISP MMU driver for classic two-level page tables
11 */
12 #ifndef __ISP_MMU_H__
13 #define __ISP_MMU_H__
14
15 #include <linux/types.h>
16 #include <linux/mutex.h>
17 #include <linux/slab.h>
18
/*
 * Do not change these values: the ISP page size must be the
 * same as the kernel's page size (4 KiB).
 */
#define ISP_PAGE_OFFSET 12	/* log2 of the ISP page size */
#define ISP_PAGE_SIZE BIT(ISP_PAGE_OFFSET)	/* 4 KiB */
#define ISP_PAGE_MASK (~(phys_addr_t)(ISP_PAGE_SIZE - 1))	/* clears the in-page offset bits */

/* Bits [31:22] of an ISP virtual address index the L1 table (page directory). */
#define ISP_L1PT_OFFSET 22
#define ISP_L1PT_MASK (~((1U << ISP_L1PT_OFFSET) - 1))

/* Bits [21:12] index the L2 table (page table). */
#define ISP_L2PT_OFFSET 12
#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES 1024	/* entries per L1 table (10 index bits) */
#define ISP_L2PT_PTES 1024	/* entries per L2 table (10 index bits) */
35
/* Extract the L1 (page directory) index from an ISP virtual address. */
#define ISP_PTR_TO_L1_IDX(x) ((((x) & ISP_L1PT_MASK)) \
				>> ISP_L1PT_OFFSET)

/* Extract the L2 (page table) index from an ISP virtual address. */
#define ISP_PTR_TO_L2_IDX(x) ((((x) & ISP_L2PT_MASK)) \
				>> ISP_L2PT_OFFSET)

/* Round x up to the next ISP page boundary. */
#define ISP_PAGE_ALIGN(x) (((x) + (ISP_PAGE_SIZE - 1)) \
				& ISP_PAGE_MASK)
44
/*
 * Compose an ISP virtual address from an L1 index, an L2 index and an
 * in-page offset (the inverse of ISP_PTR_TO_L1_IDX/ISP_PTR_TO_L2_IDX).
 *
 * Fixed: this was wrapped in do { ... } while (0), which turns the
 * expression into a statement, so the macro could never actually yield
 * the address value it is meant to produce.
 */
#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) \
	(((l1_idx) << ISP_L1PT_OFFSET) | \
	 ((l2_idx) << ISP_L2PT_OFFSET) | \
	 (offset))
50
/* Convert a page count to a byte size. */
#define pgnr_to_size(pgnr) ((pgnr) << ISP_PAGE_OFFSET)
/* Number of pages needed to hold size bytes (rounds up). */
#define size_to_pgnr_ceil(size) (((size) + (1 << ISP_PAGE_OFFSET) - 1)\
				>> ISP_PAGE_OFFSET)
/* Number of whole pages contained in size bytes (rounds down). */
#define size_to_pgnr_bottom(size) ((size) >> ISP_PAGE_OFFSET)
55
56 struct isp_mmu;
57
/*
 * Per-driver constants and callbacks an ISP MMU driver registers with
 * isp_mmu_init().
 */
struct isp_mmu_client {
	/*
	 * Constant identification/configuration:
	 *
	 * @name:
	 *	driver name
	 * @pte_valid_mask:
	 *	mask selecting the PTE "valid" flag; should be a single
	 *	set bit, i.e. a power of 2.
	 * @null_pte:
	 *	PTE value used for an unmapped entry (presumably with the
	 *	valid bit clear — confirm against the driver implementation).
	 */
	char *name;
	unsigned int pte_valid_mask;
	unsigned int null_pte;

	/*
	 * Get the page directory base address (physical address).
	 *
	 * Must be provided.
	 *
	 * NOTE(review): despite the name, this callback receives pd_base
	 * as an argument and returns an unsigned int — presumably it
	 * converts the physical PD base into the value programmed into
	 * the hardware; confirm against the driver implementation.
	 */
	unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
	/*
	 * Callbacks to flush the ISP TLB.
	 *
	 * tlb_flush_range must flush at least the TLB entries covering
	 * the address mapping from addr to addr + size.
	 *
	 * tlb_flush_all flushes all TLB entries.
	 *
	 * tlb_flush_all must be provided; if tlb_flush_range is not
	 * valid, it is set to tlb_flush_all by default.
	 */
	void (*tlb_flush_range)(struct isp_mmu *mmu,
				unsigned int addr, unsigned int size);
	void (*tlb_flush_all)(struct isp_mmu *mmu);
	/* Convert a physical address to a PTE value, and back. */
	unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
				    phys_addr_t phys);
	phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
				   unsigned int pte);

};
98
/*
 * Run-time state of one ISP MMU instance; populated by isp_mmu_init()
 * and protected by pt_mutex for page-table updates.
 */
struct isp_mmu {
	struct isp_mmu_client *driver;	/* attached driver callback table */
	unsigned int l1_pte;		/* PTE referencing the L1 page table */
	/*
	 * One counter per L1 slot — presumably the number of live L2
	 * mappings under each L1 entry; confirm in isp_mmu.c.
	 */
	int l2_pgt_refcount[ISP_L1PT_PTES];
	phys_addr_t base_address;	/* NOTE(review): use not visible in this header — confirm */

	struct mutex pt_mutex;		/* serializes map/unmap page-table updates */
};
107
/* Flags for PDE and PTE. */
/* Driver-defined mask selecting the "valid" bit of a PDE/PTE. */
#define ISP_PTE_VALID_MASK(mmu)	\
	((mmu)->driver->pte_valid_mask)

/* Nonzero iff pte has its driver-defined valid bit set. */
#define ISP_PTE_VALID(mmu, pte)	\
	((pte) & ISP_PTE_VALID_MASK(mmu))

/* Sentinel physical page address meaning "no page". */
#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK)
#define PAGE_VALID(page) ((page) != NULL_PAGE)
117
/*
 * Initialize mmu with the given mmu driver (callback table).
 * Presumably returns 0 on success and nonzero on failure — confirm
 * against the implementation in isp_mmu.c.
 */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
/*
 * Clean up all MMU-related state set up by isp_mmu_init().
 */
void isp_mmu_exit(struct isp_mmu *mmu);

/*
 * Set up / remove the address mapping between pgnr contiguous physical
 * pages starting at phys and the ISP virtual address isp_virt.
 *
 * map/unmap are mutex-protected internally; the caller does not have
 * to do any lock/unlock operation.
 *
 * map/unmap do NOT flush the TLB; the caller must do that itself
 * (see isp_mmu_flush_tlb_all / isp_mmu_flush_tlb_range below).
 */
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr);

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr);
142
isp_mmu_flush_tlb_all(struct isp_mmu * mmu)143 static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
144 {
145 if (mmu->driver && mmu->driver->tlb_flush_all)
146 mmu->driver->tlb_flush_all(mmu);
147 }
148
/* A plain "flush" flushes everything. */
#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
150
isp_mmu_flush_tlb_range(struct isp_mmu * mmu,unsigned int start,unsigned int size)151 static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
152 unsigned int start, unsigned int size)
153 {
154 if (mmu->driver && mmu->driver->tlb_flush_range)
155 mmu->driver->tlb_flush_range(mmu, start, size);
156 }
157
#endif /* __ISP_MMU_H__ */
159