// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2016, Intel Corporation.
 */

#include <linux/delay.h>	/* for udelay() */

#include "isp.h"
#include "vmem.h"
#include "vmem_local.h"

#if !defined(HRT_MEMORY_ACCESS)
#include "ia_css_device_access.h"
#endif
#include "assert_support.h"

typedef unsigned long long hive_uedge;
typedef hive_uedge *hive_wide;

/* Copied from SDK: sim_semantics.c */

/* subword bits move like this: MSB[____xxxx____]LSB -> MSB[00000000xxxx]LSB */
static inline hive_uedge
subword(hive_uedge w, unsigned int start, unsigned int end)
{
	return (w & (((1ULL << (end - 1)) - 1) << 1 | 1)) >> start;
}

/* inverse subword bits move like this: MSB[xxxx____xxxx]LSB -> MSB[xxxx0000xxxx]LSB */
static inline hive_uedge
inv_subword(hive_uedge w, unsigned int start, unsigned int end)
{
	return w & (~(((1ULL << (end - 1)) - 1) << 1 | 1) | ((1ULL << start) - 1));
}

#define uedge_bits (8 * sizeof(hive_uedge))
#define move_lower_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, 0, src_bit)
#define move_upper_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, src_bit, uedge_bits)
#define move_word(target, target_bit, src) move_subword(target, target_bit, src, 0, uedge_bits)

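/*
 * Copy bits [src_start, src_end) of 'src' into the bit array 'target',
 * starting at bit position 'target_bit'. The destination bits are cleared
 * first; a range that straddles two adjacent 64-bit words is written in
 * two pieces.
 */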
static void
move_subword(
    hive_uedge *target,
    unsigned int target_bit,
    hive_uedge src,
    unsigned int src_start,
    unsigned int src_end)
{
	unsigned int start_elem = target_bit / uedge_bits;
	unsigned int start_bit = target_bit % uedge_bits;
	unsigned int subword_width = src_end - src_start;

	hive_uedge src_subword = subword(src, src_start, src_end);

	if (subword_width + start_bit > uedge_bits) { /* overlap */
		hive_uedge old_val1;
		hive_uedge old_val0 = inv_subword(target[start_elem], start_bit, uedge_bits);

		target[start_elem] = old_val0 | (src_subword << start_bit);
		old_val1 = inv_subword(target[start_elem + 1], 0,
				       subword_width + start_bit - uedge_bits);
		target[start_elem + 1] = old_val1 | (src_subword >> (uedge_bits - start_bit));
	} else {
		hive_uedge old_val = inv_subword(target[start_elem], start_bit,
						 start_bit + subword_width);

		target[start_elem] = old_val | (src_subword << start_bit);
	}
}

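/*
 * Extract element 'index', 'elem_bits' bits wide, from the packed wide
 * word 'vector' and store it right-aligned in 'elem'. Elements that
 * straddle a 64-bit boundary (or are wider than 64 bits) are copied in
 * per-word pieces.
 */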
static void
hive_sim_wide_unpack(
    hive_wide vector,
    hive_wide elem,
    hive_uint elem_bits,
    hive_uint index)
{
	/* pointers into wide_type: */
	unsigned int start_elem = (elem_bits * index) / uedge_bits;
	unsigned int start_bit = (elem_bits * index) % uedge_bits;
	unsigned int end_elem = (elem_bits * (index + 1) - 1) / uedge_bits;
	unsigned int end_bit = ((elem_bits * (index + 1) - 1) % uedge_bits) + 1;

	if (elem_bits == uedge_bits) {
		/* easy case for speedup: */
		elem[0] = vector[index];
	} else if (start_elem == end_elem) {
		/* only one (<= 64 bits) element needs to be (partly) copied: */
		move_subword(elem, 0, vector[start_elem], start_bit, end_bit);
	} else {
		/* general case: handles edge spanning cases (includes >64 bit elements) */
		unsigned int bits_written = 0;
		unsigned int i;

		move_upper_bits(elem, bits_written, vector[start_elem], start_bit);
		bits_written += (uedge_bits - start_bit);
		for (i = start_elem + 1; i < end_elem; i++) {
			move_word(elem, bits_written, vector[i]);
			bits_written += uedge_bits;
		}
		move_lower_bits(elem, bits_written, vector[end_elem], end_bit);
	}
}

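/*
 * Inverse of hive_sim_wide_unpack(): write the value in 'elem',
 * 'elem_bits' bits wide, into slot 'index' of the packed wide word
 * 'vector'.
 */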
static void
hive_sim_wide_pack(
    hive_wide vector,
    hive_wide elem,
    hive_uint elem_bits,
    hive_uint index)
{
	/* pointers into wide_type: */
	unsigned int start_elem = (elem_bits * index) / uedge_bits;

	/* easy case for speedup: */
	if (elem_bits == uedge_bits) {
		vector[start_elem] = elem[0];
	} else if (elem_bits > uedge_bits) {
		unsigned int bits_to_write = elem_bits;
		unsigned int start_bit = elem_bits * index;
		unsigned int i = 0;

		for (; bits_to_write > uedge_bits;
		     bits_to_write -= uedge_bits, i++, start_bit += uedge_bits) {
			move_word(vector, start_bit, elem[i]);
		}
		move_lower_bits(vector, start_bit, elem[i], bits_to_write);
	} else {
		/* only one element needs to be (partly) copied: */
		move_lower_bits(vector, elem_bits * index, elem[0], elem_bits);
	}
}

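/*
 * Read sizeof(short) * ISP_NWAY bytes from ISP 'ID' vector memory at
 * offset 'from' and unpack them into ISP_NWAY host-side elements of
 * ISP_VEC_ELEMBITS bits each in 'to'.
 */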
static void load_vector(
    const isp_ID_t ID,
    t_vmem_elem *to,
    const t_vmem_elem *from)
{
	unsigned int i;
	hive_uedge *data;
	unsigned int size = sizeof(short) * ISP_NWAY;

	VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
	assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1);
#if !defined(HRT_MEMORY_ACCESS)
	ia_css_device_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
#else
	hrt_master_port_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
#endif
	data = (hive_uedge *)v;
	for (i = 0; i < ISP_NWAY; i++) {
		hive_uedge elem = 0;

		hive_sim_wide_unpack(data, &elem, ISP_VEC_ELEMBITS, i);
		to[i] = elem;
	}
	udelay(1); /* Spend at least 1 cycle per vector */
}

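/*
 * Counterpart of load_vector(): pack ISP_NWAY host-side elements from
 * 'from' into ISP_VEC_ELEMBITS-bit slots and write the packed vector to
 * ISP 'ID' vector memory at offset 'to'.
 */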
static void store_vector(
    const isp_ID_t ID,
    t_vmem_elem *to,
    const t_vmem_elem *from)
{
	unsigned int i;
	unsigned int size = sizeof(short) * ISP_NWAY;

	VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
	//load_vector (&v[1][0], &to[ISP_NWAY]); /* Fetch the next vector, since it will be overwritten. */
	hive_uedge *data = (hive_uedge *)v;

	for (i = 0; i < ISP_NWAY; i++) {
		hive_sim_wide_pack(data, (hive_wide)&from[i], ISP_VEC_ELEMBITS, i);
	}
	assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1);
#if !defined(HRT_MEMORY_ACCESS)
	ia_css_device_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
#else
	//hrt_mem_store (ISP, VMEM, (unsigned)to, &v, siz); /* This will overwrite the next vector as well */
	hrt_master_port_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
#endif
	udelay(1); /* Spend at least 1 cycle per vector */
}

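/*
 * Load 'elems' elements (must be a multiple of ISP_NWAY) from the
 * vector-aligned VMEM address 'from' into the host buffer 'to'.
 */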
void isp_vmem_load(
    const isp_ID_t ID,
    const t_vmem_elem *from,
    t_vmem_elem *to,
    unsigned int elems) /* In t_vmem_elem */
{
	unsigned int c;
	const t_vmem_elem *vp = from;

	assert(ID < N_ISP_ID);
	assert((unsigned long)from % ISP_VEC_ALIGN == 0);
	assert(elems % ISP_NWAY == 0);
	for (c = 0; c < elems; c += ISP_NWAY) {
		load_vector(ID, &to[c], vp);
		vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
	}
}

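/*
 * Store 'elems' elements (must be a multiple of ISP_NWAY) from the host
 * buffer 'from' to the vector-aligned VMEM address 'to'.
 */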
void isp_vmem_store(
    const isp_ID_t ID,
    t_vmem_elem *to,
    const t_vmem_elem *from,
    unsigned int elems) /* In t_vmem_elem */
{
	unsigned int c;
	t_vmem_elem *vp = to;

	assert(ID < N_ISP_ID);
	assert((unsigned long)to % ISP_VEC_ALIGN == 0);
	assert(elems % ISP_NWAY == 0);
	for (c = 0; c < elems; c += ISP_NWAY) {
		store_vector(ID, vp, &from[c]);
		vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
	}
}

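/*
 * Load a 2D region of 'height' rows by 'width' elements from VMEM into
 * the host buffer 'to'. 'stride_from' and 'stride_to' are the row strides
 * of the source and destination, in t_vmem_elem units.
 */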
void isp_vmem_2d_load(
    const isp_ID_t ID,
    const t_vmem_elem *from,
    t_vmem_elem *to,
    unsigned int height,
    unsigned int width,
    unsigned int stride_to,   /* In t_vmem_elem */
    unsigned int stride_from) /* In t_vmem_elem */
{
	unsigned int h;

	assert(ID < N_ISP_ID);
	assert((unsigned long)from % ISP_VEC_ALIGN == 0);
	assert(width % ISP_NWAY == 0);
	assert(stride_from % ISP_NWAY == 0);
	for (h = 0; h < height; h++) {
		unsigned int c;
		const t_vmem_elem *vp = from;

		for (c = 0; c < width; c += ISP_NWAY) {
			load_vector(ID, &to[stride_to * h + c], vp);
			vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
		}
		from = (const t_vmem_elem *)((const char *)from +
					     stride_from / ISP_NWAY * ISP_VEC_ALIGN);
	}
}

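/*
 * Store a 2D region of 'height' rows by 'width' elements from the host
 * buffer 'from' into VMEM at 'to'. 'stride_from' and 'stride_to' are the
 * row strides of the source and destination, in t_vmem_elem units.
 */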
void isp_vmem_2d_store(
    const isp_ID_t ID,
    t_vmem_elem *to,
    const t_vmem_elem *from,
    unsigned int height,
    unsigned int width,
    unsigned int stride_to,   /* In t_vmem_elem */
    unsigned int stride_from) /* In t_vmem_elem */
{
	unsigned int h;

	assert(ID < N_ISP_ID);
	assert((unsigned long)to % ISP_VEC_ALIGN == 0);
	assert(width % ISP_NWAY == 0);
	assert(stride_to % ISP_NWAY == 0);
	for (h = 0; h < height; h++) {
		unsigned int c;
		t_vmem_elem *vp = to;

		for (c = 0; c < width; c += ISP_NWAY) {
			store_vector(ID, vp, &from[stride_from * h + c]);
			vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
		}
		to = (t_vmem_elem *)((char *)to + stride_to / ISP_NWAY * ISP_VEC_ALIGN);
	}
}