// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

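/*
 * encaps_handle_do_release - release an encapsulated signals handle
 *
 * @handle: pointer to the handle to release
 * @put_hw_sob: true if the HW SOB refcount should be dropped
 * @put_ctx: true if the context refcount should be dropped
 *
 * Removes the handle from the signals manager's IDR and frees it. The HW SOB
 * and context references are dropped only when requested, as the different
 * kref release flows below need different combinations.
 */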
static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
					bool put_ctx)
{
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	if (put_hw_sob)
		hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	if (put_ctx)
		hl_ctx_put(handle->ctx);

	kfree(handle);
}

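/* kref release callback: free the handle and put the context, without putting the HW SOB */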
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, false, true);
}

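/* kref release callback: free the handle and put the HW SOB, without putting the context */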
static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, false);
}

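/* kref release callback: free the handle and put both the HW SOB and the context */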
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, true);
}

static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

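/*
 * hl_encaps_sig_mgr_fini - clean up the encapsulated signals manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to the signals manager to clean up
 *
 * Warns about and releases any handles that are still allocated, then
 * destroys the IDR.
 */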
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	/* The IDR is expected to be empty at this stage, because any leftover signal should have
	 * been released as part of CS roll-back.
	 */
	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev,
			"device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
	}

	idr_destroy(&mgr->handles);
}

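/*
 * hl_ctx_fini - free the resources held by a context
 *
 * @ctx: pointer to the context structure
 *
 * Releases the pending CS fences and, for a user context, tears down the
 * debug mode, VA pool, VM, ASID and encapsulated signals manager. For the
 * kernel context, the ASIC-specific state, VM and MMU mappings are released.
 */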
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that was finished we decrement it, and we won't reach
	 * this function unless the ref count is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		if (hdev->in_debug)
			hl_device_set_debug_mode(hdev, ctx, false);

		hdev->asic_funcs->ctx_fini(ctx);

		hl_dec_ctx_fini(ctx);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
		mutex_destroy(&ctx->ts_reg_lock);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

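/*
 * hl_ctx_do_release - kref release callback for a context
 *
 * @ref: pointer to the context's refcount kref
 *
 * Called when the last reference to the context is dropped. Frees the
 * context and, if it belongs to a user process, detaches it from the
 * process' private data and drops the reference taken on that data.
 */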
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv) {
		struct hl_fpriv *hpriv = ctx->hpriv;

		mutex_lock(&hpriv->ctx_lock);
		hpriv->ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		hl_hpriv_put(hpriv);
	}

	kfree(ctx);
}

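/*
 * hl_ctx_create - create a user context and attach it to the given process
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the process
 *
 * Allocates a context, registers it in the process' context manager IDR and
 * initializes it. A reference is taken on the process' private data so it
 * outlives the context; it is dropped in hl_ctx_do_release().
 *
 * @return 0 on success, otherwise a negative error code
 */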
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&ctx_mgr->lock);
	rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&ctx_mgr->lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->is_compute_ctx_active = true;

	return 0;

remove_from_idr:
	mutex_lock(&ctx_mgr->lock);
	idr_remove(&ctx_mgr->handles, ctx->handle);
	mutex_unlock(&ctx_mgr->lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

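/*
 * hl_ctx_init - initialize a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context to initialize
 * @is_kernel_ctx: true if this is the kernel driver's context
 *
 * Sets up the pending CS fences array, the outcome store and the HW block
 * memory list, then initializes the VM. A user context additionally gets an
 * ASID, a VA pool for mapped command buffers and an encapsulated signals
 * manager; the kernel context uses the reserved ASID 0.
 *
 * @return 0 on success, otherwise a negative error code
 */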
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0, i;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->outcome_store.used_list);
	INIT_LIST_HEAD(&ctx->outcome_store.free_list);
	hash_init(ctx->outcome_store.outcome_map);
	for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
		list_add(&ctx->outcome_store.nodes_pool[i].list_link,
			&ctx->outcome_store.free_list);

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		mutex_init(&ctx->ts_reg_lock);

		dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
			current->comm, ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

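/* Take a reference on the context unless its refcount has already dropped to zero */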
static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
	return kref_get_unless_zero(&ctx->refcount);
}

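/* Take a reference on the context; the caller must already hold a valid one */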
void hl_ctx_get(struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

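/* Drop a reference on the context; the last one releases it via hl_ctx_do_release() */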
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

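/*
 * hl_get_compute_ctx - get the context of the process that opened the compute device
 *
 * @hdev: pointer to device structure
 *
 * @return pointer to the context with its refcount incremented, or NULL if
 * there is no such context or it is already being released
 */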
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = NULL;
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->ctx_lock);
		ctx = hpriv->ctx;
		if (ctx && !hl_ctx_get_unless_zero(ctx))
			ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		/* There can only be a single user who has opened the compute device, so exit
		 * immediately once we find its context or see that it has been released
		 */
		break;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	return ctx;
}

/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 * error pointer.
 *
 * cs_pending is used as a ring buffer: max_pending_cs is assumed to be a
 * power of 2, so the sequence number can be masked into an array index.
 *
 * NOTE: this function shall be called with cs_lock locked
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}

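/*
 * hl_ctx_get_fence - get a single CS fence
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * Takes the CS lock and looks up the fence via hl_ctx_get_fence_locked().
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 * error pointer.
 */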
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of seq_arr and fence
 *
 * On error, the references already taken on fences are dropped.
 *
 * @return 0 on success, otherwise a non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
				seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @ctx_mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &ctx_mgr->handles;

	idr_for_each_entry(idp, ctx, id)
		kref_put(&ctx->refcount, hl_ctx_do_release);

	idr_destroy(&ctx_mgr->handles);
	mutex_destroy(&ctx_mgr->lock);
}