/*
* Copyright (c) 2019-2023, Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
//!
//! \file mos_gpucontext_specific_next_xe.cpp
//! \brief Container class for the Linux-specific GPU context
//!

#include <unistd.h>
#include <pthread.h>
#include "mos_gpucontext_specific_next_xe.h"
#include "mos_context_specific_next.h"
#include "mos_graphicsresource_specific_next.h"
#include "mos_commandbuffer_specific_next.h"
#include "mos_util_devult_specific_next.h"
#include "mos_cmdbufmgr_next.h"
#include "mos_os_virtualengine_next.h"
#include "mos_interface.h"
#include "mos_os_cp_interface_specific.h"

#define MI_BATCHBUFFER_END 0x05000000
static pthread_mutex_t command_dump_mutex = PTHREAD_MUTEX_INITIALIZER;

GpuContextSpecificNextXe::~GpuContextSpecificNextXe()
{
    MOS_OS_FUNCTION_ENTER;

    Clear();
}

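//!
//! \brief    Initialize a 3D (render) GPU context
//! \details  Queries all render-class engines and creates a single shared
//!           exec queue whose placements span every engine instance returned.
//!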
MOS_STATUS GpuContextSpecificNextXe::Init3DCtx(PMOS_CONTEXT osParameters,
    PMOS_GPUCTX_CREATOPTIONS createOption,
    unsigned int *nengine,
    void *engine_map)
{
    MOS_OS_FUNCTION_ENTER;

    MOS_STATUS eStatus = MOS_STATUS_SUCCESS;

    __u16 engine_class = DRM_XE_ENGINE_CLASS_RENDER;
    __u64 caps = 0;
    uint8_t ctxWidth = 1;
    uint8_t numPlacement = 1;

    if (mos_query_engines(osParameters->bufmgr, engine_class, caps, nengine, engine_map))
    {
        MOS_OS_ASSERTMESSAGE("Failed to query engines.");
        return MOS_STATUS_UNKNOWN;
    }
    numPlacement = *nengine;

    m_i915Context[0] = mos_context_create_shared(osParameters->bufmgr,
        nullptr, // vm is not needed anymore; the vm_id comes from the global context
        0,
        m_bProtectedContext, // not supported currently
        engine_map,
        ctxWidth,
        numPlacement,
        0);
    if (m_i915Context[0] == nullptr)
    {
        MOS_OS_ASSERTMESSAGE("Failed to create context.");
        return MOS_STATUS_UNKNOWN;
    }
    m_i915Context[0]->pOsContext = osParameters;

    return eStatus;
}

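//!
//! \brief    Initialize a compute GPU context
//! \details  Queries all compute-class engines, optionally narrows them to a
//!           user-selected engine instance on debug builds, and creates a
//!           single shared exec queue over the remaining placements.
//!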
MOS_STATUS GpuContextSpecificNextXe::InitComputeCtx(PMOS_CONTEXT osParameters,
    unsigned int *nengine,
    void *engine_map,
    MOS_GPU_NODE gpuNode,
    bool *isEngineSelectEnable)
{
    MOS_OS_FUNCTION_ENTER;

    MOS_STATUS eStatus = MOS_STATUS_SUCCESS;

    __u16 engine_class = DRM_XE_ENGINE_CLASS_COMPUTE;
    __u64 caps = 0;
    uint8_t ctxWidth = 1;
    uint8_t numPlacement = 1;

    if (mos_query_engines(osParameters->bufmgr, engine_class, caps, nengine, engine_map))
    {
        MOS_OS_ASSERTMESSAGE("Failed to query engines.");
        return MOS_STATUS_UNKNOWN;
    }

#if (_DEBUG || _RELEASE_INTERNAL)
    // Note: this debug helper needs to be refined or overridden here, since
    // Xe uses a different engine_map struct.
    *isEngineSelectEnable = SelectEngineInstanceByUser(engine_map, nengine, m_engineInstanceSelect, gpuNode);
#endif

    numPlacement = *nengine;
    m_i915Context[0] = mos_context_create_shared(osParameters->bufmgr,
        nullptr,
        0,
        m_bProtectedContext,
        engine_map,
        ctxWidth,
        numPlacement,
        0);
    if (m_i915Context[0] == nullptr)
    {
        MOS_OS_ASSERTMESSAGE("Failed to create context.");
        return MOS_STATUS_UNKNOWN;
    }
    m_i915Context[0]->pOsContext = osParameters;

    return eStatus;
}

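//!
//! \brief    Initialize a video decode or video enhance GPU context
//! \details  Queries the video or video-enhance engines and creates the base
//!           exec queue m_i915Context[0]. When two or more engine instances
//!           are available, additional queues with increasing context widths
//!           are created for parallel (scalability) submission.
//!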
MOS_STATUS GpuContextSpecificNextXe::InitVdVeCtx(PMOS_CONTEXT osParameters,
    MOS_STREAM_HANDLE streamState,
    PMOS_GPUCTX_CREATOPTIONS createOption,
    unsigned int *nengine,
    void *engine_map,
    MOS_GPU_NODE gpuNode,
    bool *isEngineSelectEnable)
{
    MOS_OS_FUNCTION_ENTER;

    MOS_STATUS eStatus = MOS_STATUS_SUCCESS;

    __u16 engine_class = (gpuNode == MOS_GPU_NODE_VE) ? DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE : DRM_XE_ENGINE_CLASS_VIDEO_DECODE;
    __u64 caps = 0;
    uint8_t ctxWidth = 1;
    uint8_t numPlacement = 1;

    SetEngineQueryFlags(createOption, caps);

    if (mos_query_engines(osParameters->bufmgr, engine_class, caps, nengine, engine_map))
    {
        MOS_OS_ASSERTMESSAGE("Failed to query engines.");
        return MOS_STATUS_UNKNOWN;
    }

#if (_DEBUG || _RELEASE_INTERNAL)
    *isEngineSelectEnable = SelectEngineInstanceByUser(engine_map, nengine, m_engineInstanceSelect, gpuNode);
#endif

    numPlacement = *nengine;

    m_i915Context[0] = mos_context_create_shared(osParameters->bufmgr,
        nullptr,
        0,
        m_bProtectedContext,
        engine_map,
        ctxWidth,
        numPlacement,
        0);
    if (m_i915Context[0] == nullptr)
    {
        MOS_OS_ASSERTMESSAGE("Failed to create context.");
        return MOS_STATUS_UNKNOWN;
    }

    m_i915Context[0]->pOsContext = osParameters;

    if (*nengine >= 2 && *nengine <= MAX_ENGINE_INSTANCE_NUM)
    {
        // when ctxWidth > 1, numPlacement must always be 1
        numPlacement = 1;
        streamState->bParallelSubmission = true;
        // create additional contexts with increasing widths
        for (int i = 1; i < *nengine; i++)
        {
            ctxWidth = i + 1;
            m_i915Context[i] = mos_context_create_shared(osParameters->bufmgr,
                nullptr,
                0,
                m_bProtectedContext,
                engine_map,
                ctxWidth,
                numPlacement,
                0);
            if (m_i915Context[i] == nullptr)
            {
                MOS_OS_ASSERTMESSAGE("Failed to create context.");
                return MOS_STATUS_UNKNOWN;
            }
            m_i915Context[i]->pOsContext = osParameters;
        }
    }

    return eStatus;
}

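//!
//! \brief    Initialize a blitter (copy) GPU context
//! \details  Queries the copy-class engines and creates an exec queue placed
//!           on BCS0 only; the paging-copy engine is deliberately excluded.
//!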
MOS_STATUS GpuContextSpecificNextXe::InitBltCtx(PMOS_CONTEXT osParameters,
    unsigned int *nengine,
    void *engine_map)
{
    MOS_OS_FUNCTION_ENTER;

    MOS_STATUS eStatus = MOS_STATUS_SUCCESS;

    __u16 engine_class = DRM_XE_ENGINE_CLASS_COPY;
    __u64 caps = 0;
    uint8_t ctxWidth = 1;
    uint8_t numPlacement = 1;

    if (mos_query_engines(osParameters->bufmgr, engine_class, caps, nengine, engine_map))
    {
        MOS_OS_ASSERTMESSAGE("Failed to query engines.");
        return MOS_STATUS_UNKNOWN;
    }
    numPlacement = *nengine;
    if (numPlacement >= 2)
    {
        // only use BCS0; BCS8 is the paging-copy engine
        numPlacement = 1;
    }

    m_i915Context[0] = mos_context_create_shared(osParameters->bufmgr,
        nullptr,
        0,
        m_bProtectedContext,
        engine_map,
        ctxWidth,
        numPlacement,
        0);
    if (m_i915Context[0] == nullptr)
    {
        MOS_OS_ASSERTMESSAGE("Failed to create context.");
        return MOS_STATUS_UNKNOWN;
    }
    m_i915Context[0]->pOsContext = osParameters;

    return eStatus;
}

void GpuContextSpecificNextXe::Clear()
{
    MOS_OS_FUNCTION_ENTER;
}

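//!
//! \brief    Resolve the patch list for a command buffer
//! \details  Walks m_patchLocationList and writes the final GPU virtual
//!           address (bo->offset64 plus the allocation offset) of every
//!           referenced resource directly into the command buffer, locking
//!           nested batch buffers as needed and recording each dependency in
//!           the exec list of the batch bo that consumes it.
//!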
MOS_STATUS GpuContextSpecificNextXe::PatchCommandBuffer(
    MOS_STREAM_HANDLE streamState,
    PMOS_COMMAND_BUFFER cmdBuffer)
{
    MOS_OS_FUNCTION_ENTER;

    auto perStreamParameters = (PMOS_CONTEXT)streamState->perStreamParameters;
    auto cmd_bo = cmdBuffer->OsResource.bo;
    std::vector<PMOS_RESOURCE> mappedResList;

    // Now the patching is done, based on the patch list.
    for (uint32_t patchIndex = 0; patchIndex < m_currentNumPatchLocations; patchIndex++)
    {
        auto currentPatch = &m_patchLocationList[patchIndex];
        MOS_OS_CHK_NULL_RETURN(currentPatch);

        /**
         * Indicates whether tempCmdBo is a command buffer in m_secondaryCmdBufs.
         * If it is, tempCmdBo is also a primary cmd bo for scalability, and in
         * that case its exec list needs to be updated.
         * Otherwise, if tempCmdBo != cmdBuffer->OsResource.bo, it is a second-level
         * batch buffer of the primary command buffer, and its exec list must not
         * be updated.
         */
        bool isSecondaryCmdBuf = false;
        auto tempCmdBo = currentPatch->cmdBo == nullptr ? cmd_bo : currentPatch->cmdBo;

        // The following handles nested batch buffers: a nested BB must be locked before patching.
        if (tempCmdBo != cmd_bo)
        {
            auto it = m_secondaryCmdBufs.begin();
            while (it != m_secondaryCmdBufs.end())
            {
                if (it->second->OsResource.bo == tempCmdBo)
                {
                    isSecondaryCmdBuf = true;
                    break;
                }
                it++;
            }

            for (auto allocIdx = 0; allocIdx < m_numAllocations && (!isSecondaryCmdBuf); allocIdx++)
            {
                auto tempRes = (PMOS_RESOURCE)m_allocationList[allocIdx].hAllocation;
                if (tempCmdBo == tempRes->bo)
                {
                    GraphicsResourceNext::LockParams param;
                    param.m_writeRequest = true;
                    tempRes->pGfxResourceNext->Lock(m_osContext, param);
                    mappedResList.push_back(tempRes);
                    break;
                }
            }
        }

        // This is the resource for which patching will be done
        auto resource = (PMOS_RESOURCE)m_allocationList[currentPatch->AllocationIndex].hAllocation;
        MOS_OS_CHK_NULL_RETURN(resource);

        // For now, assume a system-memory resource has a null DRM bo pointer.
        // If nullptr is detected, the resource has been placed inside the
        // command buffer's indirect state area, so alloc_bo is simply set to
        // the command buffer's bo pointer.
        MOS_OS_ASSERT(resource->bo);

        auto alloc_bo = (resource->bo) ? resource->bo : tempCmdBo;

        MOS_OS_CHK_STATUS_RETURN(streamState->osCpInterface->PermeatePatchForHM(
            tempCmdBo->virt,
            currentPatch,
            resource));

        uint64_t boOffset = alloc_bo->offset64;

        MOS_OS_CHK_NULL_RETURN(tempCmdBo->virt);

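        // With softpin there is no kernel relocation step: each bo already
        // carries its final GPU virtual address in offset64, so the patched
        // address can be written straight into the command buffer below.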
        if (perStreamParameters->bUse64BitRelocs)
        {
            *((uint64_t *)((uint8_t *)tempCmdBo->virt + currentPatch->PatchOffset)) =
                boOffset + currentPatch->AllocationOffset;
        }
        else
        {
            *((uint32_t *)((uint8_t *)tempCmdBo->virt + currentPatch->PatchOffset)) =
                boOffset + currentPatch->AllocationOffset;
        }

#if (_DEBUG || _RELEASE_INTERNAL)
        {
            uint32_t evtData[] = {alloc_bo->handle, currentPatch->uiWriteOperation, currentPatch->AllocationOffset};
            MOS_TraceEventExt(EVENT_MOS_BATCH_SUBMIT, EVENT_TYPE_INFO,
                evtData, sizeof(evtData),
                &boOffset, sizeof(boOffset));
        }
#endif

        if (tempCmdBo != alloc_bo)
        {
            // reuse this API to update the exec list in the cmd bo
            mos_bo_add_softpin_target(isSecondaryCmdBuf ? tempCmdBo : cmd_bo, alloc_bo, currentPatch->uiWriteOperation);
        }
    }

    for (auto res : mappedResList)
    {
        res->pGfxResourceNext->Unlock(m_osContext);
    }
    mappedResList.clear();

    return MOS_STATUS_SUCCESS;
}

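//!
//! \brief    Finish a command buffer submission
//! \details  Dumps the secondary command buffers when dumping is enabled,
//!           clears the exec lists recorded during patching, unlocks and
//!           releases the command buffers, and resets the allocation and
//!           patch bookkeeping for the next submission.
//!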
MOS_STATUS GpuContextSpecificNextXe::EndSubmitCommandBuffer(
    MOS_STREAM_HANDLE streamState,
    PMOS_COMMAND_BUFFER cmdBuffer,
    bool cmdBufMapIsReused)
{
    MOS_OS_FUNCTION_ENTER;

#if MOS_COMMAND_BUFFER_DUMP_SUPPORTED
    auto it = m_secondaryCmdBufs.begin();
    pthread_mutex_lock(&command_dump_mutex);
    if (streamState->dumpCommandBuffer)
    {
        while (it != m_secondaryCmdBufs.end())
        {
            MosInterface::DumpCommandBuffer(streamState, it->second);
            it++;
        }
    }
    pthread_mutex_unlock(&command_dump_mutex);
#endif  // MOS_COMMAND_BUFFER_DUMP_SUPPORTED

    for (uint32_t patchIndex = 0; patchIndex < m_currentNumPatchLocations; patchIndex++)
    {
        auto currentPatch = &m_patchLocationList[patchIndex];
        MOS_OS_CHK_NULL_RETURN(currentPatch);

        // reuse this API to clear the exec list in the cmd bo
        if (currentPatch->cmdBo)
        {
            mos_bo_clear_relocs(currentPatch->cmdBo, 0);
        }
    }
    // Now the video command buffer can be unmapped, since CPU access is no longer needed.
    if (!cmdBufMapIsReused && cmdBuffer->OsResource.pGfxResourceNext)
    {
        cmdBuffer->OsResource.pGfxResourceNext->Unlock(m_osContext);
    }
    ClearSecondaryCmdBuffer(cmdBufMapIsReused);

    // Reset resource allocation
    m_numAllocations = 0;
    MosUtilities::MosZeroMemory(m_allocationList, sizeof(ALLOCATION_LIST) * m_maxNumAllocations);
    m_currentNumPatchLocations = 0;
    MosUtilities::MosZeroMemory(m_patchLocationList, sizeof(PATCHLOCATIONLIST) * m_maxNumAllocations);
    m_resCount = 0;

    MosUtilities::MosZeroMemory(m_writeModeList, sizeof(bool) * m_maxNumAllocations);

    return MOS_STATUS_SUCCESS;
}

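//!
//! \brief    Submit a command buffer for execution
//! \details  Patches all resource addresses, then submits either the single
//!           primary command buffer or, for multi-pipe scalability, the whole
//!           set of secondary command buffers through ParallelSubmitCommands.
//!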
MOS_STATUS GpuContextSpecificNextXe::SubmitCommandBuffer(
    MOS_STREAM_HANDLE streamState,
    PMOS_COMMAND_BUFFER cmdBuffer,
    bool nullRendering)
{
    MOS_OS_FUNCTION_ENTER;

    MOS_TraceEventExt(EVENT_MOS_BATCH_SUBMIT, EVENT_TYPE_START, nullptr, 0, nullptr, 0);

    MOS_OS_CHK_NULL_RETURN(streamState);
    auto perStreamParameters = (PMOS_CONTEXT)streamState->perStreamParameters;
    MOS_OS_CHK_NULL_RETURN(perStreamParameters);
    MOS_OS_CHK_NULL_RETURN(cmdBuffer);
    MOS_OS_CHK_NULL_RETURN(m_patchLocationList);

    MOS_GPU_NODE gpuNode = OSKMGetGpuNode(m_gpuContext);
    uint32_t execFlag = gpuNode;
    MOS_STATUS eStatus = MOS_STATUS_SUCCESS;
    int32_t ret = 0;
    bool scalaEnabled = false;
    bool cmdBufMapIsReused = false;

    m_cmdBufFlushed = true;
    auto cmd_bo = cmdBuffer->OsResource.bo;

    if (m_secondaryCmdBufs.size() >= 2)
    {
        scalaEnabled = true;
        cmdBuffer->iSubmissionType = SUBMISSION_TYPE_MULTI_PIPE_MASTER;
    }

    // Patch the resource addresses in the command buffer
    MOS_OS_CHK_STATUS_RETURN(PatchCommandBuffer(streamState, cmdBuffer));

    int32_t perfData = perStreamParameters->pPerfData != nullptr ? *(int32_t *)(perStreamParameters->pPerfData) : 0;

    int32_t DR4 = perStreamParameters->uEnablePerfTag ? perfData : 0;

    if (gpuNode != I915_EXEC_RENDER &&
        streamState->osCpInterface->IsTearDownHappen())
    {
        // Skip the PAK command when a CP teardown has happened, to avoid a GPU hang;
        // a conditional batch buffer start PoC is in progress.
    }
    else if (nullRendering == false)
    {
        UnlockPendingOcaBuffers(cmdBuffer, perStreamParameters);
        if (streamState->ctxBasedScheduling && m_i915Context[0] != nullptr)
        {
            // For multi-pipe, the FE and BE batch buffers are all in m_secondaryCmdBufs;
            // for single pipe, reuse m_secondaryCmdBufs and add cmdBuffer to it.
            if (!scalaEnabled)
            {
                // m_secondaryCmdBufs needs to be reused
                ClearSecondaryCmdBuffer(cmdBufMapIsReused);
                m_secondaryCmdBufs[0] = cmdBuffer;
                cmdBufMapIsReused = true;
            }

            ret = ParallelSubmitCommands(m_secondaryCmdBufs,
                perStreamParameters,
                execFlag,
                DR4);
        }

        if (ret != 0)
        {
            eStatus = MOS_STATUS_UNKNOWN;
        }
    }

    if (eStatus != MOS_STATUS_SUCCESS)
    {
        MOS_OS_ASSERTMESSAGE("Command buffer submission failed!");
    }

    MosUtilDevUltSpecific::MOS_DEVULT_FuncCall(pfnUltGetCmdBuf, cmdBuffer);

    // Reset global state for the next submission
    MOS_OS_CHK_STATUS_RETURN(EndSubmitCommandBuffer(streamState, cmdBuffer, cmdBufMapIsReused));

    MOS_TraceEventExt(EVENT_MOS_BATCH_SUBMIT, EVENT_TYPE_END, &eStatus, sizeof(eStatus), nullptr, 0);
    return eStatus;
}

/**
 * Both single-pipe and multi-pipe workloads are submitted through this interface.
 * FE and BE command buffers synchronize through bo deps, since they use different contexts.
 */
int32_t GpuContextSpecificNextXe::ParallelSubmitCommands(
    std::map<uint32_t, PMOS_COMMAND_BUFFER> secondaryCmdBufs,
    PMOS_CONTEXT osContext,
    uint32_t execFlag,
    int32_t dr4)
{
    MOS_OS_FUNCTION_ENTER;

    int32_t ret = 0;
    int fence = -1;
    auto it = m_secondaryCmdBufs.begin();
    MOS_LINUX_BO *cmdBos[MAX_PARALLEN_CMD_BO_NUM];
    int numBatch = 0;
    uint32_t batchBufferEndCmd = MI_BATCHBUFFER_END;

    MOS_LINUX_CONTEXT *queue = m_i915Context[0];

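    // Single-pipe buffers are submitted immediately on m_i915Context[0];
    // multi-pipe master/slave buffers are accumulated in cmdBos and flushed
    // as one parallel submission on the width-matched context once the last
    // pipe's buffer has been collected.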
    while (it != m_secondaryCmdBufs.end())
    {
        MapResourcesToAuxTable(it->second->OsResource.bo);
        // Add the batch buffer end command
        if (MOS_FAILED(Mos_AddCommand(
                it->second,
                &batchBufferEndCmd,
                sizeof(uint32_t))))
        {
            MOS_OS_ASSERTMESSAGE("Inserting BB_END failed!");
            return MOS_STATUS_UNKNOWN;
        }

        if (it->second->iSubmissionType & SUBMISSION_TYPE_MULTI_PIPE_ALONE
            || it->second->iSubmissionType & SUBMISSION_TYPE_SINGLE_PIPE
            || m_secondaryCmdBufs.size() == 1)
        {
            queue = m_i915Context[0];
            MOS_OS_CHK_NULL_RETURN(queue);
            numBatch = 1;
            cmdBos[0] = it->second->OsResource.bo;

            ret = mos_bo_context_exec3(cmdBos,
                numBatch,
                queue,
                nullptr,
                0,
                dr4,
                execFlag,  // not used
                &fence);   // not used
            cmdBos[0] = nullptr;
            numBatch = 0;
        }

        if ((it->second->iSubmissionType & SUBMISSION_TYPE_MULTI_PIPE_MASTER)
            || (it->second->iSubmissionType & SUBMISSION_TYPE_MULTI_PIPE_SLAVE))
        {
            cmdBos[numBatch++] = it->second->OsResource.bo;

            if (it->second->iSubmissionType & SUBMISSION_TYPE_MULTI_PIPE_FLAGS_LAST_PIPE)
            {
                queue = m_i915Context[numBatch - 1];
                MOS_OS_CHK_NULL_RETURN(queue);

                ret = mos_bo_context_exec3(cmdBos,
                    numBatch,
                    queue,
                    nullptr,
                    0,
                    dr4,
                    execFlag,
                    &fence);

                for (int i = 0; i < numBatch; i++)
                {
                    cmdBos[i] = nullptr;
                }
                numBatch = 0;
            }
        }
        it++;
    }

    return ret;
}

void GpuContextSpecificNextXe::UpdatePriority(int32_t priority)
{
    MOS_OS_FUNCTION_ENTER;
    // Note: a bufmgr API to set the exec queue priority property still needs to be implemented.
}

MOS_STATUS GpuContextSpecificNextXe::ReportEngineInfo(
    void *engine_map,
    int engineNum, bool engineSelectEnable)
{
    MOS_UNUSED(engine_map);
    MOS_UNUSED(engineNum);
    MOS_UNUSED(engineSelectEnable);
    return MOS_STATUS_SUCCESS;
}

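//!
//! \brief    Unlock and release all secondary command buffers
//! \details  Unlocks each buffer's graphics resource and, unless the mapping
//!           is being reused by the caller, frees the command buffer wrapper
//!           before clearing the map.
//!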
void GpuContextSpecificNextXe::ClearSecondaryCmdBuffer(
    bool cmdBufMapIsReused)
{
    auto it = m_secondaryCmdBufs.begin();
    while (it != m_secondaryCmdBufs.end())
    {
        if (it->second->OsResource.pGfxResourceNext)
        {
            it->second->OsResource.pGfxResourceNext->Unlock(m_osContext);
        }
        if (!cmdBufMapIsReused)
        {
            MOS_FreeMemory(it->second);
        }
        it++;
    }
    m_secondaryCmdBufs.clear();
}