/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#include "tu_event.h"

#include "tu_cmd_buffer.h"
#include "tu_rmv.h"

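/* A VkEvent is backed by a small dedicated BO. The first 64-bit word of the
 * BO holds the event state: 1 when the event is set, 0 when it is reset. The
 * host accesses the word through the CPU mapping, while the GPU updates it
 * with tu_write_event() and polls it with CP_WAIT_REG_MEM.
 */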
VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   VK_FROM_HANDLE(tu_device, device, _device);

   struct tu_event *event = (struct tu_event *)
      vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
                      VK_OBJECT_TYPE_EVENT);
   if (!event)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

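   /* Back the event with a page-sized BO and map it so the host can read and
    * write the state word directly.
    */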
   VkResult result = tu_bo_init_new(device, &event->base, &event->bo, 0x1000,
                                    TU_BO_ALLOC_NO_FLAGS, "event");
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = tu_bo_map(device, event->bo, NULL);
   if (result != VK_SUCCESS)
      goto fail_map;

   TU_RMV(event_create, device, pCreateInfo, event);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;

fail_map:
   tu_bo_finish(device, event->bo);
fail_alloc:
   vk_object_free(&device->vk, pAllocator, event);
   return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(tu_device, device, _device);
   VK_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;

   TU_RMV(resource_destroy, device, event);

   tu_bo_finish(device, event->bo);
   vk_object_free(&device->vk, pAllocator, event);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   VK_FROM_HANDLE(tu_device, device, _device);
   VK_FROM_HANDLE(tu_event, event, _event);

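   /* Report device loss instead of a possibly stale event value. */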
   if (vk_device_is_lost(&device->vk))
      return VK_ERROR_DEVICE_LOST;

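   /* The state word is written either by the host (vkSetEvent/vkResetEvent)
    * or by the GPU via tu_write_event(); 1 means the event is set.
    */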
   if (*(uint64_t*) event->bo->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   VK_FROM_HANDLE(tu_event, event, _event);
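   /* Host-side set: update the state word through the CPU mapping; no GPU
    * work is needed.
    */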
   *(uint64_t*) event->bo->map = 1;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   VK_FROM_HANDLE(tu_event, event, _event);
   *(uint64_t*) event->bo->map = 0;

   return VK_SUCCESS;
}

template <chip CHIP>
VKAPI_ATTR void VKAPI_CALL
tu_CmdSetEvent2(VkCommandBuffer commandBuffer,
                VkEvent _event,
                const VkDependencyInfo *pDependencyInfo)
{
   VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VK_FROM_HANDLE(tu_event, event, _event);
   VkPipelineStageFlags2 src_stage_mask = 0;

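   /* Accumulate the union of source stages from every barrier; the GPU-side
    * write of the event must only happen after all of them have completed.
    */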
   for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
      src_stage_mask |= pDependencyInfo->pMemoryBarriers[i].srcStageMask;
   for (uint32_t i = 0; i < pDependencyInfo->bufferMemoryBarrierCount; i++)
      src_stage_mask |= pDependencyInfo->pBufferMemoryBarriers[i].srcStageMask;
   for (uint32_t i = 0; i < pDependencyInfo->imageMemoryBarrierCount; i++)
      src_stage_mask |= pDependencyInfo->pImageMemoryBarriers[i].srcStageMask;

   tu_write_event<CHIP>(cmd, event, src_stage_mask, 1);
}
TU_GENX(tu_CmdSetEvent2);

template <chip CHIP>
VKAPI_ATTR void VKAPI_CALL
tu_CmdResetEvent2(VkCommandBuffer commandBuffer,
                  VkEvent _event,
                  VkPipelineStageFlags2 stageMask)
{
   VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   VK_FROM_HANDLE(tu_event, event, _event);

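   /* GPU-side reset: write 0 to the event BO once stageMask has completed. */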
   tu_write_event<CHIP>(cmd, event, stageMask, 0);
}
TU_GENX(tu_CmdResetEvent2);

VKAPI_ATTR void VKAPI_CALL
tu_CmdWaitEvents2(VkCommandBuffer commandBuffer,
                  uint32_t eventCount,
                  const VkEvent *pEvents,
                  const VkDependencyInfo* pDependencyInfos)
{
   VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
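   /* Inside a render pass the waits go into the draw command stream,
    * otherwise into the main command stream.
    */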
   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;

   for (uint32_t i = 0; i < eventCount; i++) {
      VK_FROM_HANDLE(tu_event, event, pEvents[i]);

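      /* Stall the CP until the event BO's state word reads back as 1. */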
      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL(POLL_MEMORY));
      tu_cs_emit_qw(cs, event->bo->iova); /* POLL_ADDR_LO/HI */
      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
   }

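   /* Apply the memory dependencies for the waited events. */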
   tu_barrier(cmd, eventCount, pDependencyInfos);
}