/*
 * Copyright © 2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>

#include "util/os_file.h"

#include "drm/freedreno_ringbuffer_sp.h"
#include "freedreno_rd_output.h"
#include "msm_priv.h"
18
19 static int
flush_submit_list(struct list_head * submit_list)20 flush_submit_list(struct list_head *submit_list)
21 {
22 struct fd_submit_sp *fd_submit = to_fd_submit_sp(last_submit(submit_list));
23 struct fd_pipe *pipe = fd_submit->base.pipe;
24 struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
25 struct drm_msm_gem_submit req = {
26 .flags = msm_pipe->pipe,
27 .queueid = msm_pipe->queue_id,
28 };
29 int ret;
30
31 unsigned nr_cmds = 0;
32
33 MESA_TRACE_FUNC();
34
35 /* Determine the number of extra cmds's from deferred submits that
36 * we will be merging in:
37 */
38 foreach_submit (submit, submit_list) {
39 assert(submit->pipe == &msm_pipe->base);
40 nr_cmds += to_fd_ringbuffer_sp(submit->primary)->u.nr_cmds;
41 }
42
43 struct drm_msm_gem_submit_cmd cmds[nr_cmds];
44
45 unsigned cmd_idx = 0;
46
47 /* Build up the table of cmds, and for all but the last submit in the
48 * list, merge their bo tables into the last submit.
49 */
50 foreach_submit_safe (submit, submit_list) {
51 struct fd_ringbuffer_sp *deferred_primary =
52 to_fd_ringbuffer_sp(submit->primary);
53
54 for (unsigned i = 0; i < deferred_primary->u.nr_cmds; i++) {
55 struct fd_bo *ring_bo = deferred_primary->u.cmds[i].ring_bo;
56 cmds[cmd_idx].type = MSM_SUBMIT_CMD_BUF;
57 cmds[cmd_idx].submit_idx = fd_submit_append_bo(fd_submit, ring_bo);
58 cmds[cmd_idx].submit_offset = submit_offset(ring_bo, deferred_primary->offset);
59 cmds[cmd_idx].size = deferred_primary->u.cmds[i].size;
60 cmds[cmd_idx].pad = 0;
61 cmds[cmd_idx].nr_relocs = 0;
62
63 cmd_idx++;
64 }
65
66 /* We are merging all the submits in the list into the last submit,
67 * so the remainder of the loop body doesn't apply to the last submit
68 */
69 if (submit == last_submit(submit_list)) {
70 DEBUG_MSG("merged %u submits", cmd_idx);
71 break;
72 }
73
74 struct fd_submit_sp *fd_deferred_submit = to_fd_submit_sp(submit);
75 for (unsigned i = 0; i < fd_deferred_submit->nr_bos; i++) {
76 /* Note: if bo is used in both the current submit and the deferred
77 * submit being merged, we expect to hit the fast-path as we add it
78 * to the current submit:
79 */
80 fd_submit_append_bo(fd_submit, fd_deferred_submit->bos[i]);
81 }
82
83 /* Now that the cmds/bos have been transfered over to the current submit,
84 * we can remove the deferred submit from the list and drop it's reference
85 */
86 list_del(&submit->node);
87 fd_submit_del(submit);
88 }
89
90 if (fd_submit->in_fence_fd != -1) {
91 req.flags |= MSM_SUBMIT_FENCE_FD_IN;
92 req.fence_fd = fd_submit->in_fence_fd;
93 }
94
95 if (pipe->no_implicit_sync) {
96 req.flags |= MSM_SUBMIT_NO_IMPLICIT;
97 }
98
99 if (fd_submit->out_fence->use_fence_fd) {
100 req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
101 }
102
103 /* Needs to be after get_cmd() as that could create bos/cmds table:
104 *
105 * NOTE allocate on-stack in the common case, but with an upper-
106 * bound to limit on-stack allocation to 4k:
107 */
108 const unsigned bo_limit = 4096 / sizeof(struct drm_msm_gem_submit_bo);
109 bool bos_on_stack = fd_submit->nr_bos < bo_limit;
110 struct drm_msm_gem_submit_bo
111 _submit_bos[bos_on_stack ? fd_submit->nr_bos : 0];
112 struct drm_msm_gem_submit_bo *submit_bos;
113 if (bos_on_stack) {
114 submit_bos = _submit_bos;
115 } else {
116 submit_bos = malloc(fd_submit->nr_bos * sizeof(submit_bos[0]));
117 }
118
119 for (unsigned i = 0; i < fd_submit->nr_bos; i++) {
120 submit_bos[i].flags = fd_submit->bos[i]->reloc_flags;
121 submit_bos[i].handle = fd_submit->bos[i]->handle;
122 submit_bos[i].presumed = 0;
123 }
124
125 req.bos = VOID2U64(submit_bos);
126 req.nr_bos = fd_submit->nr_bos;
127 req.cmds = VOID2U64(cmds);
128 req.nr_cmds = nr_cmds;
129
130 DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
131
132 ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_GEM_SUBMIT, &req,
133 sizeof(req));
134 if (ret) {
135 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
136 msm_dump_submit(&req);
137 } else if (!ret) {
138 fd_submit->out_fence->kfence = req.fence;
139 fd_submit->out_fence->fence_fd = req.fence_fd;
140 }
141
142 msm_dump_rd(pipe, &req);
143
144 if (!bos_on_stack)
145 free(submit_bos);
146
147 if (fd_submit->in_fence_fd != -1)
148 close(fd_submit->in_fence_fd);
149
150 return ret;
151 }
152
153 struct fd_submit *
msm_submit_sp_new(struct fd_pipe * pipe)154 msm_submit_sp_new(struct fd_pipe *pipe)
155 {
156 /* We don't do any translation from internal FD_RELOC flags to MSM flags. */
157 STATIC_ASSERT(FD_RELOC_READ == MSM_SUBMIT_BO_READ);
158 STATIC_ASSERT(FD_RELOC_WRITE == MSM_SUBMIT_BO_WRITE);
159 STATIC_ASSERT(FD_RELOC_DUMP == MSM_SUBMIT_BO_DUMP);
160
161 return fd_submit_sp_new(pipe, flush_submit_list);
162 }
163