// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

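/*
 * Queue a Discovery Log Page Changed AEN on @ctrl, provided the
 * controller is connected through @port and has not disabled the
 * corresponding AEN bit.
 */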
static void __nvmet_disc_changed(struct nvmet_port *port,
		struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

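/*
 * Bump the discovery generation counter and notify every discovery
 * controller on @port that the discovery log changed. If @subsys is
 * non-NULL, only hosts allowed to access @subsys are notified. The
 * caller must hold nvmet_config_sem (asserted below).
 */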
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

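/*
 * Notify discovery controllers on @port about a change concerning
 * @subsys; when @host is non-NULL, restrict the notification to
 * controllers created by that host.
 */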
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys,
		struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

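/*
 * Bump the discovery generation counter and signal a discovery log
 * change on every port through which @subsys is exported. The caller
 * must hold nvmet_config_sem (asserted below).
 */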
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

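/*
 * Link @port into the referral list of @parent and advertise the new
 * referral via a discovery log change event. A port that is already
 * linked is left untouched.
 */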
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

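/*
 * Unlink @port from the referral list of @parent and advertise its
 * removal via a discovery log change event. A port that is not linked
 * is left untouched.
 */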
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

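/*
 * Fill in entry @numrec of the discovery log page from the transport
 * address of @port. Every entry reports a dynamic controller ID, as
 * that is all this target supports.
 */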
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The traddr reported in the discovery
 * log page must not contain that "any" address. If the transport
 * implements .disc_traddr, use it: the callback sets the discovery
 * traddr from the address @req arrived on, in case the port in question
 * listens on an "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

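/*
 * Count the log page entries visible to the requesting host: one for
 * the discovery subsystem itself, one per allowed subsystem on the
 * port, and one per referral.
 */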
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 1;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

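/*
 * Build the discovery log page. nvmet_config_sem is held as reader
 * across both the sizing and the formatting pass, so the subsystem and
 * referral lists cannot change in between.
 */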
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;
	char traddr[NVMF_TRADDR_SIZE];

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	/*
	 * Make sure we allocate at least a full response header. If the
	 * host-provided data length is less than the size of the log we
	 * build, only the number of bytes requested by the host is sent.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}
	hdr = buffer;

	nvmet_set_disc_traddr(req, req->port, traddr);

	nvmet_format_discovery_entry(hdr, req->port,
				     nvmet_disc_subsys->subsysnqn,
				     traddr, NVME_NQN_CURR, numrec);
	numrec++;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_format_discovery_entry(hdr, req->port,
					     p->subsys->subsysnqn, traddr,
					     NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		if (r->disc_addr.trtype == NVMF_TRTYPE_PCI)
			continue;

		nvmet_format_discovery_entry(hdr, r,
					     NVME_DISC_SUBSYS_NAME,
					     r->disc_addr.traddr,
					     NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

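/*
 * Identify for discovery controllers; only the controller data
 * structure (CNS 01h) is supported.
 */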
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
		       strlen(ctrl->subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->cntrltype = NVME_CTRL_DISC;

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

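/*
 * Set Features for discovery controllers; only the Keep Alive Timer
 * and Asynchronous Event Configuration features are supported.
 */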
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

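/*
 * Get Features for discovery controllers; mirrors the Set Features
 * handler above.
 */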
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

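/*
 * Return the expected data transfer length of a discovery command,
 * or zero for commands that carry no data.
 */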
u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		return nvmet_get_log_page_len(req->cmd);
	case nvme_admin_identify:
		return NVME_IDENTIFY_DATA_SIZE;
	default:
		return 0;
	}
}

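/*
 * Dispatch an admin command received on a discovery controller. All
 * commands are rejected until the controller is enabled (CSTS.RDY).
 */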
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}
}

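/*
 * Allocate the single well-known discovery subsystem at initialization
 * time.
 */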
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

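/*
 * Release the reference on the discovery subsystem taken at
 * initialization time.
 */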
void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}