Lines matching +full:msb in drivers/s390/block/scm_blk.c (Linux s390 storage-class memory block driver).
Each hit below is shown as: source line number, the matching line, then the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
17 #include <linux/blk-mq.h>
46 free_page((unsigned long) scmrq->aob); in __scm_free_rq()
47 kfree(scmrq->request); in __scm_free_rq()
59 list_del(&scmrq->list); in scm_free_rqs()
74 return -ENOMEM; in __scm_alloc_rq()
76 scmrq = (void *) aobrq->data; in __scm_alloc_rq()
77 scmrq->aob = (void *) get_zeroed_page(GFP_DMA); in __scm_alloc_rq()
78 if (!scmrq->aob) in __scm_alloc_rq()
81 scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]), in __scm_alloc_rq()
83 if (!scmrq->request) in __scm_alloc_rq()
86 INIT_LIST_HEAD(&scmrq->list); in __scm_alloc_rq()
88 list_add(&scmrq->list, &inactive_requests); in __scm_alloc_rq()
94 return -ENOMEM; in __scm_alloc_rq()
103 return -ENOMEM; in scm_alloc_rqs()
105 while (nrqs-- && !ret) in scm_alloc_rqs()
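scm_alloc_rqs()/__scm_alloc_rq() preallocate a pool of requests on the inactive_requests list; each entry owns one zeroed GFP_DMA page for its AOB plus an array of nr_requests_per_io blk-mq request back-pointers. Reconstructed from the members referenced throughout this listing, the request object looks roughly like this (a sketch; the authoritative definition lives in the driver's scm_blk.h, and member order and types here are my reading, not verbatim):

struct scm_request {
	struct scm_blk_dev *bdev;	/* owning block device */
	struct aidaw *next_aidaw;	/* next free aidaw slot in the AOB page */
	struct request **request;	/* nr_requests_per_io blk-mq back-pointers */
	struct aob *aob;		/* one zeroed GFP_DMA page per request */
	struct list_head list;		/* links the entry into inactive_requests */
	u8 retries;			/* retry budget, set to 4 in scm_request_init() */
	blk_status_t error;		/* AOB-wide completion status */
};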
119 list_del(&scmrq->list); in scm_request_fetch()
128 struct msb *msb; in scm_request_done() local
132 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { in scm_request_done()
133 msb = &scmrq->aob->msb[i]; in scm_request_done()
134 aidaw = (u64)dma64_to_virt(msb->data_addr); in scm_request_done()
136 if ((msb->flags & MSB_FLAG_IDA) && aidaw && in scm_request_done()
142 list_add(&scmrq->list, &inactive_requests); in scm_request_done()
148 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; in scm_permit_request()
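scm_permit_request() is the admission check used by the queue handler: reads always pass, while writes are refused whenever the device sits in SCM_WR_PROHIBIT (set in the error handler below, cleared again in scm_blk_set_available()). A runnable userspace truth table of the same predicate:

#include <stdbool.h>
#include <stdio.h>

enum scm_state { SCM_OPER, SCM_WR_PROHIBIT }; /* the two states seen in this file */

static bool permit(bool is_write, enum scm_state state)
{
	/* mirrors: rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT */
	return !is_write || state != SCM_WR_PROHIBIT;
}

int main(void)
{
	printf("read  while prohibited:  %d\n", permit(false, SCM_WR_PROHIBIT)); /* 1 */
	printf("write while prohibited:  %d\n", permit(true, SCM_WR_PROHIBIT));  /* 0 */
	printf("write while operational: %d\n", permit(true, SCM_OPER));         /* 1 */
	return 0;
}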
161 unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw; in scm_aidaw_bytes()
170 if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes) in scm_aidaw_fetch()
171 return scmrq->next_aidaw; in scm_aidaw_fetch()
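The two aidaw helpers implement a simple bump allocator inside the AOB page: scm_aidaw_bytes() computes how much room remains before the next page boundary, and scm_aidaw_fetch() reuses scmrq->next_aidaw whenever that is enough. A runnable userspace replay of the arithmetic (only the ALIGN line appears in the hits; the rounding down to whole 16-byte aidaw slots is my reading of the helper and of sizeof(struct aidaw), so treat both as assumptions):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define AIDAW_SIZE	16UL	/* assumed sizeof(struct aidaw) */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long aidaw_bytes(unsigned long aidaw)
{
	unsigned long bytes = ALIGN(aidaw, PAGE_SIZE) - aidaw;

	return bytes / AIDAW_SIZE * AIDAW_SIZE; /* whole aidaw slots only */
}

int main(void)
{
	printf("%lu\n", aidaw_bytes(0x1180)); /* 0x1180..0x2000: 3712 bytes */
	printf("%lu\n", aidaw_bytes(0x2000)); /* page-aligned: 0 bytes left */
	return 0;
}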
181 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_prepare()
182 struct scm_device *scmdev = bdev->gendisk->private_data; in scm_request_prepare()
183 int pos = scmrq->aob->request.msb_count; in scm_request_prepare()
184 struct msb *msb = &scmrq->aob->msb[pos]; in scm_request_prepare() local
185 struct request *req = scmrq->request[pos]; in scm_request_prepare()
192 return -ENOMEM; in scm_request_prepare()
194 msb->bs = MSB_BS_4K; in scm_request_prepare()
195 scmrq->aob->request.msb_count++; in scm_request_prepare()
196 msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); in scm_request_prepare()
197 msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE; in scm_request_prepare()
198 msb->flags |= MSB_FLAG_IDA; in scm_request_prepare()
199 msb->data_addr = virt_to_dma64(aidaw); in scm_request_prepare()
203 msb->blk_count += bv.bv_len >> 12; in scm_request_prepare()
204 aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page)); in scm_request_prepare()
208 scmrq->next_aidaw = aidaw; in scm_request_prepare()
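scm_request_prepare() converts between the three units in play: blk_rq_pos() counts 512-byte sectors (hence the << 9 to a byte address on the device), the msb block size is 4K (MSB_BS_4K, hence bv.bv_len >> 12 when counting blocks), and each 4K segment gets one aidaw pointing at its page. A runnable check of the two shifts (the base address is a made-up value):

#include <stdio.h>

int main(void)
{
	unsigned long long scm_base = 0x20000000000ULL; /* hypothetical scmdev->address */
	unsigned long long sector = 2048;               /* blk_rq_pos(): 512-byte units */
	unsigned int bv_len = 8192;                     /* one bio_vec spanning two 4K pages */

	printf("msb->scm_addr  = %#llx\n", scm_base + (sector << 9)); /* base + 1 MiB */
	printf("msb->blk_count = %u\n", bv_len >> 12);                /* 2 blocks */
	return 0;
}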
215 scmrq->request[scmrq->aob->request.msb_count] = req; in scm_request_set()
222 struct aob *aob = scmrq->aob; in scm_request_init()
224 memset(scmrq->request, 0, in scm_request_init()
225 nr_requests_per_io * sizeof(scmrq->request[0])); in scm_request_init()
227 aobrq->scmdev = bdev->scmdev; in scm_request_init()
228 aob->request.cmd_code = ARQB_CMD_MOVE; in scm_request_init()
229 aob->request.data = (u64) aobrq; in scm_request_init()
230 scmrq->bdev = bdev; in scm_request_init()
231 scmrq->retries = 4; in scm_request_init()
232 scmrq->error = BLK_STS_OK; in scm_request_init()
233 /* We don't use all msbs - place aidaws at the end of the aob page. */ in scm_request_init()
234 scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; in scm_request_init()
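The comment above is the key trick: one 4K page holds the AOB header, the msb array, and, since only nr_requests_per_io of the msb slots are ever used, the tail of the page doubles as aidaw storage starting at &aob->msb[nr_requests_per_io]. A runnable back-of-envelope check, with sizes taken from my reading of asm/eadm.h (128-byte header, 32-byte msb, 16-byte aidaw; treat all three as assumptions):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define AOB_HEADER	128u	/* assumed: request + response blocks */
#define MSB_SIZE	32u	/* assumed sizeof(struct msb) */
#define AIDAW_SIZE	16u	/* assumed sizeof(struct aidaw) */

int main(void)
{
	unsigned int nr_requests_per_io = 8; /* driver default, module parameter */
	unsigned int aidaw_off = AOB_HEADER + nr_requests_per_io * MSB_SIZE;

	printf("aidaws start at offset %u, room for %u aidaws\n",
	       aidaw_off, (PAGE_SIZE - aidaw_off) / AIDAW_SIZE);
	return 0;
}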
239 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_requeue()
242 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) in scm_request_requeue()
243 blk_mq_requeue_request(scmrq->request[i], false); in scm_request_requeue()
245 atomic_dec(&bdev->queued_reqs); in scm_request_requeue()
247 blk_mq_kick_requeue_list(bdev->rq); in scm_request_requeue()
252 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_finish()
256 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { in scm_request_finish()
257 error = blk_mq_rq_to_pdu(scmrq->request[i]); in scm_request_finish()
258 *error = scmrq->error; in scm_request_finish()
259 if (likely(!blk_should_fake_timeout(scmrq->request[i]->q))) in scm_request_finish()
260 blk_mq_complete_request(scmrq->request[i]); in scm_request_finish()
263 atomic_dec(&bdev->queued_reqs); in scm_request_finish()
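scm_request_finish() fans the single AOB-wide status out to every batched request: tag_set.cmd_size is sizeof(blk_status_t) (see the setup hits further down), so each blk-mq request carries a PDU just big enough for its status, written here before completion. A hedged kernel-context sketch of reading that status back (the helper name is mine, not the driver's):

#include <linux/blk-mq.h>

static blk_status_t scm_rq_status(struct request *req)
{
	/* blk_mq_rq_to_pdu() returns the driver-private area behind the request */
	return *(blk_status_t *)blk_mq_rq_to_pdu(req);
}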
269 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_start()
271 atomic_inc(&bdev->queued_reqs); in scm_request_start()
272 if (eadm_start_aob(scmrq->aob)) { in scm_request_start()
286 struct scm_device *scmdev = hctx->queue->queuedata; in scm_blk_request()
287 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); in scm_blk_request()
288 struct scm_queue *sq = hctx->driver_data; in scm_blk_request()
289 struct request *req = qd->rq; in scm_blk_request()
292 spin_lock(&sq->lock); in scm_blk_request()
294 spin_unlock(&sq->lock); in scm_blk_request()
298 scmrq = sq->scmrq; in scm_blk_request()
303 spin_unlock(&sq->lock); in scm_blk_request()
307 sq->scmrq = scmrq; in scm_blk_request()
315 if (scmrq->aob->request.msb_count) in scm_blk_request()
318 sq->scmrq = NULL; in scm_blk_request()
319 spin_unlock(&sq->lock); in scm_blk_request()
324 if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) { in scm_blk_request()
326 sq->scmrq = NULL; in scm_blk_request()
328 spin_unlock(&sq->lock); in scm_blk_request()
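scm_blk_request() is the blk-mq queue_rq handler: under sq->lock it starts the request, packs it as one more msb into the per-hctx AOB (sq->scmrq), and only issues the AOB once it is full or the block layer signals the end of the batch. A runnable userspace model of just that flush decision (NR_REQUESTS_PER_IO mirrors the module parameter, default 8 in my reading):

#include <stdbool.h>
#include <stdio.h>

#define NR_REQUESTS_PER_IO 8 /* assumed driver default */

struct queue_model { int msb_count; };

static bool should_start(const struct queue_model *q, bool last)
{
	/* mirrors: qd->last || scmrq->aob->request.msb_count == nr_requests_per_io */
	return last || q->msb_count == NR_REQUESTS_PER_IO;
}

int main(void)
{
	struct queue_model q = { .msb_count = 3 };

	printf("start now? %d\n", should_start(&q, false)); /* 0: keep batching */
	printf("start now? %d\n", should_start(&q, true));  /* 1: flush on last */
	return 0;
}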
338 return -ENOMEM; in scm_blk_init_hctx()
340 spin_lock_init(&qd->lock); in scm_blk_init_hctx()
341 hctx->driver_data = qd; in scm_blk_init_hctx()
348 struct scm_queue *qd = hctx->driver_data; in scm_blk_exit_hctx()
350 WARN_ON(qd->scmrq); in scm_blk_exit_hctx()
351 kfree(hctx->driver_data); in scm_blk_exit_hctx()
352 hctx->driver_data = NULL; in scm_blk_exit_hctx()
357 struct aob *aob = scmrq->aob; in __scmrq_log_error()
359 if (scmrq->error == BLK_STS_TIMEOUT) in __scmrq_log_error()
363 SCM_LOG_HEX(1, &aob->response, sizeof(aob->response)); in __scmrq_log_error()
365 if (scmrq->retries) in __scmrq_log_error()
369 scmrq->error); in __scmrq_log_error()
374 struct scm_blk_dev *bdev = scmrq->bdev; in scm_blk_handle_error()
377 if (scmrq->error != BLK_STS_IOERR) in scm_blk_handle_error()
380 /* For -EIO the response block is valid. */ in scm_blk_handle_error()
381 switch (scmrq->aob->response.eqc) { in scm_blk_handle_error()
383 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_handle_error()
384 if (bdev->state != SCM_WR_PROHIBIT) in scm_blk_handle_error()
386 (unsigned long) bdev->scmdev->address); in scm_blk_handle_error()
387 bdev->state = SCM_WR_PROHIBIT; in scm_blk_handle_error()
388 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_handle_error()
395 if (!eadm_start_aob(scmrq->aob)) in scm_blk_handle_error()
406 scmrq->error = error; in scm_blk_irq()
409 if (scmrq->retries-- > 0) { in scm_blk_irq()
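scm_blk_irq() and scm_blk_handle_error() implement a bounded retry loop: scm_request_init() grants 4 retries, each failure is logged via __scmrq_log_error(), the AOB is restarted with eadm_start_aob() while budget remains, and afterwards the batched requests are requeued or completed with the error. A runnable model of the countdown:

#include <stdbool.h>
#include <stdio.h>

struct scmrq_model { int retries; };

static bool scm_should_retry(struct scmrq_model *rq)
{
	/* mirrors: scmrq->retries-- > 0 */
	return rq->retries-- > 0;
}

int main(void)
{
	struct scmrq_model rq = { .retries = 4 };
	int attempt = 0;

	while (scm_should_retry(&rq))
		printf("retry %d\n", ++attempt); /* prints exactly 4 retries */
	return 0;
}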
444 lim.max_segments = min(scmdev->nr_max_block, in scm_blk_dev_setup()
448 devindex = atomic_inc_return(&nr_devices) - 1; in scm_blk_dev_setup()
451 ret = -ENODEV; in scm_blk_dev_setup()
455 bdev->scmdev = scmdev; in scm_blk_dev_setup()
456 bdev->state = SCM_OPER; in scm_blk_dev_setup()
457 spin_lock_init(&bdev->lock); in scm_blk_dev_setup()
458 atomic_set(&bdev->queued_reqs, 0); in scm_blk_dev_setup()
460 bdev->tag_set.ops = &scm_mq_ops; in scm_blk_dev_setup()
461 bdev->tag_set.cmd_size = sizeof(blk_status_t); in scm_blk_dev_setup()
462 bdev->tag_set.nr_hw_queues = nr_requests; in scm_blk_dev_setup()
463 bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; in scm_blk_dev_setup()
464 bdev->tag_set.numa_node = NUMA_NO_NODE; in scm_blk_dev_setup()
466 ret = blk_mq_alloc_tag_set(&bdev->tag_set); in scm_blk_dev_setup()
470 bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev); in scm_blk_dev_setup()
471 if (IS_ERR(bdev->gendisk)) { in scm_blk_dev_setup()
472 ret = PTR_ERR(bdev->gendisk); in scm_blk_dev_setup()
475 bdev->gendisk->private_data = scmdev; in scm_blk_dev_setup()
476 bdev->gendisk->fops = &scm_blk_devops; in scm_blk_dev_setup()
477 bdev->gendisk->major = scm_major; in scm_blk_dev_setup()
478 bdev->gendisk->first_minor = devindex * SCM_NR_PARTS; in scm_blk_dev_setup()
479 bdev->gendisk->minors = SCM_NR_PARTS; in scm_blk_dev_setup()
481 len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm"); in scm_blk_dev_setup()
483 len += snprintf(bdev->gendisk->disk_name + len, in scm_blk_dev_setup()
484 DISK_NAME_LEN - len, "%c", in scm_blk_dev_setup()
485 'a' + (devindex / 26) - 1); in scm_blk_dev_setup()
488 snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c", in scm_blk_dev_setup()
492 set_capacity(bdev->gendisk, scmdev->size >> 9); in scm_blk_dev_setup()
493 ret = device_add_disk(&scmdev->dev, bdev->gendisk, NULL); in scm_blk_dev_setup()
500 put_disk(bdev->gendisk); in scm_blk_dev_setup()
502 blk_mq_free_tag_set(&bdev->tag_set); in scm_blk_dev_setup()
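The disk-naming hits in scm_blk_dev_setup() encode devindex in base 26: scma through scmz for the first 26 devices, then a second letter is appended (scmaa, scmab, and so on). A runnable replay of that scheme (the `if (devindex > 25)` guard and the final 'a' + devindex fall between the hits shown above, so they are my reconstruction):

#include <stdio.h>

#define DISK_NAME_LEN 32

static void scm_name(char *buf, int devindex)
{
	int len = snprintf(buf, DISK_NAME_LEN, "scm");

	if (devindex > 25) {
		len += snprintf(buf + len, DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex %= 26;
	}
	snprintf(buf + len, DISK_NAME_LEN - len, "%c", 'a' + devindex);
}

int main(void)
{
	char name[DISK_NAME_LEN];
	int idx[] = { 0, 25, 26, 27, 701 };

	for (unsigned int i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
		scm_name(name, idx[i]);
		printf("%3d -> %s\n", idx[i], name); /* scma, scmz, scmaa, scmab, scmzz */
	}
	return 0;
}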
510 del_gendisk(bdev->gendisk); in scm_blk_dev_cleanup()
511 put_disk(bdev->gendisk); in scm_blk_dev_cleanup()
512 blk_mq_free_tag_set(&bdev->tag_set); in scm_blk_dev_cleanup()
519 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_set_available()
520 if (bdev->state == SCM_WR_PROHIBIT) in scm_blk_set_available()
522 (unsigned long) bdev->scmdev->address); in scm_blk_set_available()
523 bdev->state = SCM_OPER; in scm_blk_set_available()
524 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_set_available()
537 int ret = -EINVAL; in scm_blk_init()
553 ret = -ENOMEM; in scm_blk_init()