Lines matching "ns" in drivers/nvme/host/zns.c (NVMe Zoned Namespace support):
38 int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf, in nvme_query_zone_info() argument
41 struct nvme_effects_log *log = ns->head->effects; in nvme_query_zone_info()
49 if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags)) in nvme_query_zone_info()
50 dev_warn(ns->ctrl->device, in nvme_query_zone_info()
52 ns->head->ns_id); in nvme_query_zone_info()
54 set_bit(NVME_NS_FORCE_RO, &ns->flags); in nvme_query_zone_info()
55 dev_warn(ns->ctrl->device, in nvme_query_zone_info()
57 ns->head->ns_id); in nvme_query_zone_info()
61 if (!ns->ctrl->max_zone_append) { in nvme_query_zone_info()
62 status = nvme_set_max_append(ns->ctrl); in nvme_query_zone_info()
72 c.identify.nsid = cpu_to_le32(ns->head->ns_id); in nvme_query_zone_info()
76 status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id)); in nvme_query_zone_info()
85 dev_warn(ns->ctrl->device, in nvme_query_zone_info()
87 le16_to_cpu(id->zoc), ns->head->ns_id); in nvme_query_zone_info()
94 dev_warn(ns->ctrl->device, in nvme_query_zone_info()
96 zi->zone_size, ns->head->ns_id); in nvme_query_zone_info()
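The matches above are from nvme_query_zone_info(), which issues an Identify command for the command-set-specific namespace data structure (CNS 05h with the ZNS CSI) over the admin queue and validates the zone operation characteristics (zoc) and zone size before the namespace is brought up. As a rough user-space illustration of the same query, not the driver code itself, here is a minimal sketch using the NVMe admin passthrough ioctl; the device path, nsid, and the byte offsets (zoc at byte 0, LBA format extension 0 at byte 2816 per my reading of the ZNS spec) are assumptions for this example, and a little-endian host is assumed.

/* Hedged sketch: fetch the ZNS Identify Namespace data (CNS 05h, CSI 02h)
 * through NVME_IOCTL_ADMIN_CMD and print zoc plus the zone size of LBA
 * format 0. /dev/nvme0n1 and nsid=1 are example values; error handling is
 * minimal. The Identify data is little-endian, so a little-endian host is
 * assumed when copying fields out of the raw buffer. */
#include <fcntl.h>
#include <linux/nvme_ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        uint8_t id[4096];
        struct nvme_admin_cmd cmd;
        uint16_t zoc;
        uint64_t zsze;
        int fd = open("/dev/nvme0n1", O_RDONLY);

        if (fd < 0)
                return 1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x06;              /* Identify */
        cmd.nsid = 1;                   /* example namespace ID */
        cmd.addr = (uintptr_t)id;
        cmd.data_len = sizeof(id);
        cmd.cdw10 = 0x05;               /* CNS 05h: I/O command set specific NS */
        cmd.cdw11 = 0x02 << 24;         /* CSI 02h: Zoned Namespace command set */

        if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
                close(fd);
                return 1;
        }

        /* zoc is in bytes 1:0; LBA format extension 0 starts at byte 2816
         * and its first 8 bytes are the zone size (zsze) in logical blocks. */
        memcpy(&zoc, &id[0], sizeof(zoc));
        memcpy(&zsze, &id[2816], sizeof(zsze));
        printf("zoc=0x%x zone size=%llu LBAs\n", zoc, (unsigned long long)zsze);

        close(fd);
        return 0;
}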
108 void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, in nvme_update_zone_info() argument
114 lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append; in nvme_update_zone_info()
115 lim->chunk_sectors = ns->head->zsze = in nvme_update_zone_info()
116 nvme_lba_to_sect(ns->head, zi->zone_size); in nvme_update_zone_info()
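nvme_update_zone_info() copies the queried geometry into the queue limits: chunk_sectors, the block layer's zone size in 512-byte sectors, is derived from the zone size in logical blocks via nvme_lba_to_sect(). The conversion is just a shift by the difference between the LBA shift and the 512-byte sector shift; the sketch below walks through it with made-up example values, it is not driver code.

/* Hedged sketch of the unit conversion behind lim->chunk_sectors: a zone
 * size in logical blocks becomes a count of 512-byte sectors. The zone size
 * and LBA size below are examples only. */
#include <stdio.h>

int main(void)
{
        unsigned long long zone_size_lbas = 131072; /* zsze from Identify, example */
        unsigned int lba_shift = 12;                /* 4096-byte logical blocks */
        const unsigned int sector_shift = 9;        /* 512-byte block layer sectors */

        unsigned long long chunk_sectors =
                zone_size_lbas << (lba_shift - sector_shift);

        printf("zone size: %llu LBAs -> %llu sectors (%llu MiB)\n",
               zone_size_lbas, chunk_sectors,
               (chunk_sectors << sector_shift) >> 20);
        return 0;
}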
119 static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, in nvme_zns_alloc_report_buffer() argument
122 struct request_queue *q = ns->disk->queue; in nvme_zns_alloc_report_buffer()
130 get_capacity(ns->disk) >> ilog2(ns->head->zsze)); in nvme_zns_alloc_report_buffer()
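The nvme_zns_alloc_report_buffer() match shows the sizing rule: the report buffer never needs more entries than the disk has zones (get_capacity() >> ilog2(zsze)). The same cap can be applied from user space with the standard zoned-block ioctls, as in this sketch; the device path and the requested entry count are example values, not anything the driver mandates.

/* Hedged sketch: size a zone report with BLKGETZONESZ/BLKGETNRZONES so we
 * never request more entries than the device has zones. /dev/nvme0n1 is an
 * example path. */
#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        unsigned int zone_sectors = 0, nr_zones = 0;
        int fd = open("/dev/nvme0n1", O_RDONLY);

        if (fd < 0)
                return 1;

        if (ioctl(fd, BLKGETZONESZ, &zone_sectors) < 0 ||
            ioctl(fd, BLKGETNRZONES, &nr_zones) < 0) {
                close(fd);
                return 1;
        }

        /* Cap the caller's request at the number of zones on the device,
         * mirroring the capacity-based cap in the driver. */
        unsigned int wanted = 1 << 16;      /* example request */
        unsigned int entries = wanted < nr_zones ? wanted : nr_zones;

        printf("zone size: %u sectors, zones: %u, report entries: %u\n",
               zone_sectors, nr_zones, entries);
        close(fd);
        return 0;
}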
176 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, in nvme_ns_report_zones() argument
185 if (ns->head->ids.csi != NVME_CSI_ZNS) in nvme_ns_report_zones()
188 report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); in nvme_ns_report_zones()
193 c.zmr.nsid = cpu_to_le32(ns->head->ns_id); in nvme_ns_report_zones()
199 sector &= ~(ns->head->zsze - 1); in nvme_ns_report_zones()
200 while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { in nvme_ns_report_zones()
203 c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector)); in nvme_ns_report_zones()
204 ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen); in nvme_ns_report_zones()
216 ret = nvme_zone_parse_entry(ns->ctrl, ns->head, in nvme_ns_report_zones()
224 sector += ns->head->zsze * nz; in nvme_ns_report_zones()
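nvme_ns_report_zones() is the driver's report_zones implementation: it aligns the start sector to the zone size, then loops issuing Zone Management Receive commands and parsing the returned entries until the requested number of zones or the disk capacity is reached. From user space this path is exercised through the BLKREPORTZONE ioctl; the sketch below is a minimal consumer of that ioctl, with an example device path, and is not the kernel loop itself.

/* Hedged sketch: walk the first few zones of a zoned block device with
 * BLKREPORTZONE, which ends up in the driver's report_zones path
 * (nvme_ns_report_zones for an NVMe ZNS namespace). */
#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define NR_ZONES 8

int main(void)
{
        struct blk_zone_report *rep;
        int fd = open("/dev/nvme0n1", O_RDONLY);

        if (fd < 0)
                return 1;

        rep = calloc(1, sizeof(*rep) + NR_ZONES * sizeof(struct blk_zone));
        if (!rep) {
                close(fd);
                return 1;
        }

        rep->sector = 0;          /* start reporting from the first zone */
        rep->nr_zones = NR_ZONES; /* room for this many entries */

        if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
                free(rep);
                close(fd);
                return 1;
        }

        /* The kernel updates nr_zones to the number actually reported. */
        for (unsigned int i = 0; i < rep->nr_zones; i++) {
                struct blk_zone *z = &rep->zones[i];

                printf("zone %u: start=%llu len=%llu wp=%llu cond=%u\n",
                       i, (unsigned long long)z->start,
                       (unsigned long long)z->len,
                       (unsigned long long)z->wp, z->cond);
        }

        free(rep);
        close(fd);
        return 0;
}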
236 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, in nvme_setup_zone_mgmt_send() argument
242 c->zms.nsid = cpu_to_le32(ns->head->ns_id); in nvme_setup_zone_mgmt_send()
243 c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); in nvme_setup_zone_mgmt_send()
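Finally, nvme_setup_zone_mgmt_send() translates block-layer zone management requests (reset, open, close, finish) into Zone Management Send commands, taking the namespace ID and the starting LBA from the request position. A user-space zone reset travels this path via the BLKRESETZONE ioctl; the sketch below shows that, with an example device path, and note that resetting a zone discards the data in it.

/* Hedged sketch: reset the first zone through BLKRESETZONE. The block layer
 * turns this into a REQ_OP_ZONE_RESET request, which the NVMe driver encodes
 * as a Zone Management Send command (nvme_setup_zone_mgmt_send). Destructive:
 * the data in the reset zone is gone afterwards. */
#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct blk_zone_range range;
        unsigned int zone_sectors = 0;
        int fd = open("/dev/nvme0n1", O_RDWR);

        if (fd < 0)
                return 1;

        if (ioctl(fd, BLKGETZONESZ, &zone_sectors) < 0 || !zone_sectors) {
                close(fd);
                return 1;
        }

        range.sector = 0;                /* first zone of the device */
        range.nr_sectors = zone_sectors; /* exactly one zone */

        if (ioctl(fd, BLKRESETZONE, &range) < 0)
                perror("BLKRESETZONE");

        close(fd);
        return 0;
}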