Merge tag 'block-6.14-20250207' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:

 - MD pull request via Song:
      - fix an error handling path for md-linear

 - NVMe pull request via Keith:
      - Connection fixes for fibre channel transport (Daniel)
      - Endian fixes (Keith, Christoph)
      - Cleanup fix for host memory buffer (Francis)
      - Platform specific power quirks (Georg)
      - Target memory leak (Sagi)
      - Use appropriate controller state accessor (Daniel)

 - Fixup for a regression introduced last week, where sunvdc wasn't
   updated for an API change, causing compilation failures on sparc64.

* tag 'block-6.14-20250207' of git://git.kernel.dk/linux:
  drivers/block/sunvdc.c: update the correct AIP call
  md: Fix linear_set_limits()
  nvme-fc: use ctrl state getter
  nvme: make nvme_tls_attrs_group static
  nvmet: add a missing endianess conversion in nvmet_execute_admin_connect
  nvmet: the result field in nvmet_alloc_ctrl_args is little endian
  nvmet: fix a memory leak in controller identify
  nvme-fc: do not ignore connectivity loss during connecting
  nvme: handle connectivity loss in nvme_set_queue_count
  nvme-fc: go straight to connecting state when initializing
  nvme-pci: Add TUXEDO IBP Gen9 to Samsung sleep quirk
  nvme-pci: Add TUXEDO InfinityFlex to Samsung sleep quirk
  nvme-pci: remove redundant dma frees in hmb
  nvmet: fix rw control endian access
torvalds committed Feb 7, 2025
2 parents 1fa9970 + 96b531f commit a67d0a0
Showing 10 changed files with 43 additions and 29 deletions.
4 changes: 2 additions & 2 deletions drivers/block/sunvdc.c
@@ -1127,8 +1127,8 @@ static void vdc_queue_drain(struct vdc_port *port)
 
         spin_lock_irq(&port->vio.lock);
         port->drain = 0;
-        blk_mq_unquiesce_queue(q, memflags);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unquiesce_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 }
 
 static void vdc_ldc_reset_timer_work(struct work_struct *work)
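For context on the sunvdc regression: the 6.14 freeze API change means blk_mq_freeze_queue() hands back memalloc flags that must be returned to blk_mq_unfreeze_queue(), while blk_mq_unquiesce_queue() still takes only the queue; the broken conversion had the two arguments attached to the wrong calls, hence the sparc64 build failure. A minimal sketch of the intended pairing, assuming (as vdc_queue_drain() does before the lines shown) that memflags comes from the earlier blk_mq_freeze_queue() call:

/* Illustrative sketch only, not the driver code. */
static void drain_example(struct request_queue *q)
{
        unsigned int memflags;

        memflags = blk_mq_freeze_queue(q);      /* freeze returns the flags to restore */
        blk_mq_quiesce_queue(q);                /* quiesce/unquiesce take only the queue */

        /* ... wait for in-flight requests ... */

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q, memflags);     /* memflags goes back here, not to unquiesce */
}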
4 changes: 1 addition & 3 deletions drivers/md/md-linear.c
@@ -76,10 +76,8 @@ static int linear_set_limits(struct mddev *mddev)
         lim.max_write_zeroes_sectors = mddev->chunk_sectors;
         lim.io_min = mddev->chunk_sectors << 9;
         err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
-        if (err) {
-                queue_limits_cancel_update(mddev->gendisk->queue);
+        if (err)
                 return err;
-        }
 
         return queue_limits_set(mddev->gendisk->queue, &lim);
 }
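Why dropping queue_limits_cancel_update() is the fix: cancel_update() is the unwind for queue_limits_start_update(), which takes the queue's limits lock, but linear_set_limits() builds its limits on the stack and applies them with queue_limits_set(), so there was never a started update to cancel and the call released a lock that had not been taken. A hedged kernel-style sketch contrasting the two update styles; q, new_io_min and limits_ok() are placeholders, not code from md-linear.c:

/* Locked read-modify-write: start_update() must end in commit or cancel. */
static int locked_update_example(struct request_queue *q, unsigned int new_io_min)
{
        struct queue_limits lim = queue_limits_start_update(q);        /* takes the limits lock */

        lim.io_min = new_io_min;
        if (!limits_ok(&lim)) {
                queue_limits_cancel_update(q);          /* releases the lock taken above */
                return -EINVAL;
        }
        return queue_limits_commit_update(q, &lim);     /* also releases the lock */
}

/* One-shot replace, the style linear_set_limits() uses: no pairing, nothing to cancel. */
static int one_shot_example(struct request_queue *q, struct queue_limits *lim)
{
        return queue_limits_set(q, lim);
}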
8 changes: 7 additions & 1 deletion drivers/nvme/host/core.c
@@ -1700,7 +1700,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 
         status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
                         &result);
-        if (status < 0)
+
+        /*
+         * It's either a kernel error or the host observed a connection
+         * lost. In either case it's not possible communicate with the
+         * controller and thus enter the error code path.
+         */
+        if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
                 return status;
 
         /*
35 changes: 25 additions & 10 deletions drivers/nvme/host/fc.c
@@ -781,11 +781,19 @@ nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
 static void
 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
 {
+        enum nvme_ctrl_state state;
+        unsigned long flags;
+
         dev_info(ctrl->ctrl.device,
                 "NVME-FC{%d}: controller connectivity lost. Awaiting "
                 "Reconnect", ctrl->cnum);
 
-        switch (nvme_ctrl_state(&ctrl->ctrl)) {
+        spin_lock_irqsave(&ctrl->lock, flags);
+        set_bit(ASSOC_FAILED, &ctrl->flags);
+        state = nvme_ctrl_state(&ctrl->ctrl);
+        spin_unlock_irqrestore(&ctrl->lock, flags);
+
+        switch (state) {
         case NVME_CTRL_NEW:
         case NVME_CTRL_LIVE:
                 /*
@@ -2079,7 +2087,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
         nvme_fc_complete_rq(rq);
 
 check_error:
-        if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+        if (terminate_assoc &&
+            nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
                 queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
@@ -2533,24 +2542,25 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
+        enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
         /*
          * if an error (io timeout, etc) while (re)connecting, the remote
          * port requested terminating of the association (disconnect_ls)
          * or an error (timeout or abort) occurred on an io while creating
          * the controller. Abort any ios on the association and let the
          * create_association error path resolve things.
          */
-        if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+        if (state == NVME_CTRL_CONNECTING) {
                 __nvme_fc_abort_outstanding_ios(ctrl, true);
-                set_bit(ASSOC_FAILED, &ctrl->flags);
                 dev_warn(ctrl->ctrl.device,
                         "NVME-FC{%d}: transport error during (re)connect\n",
                         ctrl->cnum);
                 return;
         }
 
         /* Otherwise, only proceed if in LIVE state - e.g. on first error */
-        if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+        if (state != NVME_CTRL_LIVE)
                 return;
 
         dev_warn(ctrl->ctrl.device,
@@ -3167,12 +3177,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
                 else
                         ret = nvme_fc_recreate_io_queues(ctrl);
         }
-        if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
-                ret = -EIO;
         if (ret)
                 goto out_term_aen_ops;
 
-        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+        spin_lock_irqsave(&ctrl->lock, flags);
+        if (!test_bit(ASSOC_FAILED, &ctrl->flags))
+                changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+        else
+                ret = -EIO;
+        spin_unlock_irqrestore(&ctrl->lock, flags);
+
+        if (ret)
+                goto out_term_aen_ops;
 
         ctrl->ctrl.nr_reconnects = 0;
 
@@ -3578,8 +3594,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
         list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
         spin_unlock_irqrestore(&rport->lock, flags);
 
-        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
-            !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                 dev_err(ctrl->ctrl.device,
                         "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
                 goto fail_ctrl;
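The common thread in the fc.c changes: the connectivity-loss handler now sets ASSOC_FAILED under ctrl->lock, and nvme_fc_create_association() tests that flag and performs the LIVE transition under the same lock, so a loss that lands between the test and the state change can no longer be missed (previously the test_bit() ran before nvme_change_ctrl_state() with nothing ordering the two against the loss path). A runnable userspace analogue of the check-and-transition-under-one-lock pattern; the names are made up and it is plain pthreads, not kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CONNECTING, LIVE };

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static bool assoc_failed;
static enum ctrl_state state = CONNECTING;

/* transport side: connectivity to the target is gone */
static void connectivity_loss(void)
{
        pthread_mutex_lock(&ctrl_lock);
        assoc_failed = true;
        pthread_mutex_unlock(&ctrl_lock);
}

/* end of association setup: flag test and LIVE transition are one atomic step */
static int finish_connect(void)
{
        int ret = 0;

        pthread_mutex_lock(&ctrl_lock);
        if (!assoc_failed)
                state = LIVE;
        else
                ret = -1;       /* take the error path (reconnect) instead of going LIVE */
        pthread_mutex_unlock(&ctrl_lock);
        return ret;
}

int main(void)
{
        int ret;

        connectivity_loss();    /* loss lands just before the final state change */
        ret = finish_connect();
        printf("finish_connect() = %d, live = %s\n", ret, state == LIVE ? "yes" : "no");
        return 0;
}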
12 changes: 3 additions & 9 deletions drivers/nvme/host/pci.c
@@ -2153,14 +2153,6 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
         return 0;
 
 out_free_bufs:
-        while (--i >= 0) {
-                size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
-
-                dma_free_attrs(dev->dev, size, bufs[i],
-                               le64_to_cpu(descs[i].addr),
-                               DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
-        }
-
         kfree(bufs);
 out_free_descs:
         dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
@@ -3147,7 +3139,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
                  * because of high power consumption (> 2 Watt) in s2idle
                  * sleep. Only some boards with Intel CPU are affected.
                  */
-                if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+                if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+                    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+                    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
                     dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
                     dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
                     dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
2 changes: 1 addition & 1 deletion drivers/nvme/host/sysfs.c
@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
         return a->mode;
 }
 
-const struct attribute_group nvme_tls_attrs_group = {
+static const struct attribute_group nvme_tls_attrs_group = {
         .attrs = nvme_tls_attrs,
         .is_visible = nvme_tls_attrs_are_visible,
 };
1 change: 1 addition & 0 deletions drivers/nvme/target/admin-cmd.c
@@ -1068,6 +1068,7 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
                 goto out;
         }
         status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+        kfree(id);
 out:
         nvmet_req_complete(req, status);
 }
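The identify leak fixed above follows the usual temporary-buffer shape: the data is built in a kzalloc'd buffer, copied into the request SGL, and the buffer must then be freed on the success path as well, not only when the copy is skipped. A small userspace sketch of that shape (hypothetical names, libc allocators instead of the kernel's):

#include <stdlib.h>
#include <string.h>

/* Sketch: build a report in a temporary buffer, copy it out, then free it. */
int identify_sketch(void *out, size_t len)
{
        char *id = calloc(1, len);

        if (!id)
                return -1;              /* nothing allocated, nothing to free */

        /* ... fill in the buffer ... */
        memcpy(out, id, len);           /* hand the caller a copy */
        free(id);                       /* the added kfree(id): without it the buffer leaks on every call */
        return 0;
}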
2 changes: 1 addition & 1 deletion drivers/nvme/target/fabrics-cmd.c
@@ -287,7 +287,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
         args.subsysnqn = d->subsysnqn;
         args.hostnqn = d->hostnqn;
         args.hostid = &d->hostid;
-        args.kato = c->kato;
+        args.kato = le32_to_cpu(c->kato);
 
         ctrl = nvmet_alloc_ctrl(&args);
         if (!ctrl)
2 changes: 1 addition & 1 deletion drivers/nvme/target/io-cmd-bdev.c
@@ -272,7 +272,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
                 iter_flags = SG_MITER_FROM_SG;
         }
 
-        if (req->cmd->rw.control & NVME_RW_LR)
+        if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
                 opf |= REQ_FAILFAST_DEV;
 
         if (is_pci_p2pdma_page(sg_page(req->sg)))
2 changes: 1 addition & 1 deletion drivers/nvme/target/nvmet.h
@@ -589,7 +589,7 @@ struct nvmet_alloc_ctrl_args {
         const struct nvmet_fabrics_ops *ops;
         struct device *p2p_client;
         u32 kato;
-        u32 result;
+        __le32 result;
         u16 error_loc;
         u16 status;
 };
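Several of the nvmet fixes above are endianness corrections: kato arrives from the host as a little-endian 32-bit value and needs le32_to_cpu() before it is used as a number, the result field in nvmet_alloc_ctrl_args is now typed __le32 so the mismatch is visible to sparse, and the rw.control flag test converts the constant with cpu_to_le16() instead of byte-swapping the field on every request. A runnable userspace sketch of the two idioms follows; the helpers and the NVME_RW_LR stand-in are illustrations, not the kernel's byteorder API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel's byteorder helpers, written portably. */
static uint32_t sketch_le32_to_cpu(uint32_t le)
{
        uint8_t b[4];

        memcpy(b, &le, sizeof(b));
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint16_t sketch_cpu_to_le16(uint16_t v)
{
        uint16_t le;
        uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };

        memcpy(&le, b, sizeof(le));
        return le;
}

#define SKETCH_NVME_RW_LR (1u << 15)    /* stand-in for the limited-retry flag in the rw control word */

int main(void)
{
        uint32_t kato_wire;
        uint8_t kato_bytes[4] = { 0x98, 0x3a, 0x00, 0x00 };     /* 15000 ms, little endian on the wire */
        uint16_t control_wire;

        memcpy(&kato_wire, kato_bytes, sizeof(kato_wire));
        control_wire = sketch_cpu_to_le16(SKETCH_NVME_RW_LR);

        /* Numeric use: convert the wire value to host order first (the kato fix). */
        printf("kato = %u ms\n", sketch_le32_to_cpu(kato_wire));

        /* Flag test: convert the constant once and mask the raw field (the rw.control fix). */
        printf("limited retry: %s\n",
               (control_wire & sketch_cpu_to_le16(SKETCH_NVME_RW_LR)) ? "set" : "clear");
        return 0;
}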
