Skip to content

Commit

Permalink
Merge tag 'block-6.14-20250221' of git://git.kernel.dk/linux
Browse files Browse the repository at this point in the history
Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - FC controller state check fixes (Daniel)
      - PCI Endpoint fixes (Damien)
      - TCP connection failure fix (Caleb)
      - TCP handling C2HTermReq PDU (Maurizio)
      - RDMA queue state check (Ruozhu)
      - Apple controller fixes (Hector)
      - Target crash on disabled namespace (Hannes)

 - MD pull request via Yu:
      - Fix queue limits error handling for raid0, raid1 and raid10

 - Fix for a NULL pointer deref in request data mapping

 - Code cleanup for request merging

* tag 'block-6.14-20250221' of git://git.kernel.dk/linux:
  nvme: only allow entering LIVE from CONNECTING state
  nvme-fc: rely on state transitions to handle connectivity loss
  apple-nvme: Support coprocessors left idle
  apple-nvme: Release power domains when probe fails
  nvmet: Use enum definitions instead of hardcoded values
  nvme: Cleanup the definition of the controller config register fields
  nvme/ioctl: add missing space in err message
  nvme-tcp: fix connect failure on receiving partial ICResp PDU
  nvme: tcp: Fix compilation warning with W=1
  nvmet: pci-epf: Avoid RCU stalls under heavy workload
  nvmet: pci-epf: Do not uselessly write the CSTS register
  nvmet: pci-epf: Correctly initialize CSTS when enabling the controller
  nvmet-rdma: recheck queue state is LIVE in state lock in recv done
  nvmet: Fix crash when a namespace is disabled
  nvme-tcp: add basic support for the C2HTermReq PDU
  nvme-pci: quirk Acer FA100 for non-uniqueue identifiers
  block: fix NULL pointer dereferenced within __blk_rq_map_sg
  block/merge: remove unnecessary min() with UINT_MAX
  md/raid*: Fix the set_queue_limits implementations
  • Loading branch information
torvalds committed Feb 21, 2025
2 parents f679ebf + 7055044 commit 8a61cb6
Show file tree
Hide file tree
Showing 16 changed files with 216 additions and 150 deletions.
7 changes: 5 additions & 2 deletions block/blk-merge.c
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
unsigned max_segs, unsigned max_bytes)
{
unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
unsigned max_len = max_bytes - *bytes;
unsigned len = min(bv->bv_len, max_len);
unsigned total_len = 0;
unsigned seg_size = 0;
Expand Down Expand Up @@ -556,11 +556,14 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
{
struct req_iterator iter = {
.bio = rq->bio,
.iter = rq->bio->bi_iter,
};
struct phys_vec vec;
int nsegs = 0;

/* the internal flush request may not have bio attached */
if (iter.bio)
iter.iter = iter.bio->bi_iter;

while (blk_map_iter_next(rq, &iter, &vec)) {
*last_sg = blk_next_sg(last_sg, sglist);
sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
Expand Down
4 changes: 1 addition & 3 deletions drivers/md/raid0.c
Original file line number Diff line number Diff line change
Expand Up @@ -386,10 +386,8 @@ static int raid0_set_limits(struct mddev *mddev)
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
if (err)
return err;
}
return queue_limits_set(mddev->gendisk->queue, &lim);
}

Expand Down
4 changes: 1 addition & 3 deletions drivers/md/raid1.c
Original file line number Diff line number Diff line change
Expand Up @@ -3219,10 +3219,8 @@ static int raid1_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
if (err)
return err;
}
return queue_limits_set(mddev->gendisk->queue, &lim);
}

Expand Down
4 changes: 1 addition & 3 deletions drivers/md/raid10.c
Original file line number Diff line number Diff line change
Expand Up @@ -4020,10 +4020,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
if (err)
return err;
}
return queue_limits_set(mddev->gendisk->queue, &lim);
}

Expand Down
55 changes: 38 additions & 17 deletions drivers/nvme/host/apple.c
Original file line number Diff line number Diff line change
Expand Up @@ -1011,25 +1011,37 @@ static void apple_nvme_reset_work(struct work_struct *work)
ret = apple_rtkit_shutdown(anv->rtk);
if (ret)
goto out;

writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}

writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
/*
* Only do the soft-reset if the CPU is not running, which means either we
* or the previous stage shut it down cleanly.
*/
if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {

ret = reset_control_assert(anv->reset);
if (ret)
goto out;
ret = reset_control_assert(anv->reset);
if (ret)
goto out;

ret = apple_rtkit_reinit(anv->rtk);
if (ret)
goto out;
ret = apple_rtkit_reinit(anv->rtk);
if (ret)
goto out;

ret = reset_control_deassert(anv->reset);
if (ret)
goto out;
ret = reset_control_deassert(anv->reset);
if (ret)
goto out;

writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

ret = apple_rtkit_boot(anv->rtk);
} else {
ret = apple_rtkit_wake(anv->rtk);
}

writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
ret = apple_rtkit_boot(anv->rtk);
if (ret) {
dev_err(anv->dev, "ANS did not boot");
goto out;
Expand Down Expand Up @@ -1516,6 +1528,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)

return anv;
put_dev:
apple_nvme_detach_genpd(anv);
put_device(anv->dev);
return ERR_PTR(ret);
}
Expand Down Expand Up @@ -1549,6 +1562,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
nvme_put_ctrl(&anv->ctrl);
apple_nvme_detach_genpd(anv);
return ret;
}

Expand All @@ -1563,9 +1577,12 @@ static void apple_nvme_remove(struct platform_device *pdev)
apple_nvme_disable(anv, true);
nvme_uninit_ctrl(&anv->ctrl);

if (apple_rtkit_is_running(anv->rtk))
if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);

writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}

apple_nvme_detach_genpd(anv);
}

Expand All @@ -1574,8 +1591,11 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
struct apple_nvme *anv = platform_get_drvdata(pdev);

apple_nvme_disable(anv, true);
if (apple_rtkit_is_running(anv->rtk))
if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);

writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}
}

static int apple_nvme_resume(struct device *dev)
Expand All @@ -1592,10 +1612,11 @@ static int apple_nvme_suspend(struct device *dev)

apple_nvme_disable(anv, true);

if (apple_rtkit_is_running(anv->rtk))
if (apple_rtkit_is_running(anv->rtk)) {
ret = apple_rtkit_shutdown(anv->rtk);

writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}

return ret;
}
Expand Down
2 changes: 0 additions & 2 deletions drivers/nvme/host/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -564,8 +564,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
fallthrough;
Expand Down
67 changes: 6 additions & 61 deletions drivers/nvme/host/fc.c
Original file line number Diff line number Diff line change
Expand Up @@ -781,61 +781,12 @@ nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
enum nvme_ctrl_state state;
unsigned long flags;

dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);

spin_lock_irqsave(&ctrl->lock, flags);
set_bit(ASSOC_FAILED, &ctrl->flags);
state = nvme_ctrl_state(&ctrl->ctrl);
spin_unlock_irqrestore(&ctrl->lock, flags);

switch (state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
/*
* Schedule a controller reset. The reset will terminate the
* association and schedule the reconnect timer. Reconnects
* will be attempted until either the ctlr_loss_tmo
* (max_retries * connect_delay) expires or the remoteport's
* dev_loss_tmo expires.
*/
if (nvme_reset_ctrl(&ctrl->ctrl)) {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Couldn't schedule reset.\n",
ctrl->cnum);
nvme_delete_ctrl(&ctrl->ctrl);
}
break;

case NVME_CTRL_CONNECTING:
/*
* The association has already been terminated and the
* controller is attempting reconnects. No need to do anything
* futher. Reconnects will be attempted until either the
* ctlr_loss_tmo (max_retries * connect_delay) expires or the
* remoteport's dev_loss_tmo expires.
*/
break;

case NVME_CTRL_RESETTING:
/*
* Controller is already in the process of terminating the
* association. No need to do anything further. The reconnect
* step will kick in naturally after the association is
* terminated.
*/
break;

case NVME_CTRL_DELETING:
case NVME_CTRL_DELETING_NOIO:
default:
/* no action to take - let it delete */
break;
}
nvme_reset_ctrl(&ctrl->ctrl);
}

/**
Expand Down Expand Up @@ -3071,7 +3022,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
int ret;
bool changed;

++ctrl->ctrl.nr_reconnects;

Expand Down Expand Up @@ -3177,23 +3127,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
ret = -EIO;
if (ret)
goto out_term_aen_ops;

spin_lock_irqsave(&ctrl->lock, flags);
if (!test_bit(ASSOC_FAILED, &ctrl->flags))
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
else
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) {
ret = -EIO;
spin_unlock_irqrestore(&ctrl->lock, flags);

if (ret)
goto out_term_aen_ops;
}

ctrl->ctrl.nr_reconnects = 0;

if (changed)
nvme_start_ctrl(&ctrl->ctrl);
nvme_start_ctrl(&ctrl->ctrl);

return 0; /* Success */

Expand Down
3 changes: 1 addition & 2 deletions drivers/nvme/host/ioctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -283,8 +283,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
{
if (ns && nsid != ns->head->ns_id) {
dev_err(ctrl->device,
"%s: nsid (%u) in cmd does not match nsid (%u)"
"of namespace\n",
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, nsid, ns->head->ns_id);
return false;
}
Expand Down
2 changes: 2 additions & 0 deletions drivers/nvme/host/pci.c
Original file line number Diff line number Diff line change
Expand Up @@ -3706,6 +3706,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
Expand Down
50 changes: 48 additions & 2 deletions drivers/nvme/host/tcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -763,6 +763,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return 0;
}

/*
 * Log a Controller-to-Host Terminate Request (C2HTermReq) PDU.
 *
 * Validates the PDU length field against the C2HTermReq bounds and, when
 * it is sane, decodes the Fatal Error Status (FES) field into a
 * human-readable string for the error log.  Diagnostic only — no queue
 * state is changed here; the caller decides how to tear down the
 * connection.
 */
static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
		struct nvme_tcp_term_pdu *pdu)
{
	static const char * const fes_names[] = {
		[NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
		[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
		[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
		[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
		[NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
		[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
	};
	u32 pdu_len = le32_to_cpu(pdu->hdr.plen);
	const char *reason = "Unknown";
	u16 status;

	/* Reject a PDU whose declared length is outside the allowed window. */
	if (pdu_len < NVME_TCP_MIN_C2HTERM_PLEN ||
	    pdu_len > NVME_TCP_MAX_C2HTERM_PLEN) {
		dev_err(queue->ctrl->ctrl.device,
			"Received a malformed C2HTermReq PDU (plen = %u)\n",
			pdu_len);
		return;
	}

	/* FES 0 is not a defined error code; anything past the table is unknown. */
	status = le16_to_cpu(pdu->fes);
	if (status && status < ARRAY_SIZE(fes_names))
		reason = fes_names[status];

	dev_err(queue->ctrl->ctrl.device,
		"Received C2HTermReq (FES = %s)\n", reason);
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
Expand All @@ -784,6 +818,15 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;

hdr = queue->pdu;
if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
/*
* C2HTermReq never includes Header or Data digests.
* Skip the checks.
*/
nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
return -EINVAL;
}

if (queue->hdr_digest) {
ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
if (unlikely(ret))
Expand Down Expand Up @@ -1449,11 +1492,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
if (ret < 0) {
if (ret < sizeof(*icresp)) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
if (ret >= 0)
ret = -ECONNRESET;
goto free_icresp;
}
ret = -ENOTCONN;
Expand Down Expand Up @@ -1565,7 +1611,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}

/**
/*
* Track the number of queues assigned to each cpu using a global per-cpu
* counter and select the least used cpu from the mq_map. Our goal is to spread
* different controllers I/O threads across different cpu cores.
Expand Down
Loading

0 comments on commit 8a61cb6

Please sign in to comment.