Ubuntu 22.04 kernel 6.2 Changes (#8374)
XRT changes to support kernels 6.2 and 6.5.

Signed-off-by: suresh avula <[email protected]>
Co-authored-by: Bhyaswanth <[email protected]>
sureshreddyavula and Bhyaswanth committed Sep 4, 2024
1 parent 00a701a commit 9cd30c4
Showing 30 changed files with 265 additions and 79 deletions.
14 changes: 14 additions & 0 deletions src/runtime_src/core/edge/drm/zocl/zocl_drv.c
@@ -405,7 +405,12 @@ zocl_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags &= ~VM_PFNMAP;
+#else
+vm_flags_clear(vma, VM_PFNMAP);
+#endif

vma->vm_pgoff = 0;

gem_obj = vma->vm_private_data;
@@ -483,8 +488,13 @@ static int zocl_mmap(struct file *filp, struct vm_area_struct *vma)
rc = zocl_iommu_map_bo(dev, bo);
if (rc)
return rc;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
+#else
+vm_flags_clear(vma, VM_PFNMAP);
+vm_flags_set(vma, VM_MIXEDMAP);
+#endif
/* Reset the fake offset used to identify the BO */
vma->vm_pgoff = 0;
return 0;
@@ -522,8 +532,12 @@ static int zocl_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO;
vma->vm_flags |= VM_RESERVED;
+#else
+vm_flags_set(vma, VM_IO | VM_RESERVED);
+#endif

vma->vm_ops = &reg_physical_vm_ops;
rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
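Note on the blocks above: since Linux 6.3 the vm_flags field of struct vm_area_struct can no longer be written directly (it is guarded to enforce VMA locking), and drivers must go through the vm_flags_set()/vm_flags_clear() helpers, which is exactly what the new #else branches do. To avoid repeating the #if at every call site, a driver can hide the split behind a small shim. A minimal sketch, assuming hypothetical xocl_-prefixed helper names that are not part of this commit:

#include <linux/mm.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
/* Pre-6.3: vm_flags is an ordinary field and may be written directly. */
static inline void xocl_vm_flags_set(struct vm_area_struct *vma,
				     unsigned long flags)
{
	vma->vm_flags |= flags;
}

static inline void xocl_vm_flags_clear(struct vm_area_struct *vma,
				       unsigned long flags)
{
	vma->vm_flags &= ~flags;
}
#else
/* 6.3+: use the locking-aware helpers from <linux/mm.h>. */
#define xocl_vm_flags_set(vma, flags)	vm_flags_set(vma, flags)
#define xocl_vm_flags_clear(vma, flags)	vm_flags_clear(vma, flags)
#endif

With such a shim, zocl_gem_mmap() would call xocl_vm_flags_clear(vma, VM_PFNMAP) unconditionally instead of repeating the version check at each site.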
@@ -26,7 +26,7 @@

/* Ensure compatibility with newer Linux kernels. */
/* access_ok lost its first parameter with Linux 5.0. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
#define AWSMGMT_ACCESS_OK(TYPE, ADDR, SIZE) access_ok(ADDR, SIZE)
#else
#define AWSMGMT_ACCESS_OK(TYPE, ADDR, SIZE) access_ok(TYPE, ADDR, SIZE)
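For context, the wrapper above exists because access_ok() dropped its first (type) argument in Linux 5.0; the macro discards TYPE on newer kernels, so the same call sites compile on both sides of the change. A hedged usage sketch (buf and count are illustrative names, not taken from this driver):

/* Validate a user-space buffer before writing to it. */
if (!AWSMGMT_ACCESS_OK(VERIFY_WRITE, buf, count))
	return -EFAULT;

On kernels >= 5.0 the VERIFY_WRITE token is simply dropped by the preprocessor, so it need not even be defined there.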
@@ -240,7 +240,7 @@ void qdma_request_unmap(struct pci_dev *pdev, struct qdma_request *req)
DMA_FROM_DEVICE;

if (req->use_sgt) {
-pci_unmap_sg(pdev, req->sgt->sgl, req->sgt->orig_nents, dir);
+dma_unmap_sg(&pdev->dev, req->sgt->sgl, req->sgt->orig_nents, dir);
} else {
struct qdma_sw_sg *sg = req->sgl;
unsigned int sgcnt = req->sgcnt;
@@ -250,7 +250,7 @@ void qdma_request_unmap(struct pci_dev *pdev, struct qdma_request *req)
if (!sg->pg)
break;
if (sg->dma_addr) {
-pci_unmap_page(pdev, sg->dma_addr - sg->offset,
+dma_unmap_page(&pdev->dev, sg->dma_addr - sg->offset,
PAGE_SIZE, dir);
sg->dma_addr = 0UL;
}
@@ -274,7 +274,7 @@ int qdma_request_map(struct pci_dev *pdev, struct qdma_request *req)
DMA_FROM_DEVICE;

if (req->use_sgt) {
-int nents = pci_map_sg(pdev, req->sgt->sgl,
+int nents = dma_map_sg(&pdev->dev, req->sgt->sgl,
req->sgt->orig_nents, dir);

if (!nents) {
@@ -290,9 +290,9 @@ int qdma_request_map(struct pci_dev *pdev, struct qdma_request *req)

for (i = 0; i < sgcnt; i++, sg++) {
/* !! TODO page size !! */
-sg->dma_addr = pci_map_page(pdev, sg->pg, 0,
+sg->dma_addr = dma_map_page(&pdev->dev, sg->pg, 0,
PAGE_SIZE, dir);
-if (unlikely(pci_dma_mapping_error(pdev,
+if (unlikely(dma_mapping_error(&pdev->dev,
sg->dma_addr))) {
pr_info("map sgl failed, sg %d, %u.\n",
i, sg->len);
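Background for the qdma changes above and below: the pci_map_sg()/pci_unmap_sg()/pci_map_page()/pci_unmap_page() wrappers came from the deprecated <linux/pci-dma-compat.h>, which was removed in the 5.18 cycle. They were thin shims over the generic DMA API, roughly like this sketch (paraphrased for illustration, not part of this commit):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Approximately what the removed compat wrapper did: */
static inline int compat_pci_map_sg(struct pci_dev *hwdev,
				    struct scatterlist *sgl,
				    int nents, int direction)
{
	return dma_map_sg(&hwdev->dev, sgl, nents,
			  (enum dma_data_direction)direction);
}

The conversion is therefore mechanical: pass &pdev->dev instead of pdev and keep the remaining arguments unchanged.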
@@ -95,7 +95,7 @@ static inline int flq_fill_one(struct qdma_sw_sg *sdesc,
}

mapping = dma_map_page(dev, pg, 0, PAGE_SIZE << pg_order,
-PCI_DMA_FROMDEVICE);
+DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, mapping))) {
dev_err(dev, "page 0x%p mapping error 0x%llx.\n",
pg, (unsigned long long)mapping);
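The PCI_DMA_FROMDEVICE to DMA_FROM_DEVICE change is a pure renaming. Before their removal, the legacy PCI direction constants were aliases of the generic enum dma_data_direction values, approximately as below (reference sketch, not from this commit):

/* Legacy aliases from older <linux/pci.h>: */
#define PCI_DMA_BIDIRECTIONAL	DMA_BIDIRECTIONAL
#define PCI_DMA_TODEVICE	DMA_TO_DEVICE
#define PCI_DMA_FROMDEVICE	DMA_FROM_DEVICE
#define PCI_DMA_NONE		DMA_NONE

so the substitution changes spelling, not behavior.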
@@ -538,12 +538,12 @@ static struct xlnx_dma_dev *xdev_alloc(struct qdma_dev_conf *conf)
static int pci_dma_mask_set(struct pci_dev *pdev)
{
/** 64-bit addressing capability for XDMA? */
-if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
/** use 32-bit DMA for descriptors */
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
/** use 64-bit DMA, 32-bit for consistent */
-} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
/** use 32-bit DMA */
dev_info(&pdev->dev, "Using a 32-bit DMA mask.\n");
} else {
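The mask setters translate the same way: pci_set_dma_mask() becomes dma_set_mask() and pci_set_consistent_dma_mask() becomes dma_set_coherent_mask(). Note this function deliberately keeps a 32-bit coherent mask alongside a 64-bit streaming mask so descriptor memory stays below 4 GiB. A driver without that split requirement could use the combined helper instead; a minimal sketch under that assumption (the function name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_dma_mask_set(struct pci_dev *pdev)
{
	/* One call sets both the streaming and the coherent mask. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;			/* full 64-bit DMA */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;			/* 32-bit fallback */
	dev_err(&pdev->dev, "no usable DMA mask\n");
	return -EINVAL;
}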
@@ -2243,7 +2243,7 @@ void sgl_unmap(struct pci_dev *pdev, struct qdma_sw_sg *sg, unsigned int sgcnt,
if (!sg->pg)
break;
if (sg->dma_addr) {
-pci_unmap_page(pdev, sg->dma_addr - sg->offset,
+dma_unmap_page(&pdev->dev, sg->dma_addr - sg->offset,
PAGE_SIZE, dir);
sg->dma_addr = 0UL;
}
@@ -2275,8 +2275,8 @@ int sgl_map(struct pci_dev *pdev, struct qdma_sw_sg *sgl, unsigned int sgcnt,
*/
for (i = 0; i < sgcnt; i++, sg++) {
/* !! TODO page size !! */
-sg->dma_addr = pci_map_page(pdev, sg->pg, 0, PAGE_SIZE, dir);
-if (unlikely(pci_dma_mapping_error(pdev, sg->dma_addr))) {
+sg->dma_addr = dma_map_page(&pdev->dev, sg->pg, 0, PAGE_SIZE, dir);
+if (unlikely(dma_mapping_error(&pdev->dev, sg->dma_addr))) {
pr_err("map sgl failed, sg %d, %u.\n", i, sg->len);
if (i)
sgl_unmap(pdev, sgl, i, dir);
@@ -204,7 +204,7 @@ static inline int flq_fill_page_one(struct qdma_sw_pg_sg *pg_sdesc,
}

mapping = dma_map_page(dev, pg, 0, (PAGE_SIZE << pg_order),
-PCI_DMA_FROMDEVICE);
+DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, mapping))) {
dev_err(dev, "page 0x%p mapping error 0x%llx.\n",
pg, (unsigned long long)mapping);
@@ -91,12 +91,12 @@ struct qdma_resource_lock {
static int pci_dma_mask_set(struct pci_dev *pdev)
{
/** 64-bit addressing capability for XDMA? */
-if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
/** use 64-bit DMA for descriptors */
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
/** use 64-bit DMA, 32-bit for consistent */
-} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
/** use 32-bit DMA */
dev_info(&pdev->dev, "Using a 32-bit DMA mask.\n");
} else {
33 changes: 18 additions & 15 deletions src/runtime_src/core/pcie/driver/linux/xocl/lib/libxdma.c
@@ -836,7 +836,7 @@ static void xdma_request_release(struct xdma_dev *xdev,
{
struct sg_table *sgt = req->sgt;
if (!req->dma_mapped) {
-pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->orig_nents,
+dma_unmap_sg(&xdev->pdev->dev, sgt->sgl, sgt->orig_nents,
req->dir);
sgt->nents = 0;
}
@@ -2312,7 +2312,7 @@ static int enable_msi_msix(struct xdma_dev *xdev, struct pci_dev *pdev)
int req_nvec = xdev->c2h_channel_max + xdev->h2c_channel_max +
xdev->user_max;

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
dbg_init("Enabling MSI-X\n");
rv = pci_alloc_irq_vectors(pdev, req_nvec, req_nvec,
PCI_IRQ_MSIX);
@@ -2482,7 +2482,7 @@ static int irq_msix_channel_setup(struct xdma_dev *xdev)

engine = xdev->engine_h2c;
for (i = 0; i < xdev->h2c_channel_max; i++, engine++) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
vector = pci_irq_vector(xdev->pdev, i);
#else
vector = xdev->entry[i].vector;
@@ -2500,7 +2500,7 @@

engine = xdev->engine_c2h;
for (i = 0; i < xdev->c2h_channel_max; i++, j++, engine++) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
vector = pci_irq_vector(xdev->pdev, j);
#else
vector = xdev->entry[j].vector;
@@ -2530,7 +2530,7 @@ static void irq_msix_user_teardown(struct xdma_dev *xdev)
prog_irq_msix_user(xdev, 1);

for (i = 0; i < xdev->user_max; i++, j++) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
u32 vector = pci_irq_vector(xdev->pdev, j);
#else
u32 vector = xdev->entry[j].vector;
@@ -2553,7 +2553,7 @@ static int irq_msix_user_setup(struct xdma_dev *xdev)

/* vectors set in probe_scan_for_msi() */
for (i = 0; i < xdev->user_max; i++, j++) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
u32 vector = pci_irq_vector(xdev->pdev, j);
#else
u32 vector = xdev->entry[j].vector;
@@ -2572,7 +2572,7 @@ static int irq_msix_user_setup(struct xdma_dev *xdev)
/* If any errors occur, free IRQs that were successfully requested */
if (rv) {
for (i--, j--; i >= 0; i--, j--) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
u32 vector = pci_irq_vector(xdev->pdev, j);
#else
u32 vector = xdev->entry[j].vector;
@@ -3178,7 +3178,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
}

if (!dma_mapped) {
-nents = pci_map_sg(xdev->pdev, sg, sgt->orig_nents, dir);
+nents = dma_map_sg(&xdev->pdev->dev, sg, sgt->orig_nents, dir);
if (!nents) {
xocl_pr_info("map sgl failed, sgt 0x%p.\n", sgt);
return -EIO;
@@ -3463,18 +3463,18 @@ static int set_dma_mask(struct pci_dev *pdev)

dbg_init("sizeof(dma_addr_t) == %ld\n", sizeof(dma_addr_t));
/* 64-bit addressing capability for XDMA? */
-if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
/* query for DMA transfer */
/* @see Documentation/DMA-mapping.txt */
dbg_init("pci_set_dma_mask()\n");
dbg_init("dma_set_mask()\n");
/* use 64-bit DMA */
dbg_init("Using a 64-bit DMA mask.\n");
/* use 32-bit DMA for descriptors */
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
/* use 64-bit DMA, 32-bit for consistent */
-} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dbg_init("Could not set 64-bit DMA mask.\n");
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
/* use 32-bit DMA */
dbg_init("Using a 32-bit DMA mask.\n");
} else {
@@ -3619,7 +3619,7 @@ static int probe_engines(struct xdma_dev *xdev)
return 0;
}

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static void pci_enable_capability(struct pci_dev *pdev, int cap)
{
pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, cap);
@@ -3647,7 +3647,7 @@ static int pci_check_extended_tag(struct xdma_dev *xdev, struct pci_dev *pdev)
void *__iomem reg;
#endif

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
#else
int pos;
@@ -4097,6 +4097,9 @@ MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL v2");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) || defined(RHEL_9_0_GE)
+MODULE_IMPORT_NS(DMA_BUF);
+#endif

static int __init xdma_base_init(void)
{
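The MODULE_IMPORT_NS(DMA_BUF) line added above is needed because Linux 5.16 moved the dma-buf exports into the DMA_BUF symbol namespace; a module that calls dma_buf_* functions without importing it fails at modpost with an error along the lines of "uses symbol dma_buf_get from namespace DMA_BUF, but does not import it". The RHEL_9_0_GE guard covers Red Hat 9 kernels, which the check suggests backported the namespace onto their 5.14 base. The minimal pattern looks like this sketch:

#include <linux/module.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
MODULE_IMPORT_NS(DMA_BUF);	/* dma-buf exports live here since 5.16 */
#endif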
2 changes: 1 addition & 1 deletion src/runtime_src/core/pcie/driver/linux/xocl/lib/libxdma.h
@@ -574,7 +574,7 @@ struct xdma_dev {
int irq_line; /* flag if irq allocated successfully */
int msi_enabled; /* flag if msi was enabled for the device */
int msix_enabled; /* flag if msi-x was enabled for the device */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
struct msix_entry entry[32]; /* msi-x vector/entry table */
#endif
struct xdma_user_irq user_irq[16]; /* user IRQ management */
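The entry[32] table is only kept for kernels older than 4.12; past the driver's 4.12 cutoff the PCI core owns the MSI-X vector bookkeeping and drivers look vectors up on demand with pci_irq_vector(), as the libxdma.c hunks above show. A hedged sketch of the modern pattern (the function name and nvec are illustrative):

#include <linux/pci.h>

static int example_walk_vectors(struct pci_dev *pdev, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++) {
		int vector = pci_irq_vector(pdev, i); /* Linux IRQ number */

		if (vector < 0)
			return vector;
		/* request_irq(vector, handler, 0, name, ctx) goes here */
	}
	return 0;
}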
12 changes: 12 additions & 0 deletions src/runtime_src/core/pcie/driver/linux/xocl/mgmtpf/mgmt-core.c
@@ -319,9 +319,17 @@ static int bridge_mmap(struct file *file, struct vm_area_struct *vma)
* and prevent the pages from being swapped out
*/
#ifndef VM_RESERVED
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+#else
+vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+#endif
#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_RESERVED;
+#else
+vm_flags_set(vma, VM_IO | VM_RESERVED);
+#endif
#endif

/* make MMIO accessible to user space */
@@ -1547,7 +1555,11 @@ static int __init xclmgmt_init(void)
int res, i;

pr_info(DRV_NAME " init()\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
xrt_class = class_create(THIS_MODULE, "xrt_mgmt");
+#else
+xrt_class = class_create("xrt_mgmt");
+#endif
if (IS_ERR(xrt_class))
return PTR_ERR(xrt_class);

Expand Down
8 changes: 8 additions & 0 deletions src/runtime_src/core/pcie/driver/linux/xocl/subdev/aim.c
@@ -409,9 +409,17 @@ static int aim_mmap(struct file *filp, struct vm_area_struct *vma)
* and prevent the pages from being swapped out
*/
#ifndef VM_RESERVED
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+#else
+vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+#endif
#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_RESERVED;
+#else
+vm_flags_set(vma, VM_IO | VM_RESERVED);
+#endif
#endif

/* make MMIO accessible to user space */
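The outer #ifndef VM_RESERVED split in aim_mmap() above (and in the am/asm handlers below) exists because VM_RESERVED was removed from mainline in Linux 3.7, with VM_IO | VM_DONTEXPAND | VM_DONTDUMP as the accepted replacement for MMIO mappings. A compat definition could collapse that outer split; a sketch with an assumed macro name:

#include <linux/mm.h>

/* Hypothetical helper: VMA flags for a user-space MMIO mapping. */
#ifndef VM_RESERVED
#define XOCL_MMIO_VM_FLAGS	(VM_IO | VM_DONTEXPAND | VM_DONTDUMP)
#else
#define XOCL_MMIO_VM_FLAGS	(VM_IO | VM_RESERVED)
#endif

leaving only the 6.3 vm_flags_set() distinction at each call site.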
8 changes: 8 additions & 0 deletions src/runtime_src/core/pcie/driver/linux/xocl/subdev/am.c
@@ -401,9 +401,17 @@ static int am_mmap(struct file *filp, struct vm_area_struct *vma)
* and prevent the pages from being swapped out
*/
#ifndef VM_RESERVED
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+#else
+vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+#endif
#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_RESERVED;
+#else
+vm_flags_set(vma, VM_IO | VM_RESERVED);
+#endif
#endif

/* make MMIO accessible to user space */
8 changes: 8 additions & 0 deletions src/runtime_src/core/pcie/driver/linux/xocl/subdev/asm.c
@@ -322,9 +322,17 @@ static int asm_mmap(struct file *filp, struct vm_area_struct *vma)
* and prevent the pages from being swapped out
*/
#ifndef VM_RESERVED
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+#else
+vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+#endif
#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_IO | VM_RESERVED;
+#else
+vm_flags_set(vma, VM_IO | VM_RESERVED);
+#endif
#endif

/* make MMIO accessible to user space */