mirror of https://github.com/EEVengers/ThunderScope.git
synced 2025-04-03 05:16:33 +00:00
Merge pull request #256 from EEVengers/xdma-linux-kernel-6.8
Changes required to get the XDMA driver working on kernel 6.8
This commit is contained in: 12a9b479c4
Software/xdma_driver_linux/xdma
@@ -156,8 +156,8 @@ static long version_ioctl(struct xdma_cdev *xcdev, void __user *arg)
 	obj.subsystem_device = xdev->pdev->subsystem_device;
 	obj.feature_id = xdev->feature_id;
 	obj.driver_version = DRV_MOD_VERSION_NUMBER;
-	obj.domain = 0;
-	obj.bus = PCI_BUS_NUM(xdev->pdev->devfn);
+	obj.domain = xdev->pdev->slot->number;
+	obj.bus = xdev->pdev->bus->number;
 	obj.dev = PCI_SLOT(xdev->pdev->devfn);
 	obj.func = PCI_FUNC(xdev->pdev->devfn);
 	if (copy_to_user(arg, &obj, sizeof(struct xdma_ioc_info)))
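The removed lines above were wrong on real hardware: PCI_BUS_NUM() expects a combined bus/devfn value, so applying it to pdev->devfn alone always reported bus 0. A minimal sketch of the conventional way to decompose a PCI address; fill_bdf() is an illustrative helper (not in this commit), and pci_domain_nr() is shown for the domain where the hunk instead reads pdev->slot->number:

#include <linux/pci.h>

static void fill_bdf(struct pci_dev *pdev, u32 *domain, u32 *bus,
		     u32 *dev, u32 *func)
{
	*domain = pci_domain_nr(pdev->bus);	/* PCI segment/domain */
	*bus = pdev->bus->number;		/* bus the device sits on */
	*dev = PCI_SLOT(pdev->devfn);		/* device (slot) number */
	*func = PCI_FUNC(pdev->devfn);		/* function number */
}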
@@ -235,9 +235,9 @@ int bridge_mmap(struct file *file, struct vm_area_struct *vma)
 	struct xdma_dev *xdev;
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	unsigned long off;
-	unsigned long phys;
+	resource_size_t phys;
 	unsigned long vsize;
-	unsigned long psize;
+	resource_size_t psize;
 	int rv;
 
 	rv = xcdev_check(__func__, xcdev, 0);
@@ -274,7 +274,11 @@ int bridge_mmap(struct file *file, struct vm_area_struct *vma)
 	 * prevent touching the pages (byte access) for swap-in,
 	 * and prevent the pages from being swapped out
 	 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
 	vma->vm_flags |= VMEM_FLAGS;
+#else
+	vm_flags_set(vma, VMEM_FLAGS);
+#endif
 	/* make MMIO accessible to user space */
 	rv = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 			vsize, vma->vm_page_prot);
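Kernel 6.3 made vma->vm_flags modifiable only through accessors such as vm_flags_set(), which is what the #if above tracks. A minimal sketch of a shim that would keep call sites free of version checks; xdma_vm_flags_set() is a hypothetical name:

#include <linux/mm.h>
#include <linux/version.h>

static inline void xdma_vm_flags_set(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
	vma->vm_flags |= flags;		/* direct write, allowed pre-6.3 */
#else
	vm_flags_set(vma, flags);	/* accessor required from 6.3 on */
#endif
}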
@@ -36,13 +36,13 @@
 #include "xdma_thread.h"
 
 /* Module Parameters */
-unsigned int h2c_timeout = 10;
-module_param(h2c_timeout, uint, 0644);
-MODULE_PARM_DESC(h2c_timeout, "H2C sgdma timeout in seconds, default is 10 sec.");
+unsigned int h2c_timeout_ms = 10000;
+module_param(h2c_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(h2c_timeout_ms, "H2C sgdma timeout in milliseconds, default is 10 seconds.");
 
-unsigned int c2h_timeout = 10;
-module_param(c2h_timeout, uint, 0644);
-MODULE_PARM_DESC(c2h_timeout, "C2H sgdma timeout in seconds, default is 10 sec.");
+unsigned int c2h_timeout_ms = 10000;
+module_param(c2h_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(c2h_timeout_ms, "C2H sgdma timeout in milliseconds, default is 10 seconds.");
 
 extern struct kmem_cache *cdev_cache;
 static void char_sgdma_unmap_user_buf(struct xdma_io_cb *cb, bool write);
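Carrying the timeout in milliseconds removes the "* 1000" scaling at every transfer call, as the hunks below show. A tiny illustrative helper (not in the driver) capturing the new call-site shape:

#include <linux/types.h>

extern unsigned int h2c_timeout_ms;	/* module parameters above */
extern unsigned int c2h_timeout_ms;

/* pick the per-direction timeout, already in milliseconds */
static inline unsigned int xdma_dir_timeout_ms(bool write)
{
	return write ? h2c_timeout_ms : c2h_timeout_ms;
}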
@@ -89,8 +89,7 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	numbytes = xdma_xfer_completion((void *)cb, xdev,
 			engine->channel, cb->write, cb->ep_addr,
 			&cb->sgt, 0,
-			cb->write ? h2c_timeout * 1000 :
-				c2h_timeout * 1000);
+			cb->write ? h2c_timeout_ms : c2h_timeout_ms);
 
 	char_sgdma_unmap_user_buf(cb, cb->write);
 
@@ -104,7 +103,9 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	if (caio->cmpl_cnt == caio->req_cnt) {
 		res = caio->res;
 		res2 = caio->res2;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
+		caio->iocb->ki_complete(caio->iocb, caio->err_cnt ? res2 : res);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
 		caio->iocb->ki_complete(caio->iocb, res, res2);
 #else
 		aio_complete(caio->iocb, res, res2);
@@ -119,7 +120,9 @@ skip_tran:
 	return;
 
 skip_dev_lock:
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
+	caio->iocb->ki_complete(caio->iocb, -EBUSY);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
	caio->iocb->ki_complete(caio->iocb, numbytes, -EBUSY);
 #else
 	aio_complete(caio->iocb, numbytes, -EBUSY);
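Both hunks above track the 5.16 change that dropped the second result argument from ki_complete(). A minimal sketch of a completion helper that would fold the three-way conditional into one place; xdma_aio_done() is a hypothetical name:

#include <linux/fs.h>
#include <linux/version.h>

static void xdma_aio_done(struct kiocb *iocb, long res, long res2)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
	iocb->ki_complete(iocb, res);		/* single-result form */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
	iocb->ki_complete(iocb, res, res2);	/* two-result form */
#else
	aio_complete(iocb, res, res2);		/* pre-4.1 kernel AIO */
#endif
}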
@@ -392,8 +395,7 @@ static ssize_t char_sgdma_read_write(struct file *file, const char __user *buf,
 		return rv;
 
 	res = xdma_xfer_submit(xdev, engine->channel, write, *pos, &cb.sgt,
-				0, write ? h2c_timeout * 1000 :
-					c2h_timeout * 1000);
+				0, write ? h2c_timeout_ms : c2h_timeout_ms);
 
 	char_sgdma_unmap_user_buf(&cb, write);
 
@@ -476,7 +478,7 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
 				engine->channel, caio->cb[i].write,
 				caio->cb[i].ep_addr, &caio->cb[i].sgt,
-				0, h2c_timeout * 1000);
+				0, h2c_timeout_ms);
 	}
 
 	if (engine->cmplthp)
@@ -550,7 +552,7 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
 				engine->channel, caio->cb[i].write,
 				caio->cb[i].ep_addr, &caio->cb[i].sgt,
-				0, c2h_timeout * 1000);
+				0, c2h_timeout_ms);
 	}
 
 	if (engine->cmplthp)
@@ -560,6 +562,8 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 	}
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
 static ssize_t cdev_write_iter(struct kiocb *iocb, struct iov_iter *io)
 {
 	return cdev_aio_write(iocb, io->iov, io->nr_segs, io->iov_offset);
@@ -569,6 +573,18 @@ static ssize_t cdev_read_iter(struct kiocb *iocb, struct iov_iter *io)
 {
 	return cdev_aio_read(iocb, io->iov, io->nr_segs, io->iov_offset);
 }
+#else
+static ssize_t cdev_write_iter(struct kiocb *iocb, struct iov_iter *io)
+{
+	return cdev_aio_write(iocb, iter_iov(io), io->nr_segs, io->iov_offset);
+}
+
+static ssize_t cdev_read_iter(struct kiocb *iocb, struct iov_iter *io)
+{
+	return cdev_aio_read(iocb, iter_iov(io), io->nr_segs, io->iov_offset);
+}
+#endif
 
 #endif
 
 static int ioctl_do_perf_start(struct xdma_engine *engine, unsigned long arg)
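Kernel 6.4 hid iov_iter's iovec pointer behind the iter_iov() accessor, which is why the two hunks above duplicate cdev_write_iter()/cdev_read_iter(). A minimal compat sketch; xdma_iter_iov() is a hypothetical wrapper:

#include <linux/uio.h>
#include <linux/version.h>

static inline const struct iovec *xdma_iter_iov(const struct iov_iter *i)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
	return i->iov;		/* direct member access, pre-6.4 */
#else
	return iter_iov(i);	/* accessor, 6.4 onward */
#endif
}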
@@ -776,8 +792,7 @@ static int ioctl_do_aperture_dma(struct xdma_engine *engine, unsigned long arg,
 
 	io.error = 0;
 	res = xdma_xfer_aperture(engine, write, io.ep_addr, io.aperture,
-			&cb.sgt, 0, write ? h2c_timeout * 1000 :
-				c2h_timeout * 1000);
+			&cb.sgt, 0, write ? h2c_timeout_ms : c2h_timeout_ms);
 
 	char_sgdma_unmap_user_buf(&cb, write);
 	if (res < 0)
@@ -966,10 +966,10 @@ engine_service_final_transfer(struct xdma_engine *engine,
 		}
 	}
 
-	transfer->desc_cmpl += *pdesc_completed;
 	if (!(transfer->flags & XFER_FLAG_ST_C2H_EOP_RCVED)) {
 		return NULL;
 	}
+	transfer->desc_cmpl = *pdesc_completed;
 
 	/* mark transfer as successfully completed */
 	engine_service_shutdown(engine);
@@ -1002,6 +1002,7 @@ engine_service_final_transfer(struct xdma_engine *engine,
 			WARN_ON(*pdesc_completed > transfer->desc_num);
 		}
 		/* mark transfer as successfully completed */
 		engine_service_shutdown(engine);
 		transfer->state = TRANSFER_STATE_COMPLETED;
+		transfer->desc_cmpl = transfer->desc_num;
 		/* add dequeued number of descriptors during this run */
@@ -2327,12 +2328,7 @@ static void xdma_desc_link(struct xdma_desc *first, struct xdma_desc *second,
 /* xdma_desc_adjacent -- Set how many descriptors are adjacent to this one */
 static void xdma_desc_adjacent(struct xdma_desc *desc, u32 next_adjacent)
 {
-	/* remember reserved and control bits */
-	u32 control = le32_to_cpu(desc->control) & 0x0000f0ffUL;
-	/* merge adjacent and control field */
-	control |= 0xAD4B0000UL | (next_adjacent << 8);
-	/* write control and next_adjacent */
-	desc->control = cpu_to_le32(control);
+	desc->control = cpu_to_le32(le32_to_cpu(desc->control) | next_adjacent << 8);
 }
 
 /* xdma_desc_control -- Set complete control field of a descriptor. */
@@ -2908,8 +2904,13 @@ static void transfer_destroy(struct xdma_dev *xdev, struct xdma_transfer *xfer)
 	struct sg_table *sgt = xfer->sgt;
 
 	if (sgt->nents) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->nents,
 			     xfer->dir);
+#else
+		dma_unmap_sg(&xdev->pdev->dev, sgt->sgl, sgt->nents,
+			     xfer->dir);
+#endif
 		sgt->nents = 0;
 	}
 }
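This is the first of many hunks gating the scatterlist mapping calls: the legacy pci_map_sg()/pci_unmap_sg() wrappers are gone from recent mainline kernels, leaving the generic DMA API, and this commit uses 5.16 as the cutoff throughout. A sketch of a wrapper that would express the choice once instead of per call site; xdma_unmap_sgt() is illustrative:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/version.h>

static void xdma_unmap_sgt(struct pci_dev *pdev, struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt->nents)
		return;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
	pci_unmap_sg(pdev, sgt->sgl, sgt->nents, dir);
#else
	dma_unmap_sg(&pdev->dev, sgt->sgl, sgt->nents, dir);
#endif
	sgt->nents = 0;
}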
@@ -3192,8 +3193,13 @@ ssize_t xdma_xfer_aperture(struct xdma_engine *engine, bool write, u64 ep_addr,
 	}
 
 	if (!dma_mapped) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		sgt->nents = pci_map_sg(xdev->pdev, sgt->sgl, sgt->orig_nents,
 					dir);
+#else
+		sgt->nents = dma_map_sg(&xdev->pdev->dev, sgt->sgl,
+					sgt->orig_nents, dir);
+#endif
 		if (!sgt->nents) {
 			pr_info("map sgl failed, sgt 0x%p.\n", sgt);
 			return -EIO;
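The map side gets the same treatment; a matching sketch (xdma_map_sgt() is illustrative), returning -EIO on a failed mapping as the driver does:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/version.h>

static int xdma_map_sgt(struct pci_dev *pdev, struct sg_table *sgt,
			enum dma_data_direction dir)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
	sgt->nents = pci_map_sg(pdev, sgt->sgl, sgt->orig_nents, dir);
#else
	sgt->nents = dma_map_sg(&pdev->dev, sgt->sgl, sgt->orig_nents, dir);
#endif
	return sgt->nents ? 0 : -EIO;
}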
@@ -3434,7 +3440,11 @@ ssize_t xdma_xfer_aperture(struct xdma_engine *engine, bool write, u64 ep_addr,
 
 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->orig_nents, dir);
+#else
+		dma_unmap_sg(&xdev->pdev->dev, sgt->sgl, sgt->orig_nents, dir);
+#endif
 		sgt->nents = 0;
 	}
 
@@ -3504,7 +3514,12 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 	}
 
 	if (!dma_mapped) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		nents = pci_map_sg(xdev->pdev, sg, sgt->orig_nents, dir);
+#else
+		nents = dma_map_sg(&xdev->pdev->dev, sg, sgt->orig_nents, dir);
+#endif
+
 		if (!nents) {
 			pr_info("map sgl failed, sgt 0x%p.\n", sgt);
 			return -EIO;
@@ -3597,8 +3612,8 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 			for (i = 0; i < xfer->desc_cmpl; i++)
 				done += result[i].length;
 
-			/* finish the whole request */
-			if (engine->eop_flush)
+			/* finish the whole request when EOP revcived */
+			if (engine->eop_flush && (xfer->flags & XFER_FLAG_ST_C2H_EOP_RCVED))
 				nents = 0;
 		} else
 			done += xfer->len;
@@ -3660,7 +3675,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 
 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->orig_nents, dir);
+#else
+		dma_unmap_sg(&xdev->pdev->dev, sgt->sgl, sgt->orig_nents, dir);
+#endif
 		sgt->nents = 0;
 	}
 
@@ -3781,7 +3800,11 @@ ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel,
 
 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->orig_nents, dir);
+#else
+		dma_unmap_sg(&xdev->pdev->dev, sgt->sgl, sgt->orig_nents, dir);
+#endif
 		sgt->nents = 0;
 	}
 
@@ -3855,7 +3878,11 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel,
 	}
 
 	if (!dma_mapped) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		nents = pci_map_sg(xdev->pdev, sg, sgt->orig_nents, dir);
+#else
+		nents = dma_map_sg(&xdev->pdev->dev, sg, sgt->orig_nents, dir);
+#endif
 		if (!nents) {
 			pr_info("map sgl failed, sgt 0x%p.\n", sgt);
 			return -EIO;
@@ -3895,8 +3922,13 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel,
 		pr_info("transfer_init failed\n");
 
 		if (!dma_mapped && sgt->nents) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 			pci_unmap_sg(xdev->pdev, sgt->sgl,
 					sgt->orig_nents, dir);
+#else
+			dma_unmap_sg(&xdev->pdev->dev, sgt->sgl,
+					sgt->orig_nents, dir);
+#endif
 			sgt->nents = 0;
 		}
 
@@ -3943,7 +3975,11 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel,
 
 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->orig_nents, dir);
+#else
+		dma_unmap_sg(&xdev->pdev->dev, sgt->sgl, sgt->orig_nents, dir);
+#endif
 		sgt->nents = 0;
 	}
 
@@ -4191,18 +4227,31 @@ static int set_dma_mask(struct pci_dev *pdev)
 	dbg_init("sizeof(dma_addr_t) == %ld\n", sizeof(dma_addr_t));
 	/* 64-bit addressing capability for XDMA? */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+#else
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+#endif
+	{
 		/* query for DMA transfer */
 		/* @see Documentation/DMA-mapping.txt */
-		dbg_init("pci_set_dma_mask()\n");
+		dbg_init("set_dma_mask(64)\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+#endif
 		/* use 64-bit DMA */
 		dbg_init("Using a 64-bit DMA mask.\n");
-		/* use 32-bit DMA for descriptors */
-		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		/* use 64-bit DMA, 32-bit for consistent */
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	} else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+#else
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+#endif
+	{
 		dbg_init("Could not set 64-bit DMA mask.\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
 		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+#endif
 		/* use 32-bit DMA */
 		dbg_init("Using a 32-bit DMA mask.\n");
 	} else {
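On kernels where the pci_* DMA-mask wrappers no longer exist, dma_set_mask_and_coherent() sets the streaming and coherent masks in a single call. A minimal sketch of what set_dma_mask() reduces to on new kernels only, without the compat branches (illustrative, not the committed code):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int xdma_set_dma_mask(struct pci_dev *pdev)
{
	/* try 64-bit streaming + coherent DMA first */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;
	/* fall back to 32-bit addressing */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;
	return -EINVAL;		/* no usable DMA addressing */
}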
@@ -4386,7 +4435,7 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
 	/* allocate zeroed device book keeping structure */
 	xdev = alloc_dev_instance(pdev);
 	if (!xdev)
-		return NULL;
+		goto err_alloc_dev_instance;
 	xdev->mod_name = mname;
 	xdev->user_max = *user_max;
 	xdev->h2c_channel_max = *h2c_channel_max;
@@ -4405,12 +4454,12 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
 
 	rv = xdev_list_add(xdev);
 	if (rv < 0)
-		goto free_xdev;
+		goto err_xdev_list_add;
 
 	rv = pci_enable_device(pdev);
 	if (rv) {
 		dbg_init("pci_enable_device() failed, %d.\n", rv);
-		goto err_enable;
+		goto err_pci_enable_device;
 	}
 
 	/* keep INTx enabled */
@@ -4433,15 +4482,15 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
 
 	rv = request_regions(xdev, pdev);
 	if (rv)
-		goto err_regions;
+		goto err_request_regions;
 
 	rv = map_bars(xdev, pdev);
 	if (rv)
-		goto err_map;
+		goto err_map_bars;
 
 	rv = set_dma_mask(pdev);
 	if (rv)
-		goto err_mask;
+		goto err_set_dma_mask;
 
 	check_nonzero_interrupt_status(xdev);
 	/* explicitely zero all interrupt enable masks */
@@ -4451,15 +4500,15 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
 
 	rv = probe_engines(xdev);
 	if (rv)
-		goto err_mask;
+		goto err_probe_engines;
 
 	rv = enable_msi_msix(xdev, pdev);
 	if (rv < 0)
-		goto err_engines;
+		goto err_enable_msi_msix;
 
 	rv = irq_setup(xdev, pdev);
 	if (rv < 0)
-		goto err_msix;
+		goto err_irq_setup;
 
 	if (!poll_mode)
 		channel_interrupts_enable(xdev, ~0);
@@ -4474,22 +4523,24 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
 	xdma_device_flag_clear(xdev, XDEV_FLAG_OFFLINE);
 	return (void *)xdev;
 
-err_msix:
+err_irq_setup:
 	disable_msi_msix(xdev, pdev);
-err_engines:
+err_enable_msi_msix:
 	remove_engines(xdev);
-err_mask:
+err_probe_engines:
+err_set_dma_mask:
 	unmap_bars(xdev, pdev);
-err_map:
+err_map_bars:
 	if (xdev->got_regions)
 		pci_release_regions(pdev);
-err_regions:
+err_request_regions:
 	if (!xdev->regions_in_use)
 		pci_disable_device(pdev);
-err_enable:
+err_pci_enable_device:
 	xdev_list_remove(xdev);
-free_xdev:
+err_xdev_list_add:
 	kfree(xdev);
+err_alloc_dev_instance:
 	return NULL;
 }
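The unwind hunks above rename every error label after the step whose failure jumps to it and add the two labels the old ladder lacked (err_set_dma_mask, err_alloc_dev_instance), so the cleanup list now reads top-to-bottom against the setup order. An illustrative skeleton of the convention, with placeholder functions standing in for the driver's setup steps:

static int example_step_a(void) { return 0; }	/* placeholders only */
static int example_step_b(void) { return 0; }
static void example_undo_a(void) { }

static int example_setup(void)
{
	int rv;

	rv = example_step_a();
	if (rv)
		goto err_step_a;
	rv = example_step_b();
	if (rv)
		goto err_step_b;
	return 0;

err_step_b:
	example_undo_a();	/* undo everything completed before step_b */
err_step_a:
	return rv;
}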
@@ -53,6 +53,12 @@
 # define HAS_SWAKE_UP (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
 #endif
 
+#if defined(RHEL_RELEASE_CODE)
+# define PCI_AER_NAMECHANGE (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3))
+#else
+# define PCI_AER_NAMECHANGE (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+#endif
+
 #if HAS_SWAKE_UP
 #include <linux/swait.h>
 #endif
@@ -603,7 +603,11 @@ fail:
 
 int xdma_cdev_init(void)
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
 	g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME);
+#else
+	g_xdma_class = class_create(XDMA_NODE_NAME);
+#endif
 	if (IS_ERR(g_xdma_class)) {
 		dbg_init(XDMA_NODE_NAME ": failed to create class");
 		return -EINVAL;
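class_create() lost its module argument in kernel 6.4. A compat macro is a common way to keep call sites clean; a hedged sketch, where XDMA_CLASS_CREATE is not part of this commit:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
#define XDMA_CLASS_CREATE(name)	class_create(THIS_MODULE, (name))
#else
#define XDMA_CLASS_CREATE(name)	class_create(name)
#endif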
@@ -30,7 +30,7 @@
 #include "xdma_cdev.h"
 #include "version.h"
 
-#define DRV_MODULE_NAME "xdma"
+#define DRV_MODULE_NAME "xdma-chr"
 #define DRV_MODULE_DESC "Xilinx XDMA Reference Driver"
 
 static char version[] =
@@ -173,13 +173,13 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	if (xpdev->h2c_channel_max > XDMA_CHANNEL_NUM_MAX) {
-		pr_err("Maximun H2C channel limit reached\n");
+		pr_err("Maximum H2C channel limit reached\n");
 		rv = -EINVAL;
 		goto err_out;
 	}
 
 	if (xpdev->c2h_channel_max > XDMA_CHANNEL_NUM_MAX) {
-		pr_err("Maximun C2H channel limit reached\n");
+		pr_err("Maximum C2H channel limit reached\n");
 		rv = -EINVAL;
 		goto err_out;
 	}
@@ -293,11 +293,12 @@ static void xdma_error_resume(struct pci_dev *pdev)
 	struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev);
 
 	pr_info("dev 0x%p,0x%p.\n", pdev, xpdev);
-#if KERNEL_VERSION(5, 7, 0) <= LINUX_VERSION_CODE
+#if PCI_AER_NAMECHANGE
 	pci_aer_clear_nonfatal_status(pdev);
 #else
 	pci_cleanup_aer_uncorrect_error_status(pdev);
 #endif
+
 }
 
 #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
@@ -359,8 +360,8 @@ static int xdma_mod_init(void)
 
 	if (desc_blen_max > XDMA_DESC_BLEN_MAX)
 		desc_blen_max = XDMA_DESC_BLEN_MAX;
-	pr_info("desc_blen_max: 0x%x/%u, timeout: h2c %u c2h %u sec.\n",
-		desc_blen_max, desc_blen_max, h2c_timeout, c2h_timeout);
+	pr_info("desc_blen_max: 0x%x/%u, timeout: h2c %u c2h %u (ms)\n",
+		desc_blen_max, desc_blen_max, h2c_timeout_ms, c2h_timeout_ms);
 
 	rv = xdma_cdev_init();
 	if (rv < 0)
@@ -55,8 +55,8 @@
 #define MAGIC_BITSTREAM 0xBBBBBBBBUL
 
 extern unsigned int desc_blen_max;
-extern unsigned int h2c_timeout;
-extern unsigned int c2h_timeout;
+extern unsigned int h2c_timeout_ms;
+extern unsigned int c2h_timeout_ms;
 
 struct xdma_cdev {
 	unsigned long magic;	/* structure ID for sanity checks */