Diffstat (limited to 'drivers/staging/kpc2000')
 drivers/staging/kpc2000/kpc2000/cell_probe.c      | 71
 drivers/staging/kpc2000/kpc2000/core.c            |  3
 drivers/staging/kpc2000/kpc2000/dma_common_defs.h | 17
 drivers/staging/kpc2000/kpc_dma/dma.c             | 21
 drivers/staging/kpc2000/kpc_dma/fileops.c         | 28
 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c  | 13
6 files changed, 108 insertions, 45 deletions
diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c
index 738122afc2ae..e7e963d62699 100644
--- a/drivers/staging/kpc2000/kpc2000/cell_probe.c
+++ b/drivers/staging/kpc2000/kpc2000/cell_probe.c
@@ -30,9 +30,12 @@
*
*/
-#define KPC_OLD_DMA_CH_NUM(present, channel) ((present) ? (0x8 | ((channel) & 0x7)) : 0)
-#define KPC_OLD_S2C_DMA_CH_NUM(cte) KPC_OLD_DMA_CH_NUM(cte.s2c_dma_present, cte.s2c_dma_channel_num)
-#define KPC_OLD_C2S_DMA_CH_NUM(cte) KPC_OLD_DMA_CH_NUM(cte.c2s_dma_present, cte.c2s_dma_channel_num)
+#define KPC_OLD_DMA_CH_NUM(present, channel) \
+ ((present) ? (0x8 | ((channel) & 0x7)) : 0)
+#define KPC_OLD_S2C_DMA_CH_NUM(cte) \
+ KPC_OLD_DMA_CH_NUM(cte.s2c_dma_present, cte.s2c_dma_channel_num)
+#define KPC_OLD_C2S_DMA_CH_NUM(cte) \
+ KPC_OLD_DMA_CH_NUM(cte.c2s_dma_present, cte.c2s_dma_channel_num)
#define KP_CORE_ID_INVALID 0
#define KP_CORE_ID_I2C 3
@@ -67,7 +70,8 @@ void parse_core_table_entry_v0(struct core_table_entry *cte, const u64 read_val
static
void dbg_cte(struct kp2000_device *pcard, struct core_table_entry *cte)
{
- dev_dbg(&pcard->pdev->dev, "CTE: type:%3d offset:%3d (%3d) length:%3d (%3d) s2c:%d c2s:%d irq_count:%d base_irq:%d\n",
+ dev_dbg(&pcard->pdev->dev,
+ "CTE: type:%3d offset:%3d (%3d) length:%3d (%3d) s2c:%d c2s:%d irq_count:%d base_irq:%d\n",
cte->type,
cte->offset,
cte->offset / 4096,
@@ -107,7 +111,14 @@ static int probe_core_basic(unsigned int core_num, struct kp2000_device *pcard,
.ddna = pcard->ddna,
};
- dev_dbg(&pcard->pdev->dev, "Found Basic core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n", cte.type, KPC_OLD_S2C_DMA_CH_NUM(cte), KPC_OLD_C2S_DMA_CH_NUM(cte), cte.offset, cte.length, cte.length / 8);
+ dev_dbg(&pcard->pdev->dev,
+ "Found Basic core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n",
+ cte.type,
+ KPC_OLD_S2C_DMA_CH_NUM(cte),
+ KPC_OLD_C2S_DMA_CH_NUM(cte),
+ cte.offset,
+ cte.length,
+ cte.length / 8);
cell.platform_data = &core_pdata;
cell.pdata_size = sizeof(struct kpc_core_device_platdata);
@@ -290,7 +301,14 @@ static int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard,
struct kpc_uio_device *kudev;
int rv;
- dev_dbg(&pcard->pdev->dev, "Found UIO core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n", cte.type, KPC_OLD_S2C_DMA_CH_NUM(cte), KPC_OLD_C2S_DMA_CH_NUM(cte), cte.offset, cte.length, cte.length / 8);
+ dev_dbg(&pcard->pdev->dev,
+ "Found UIO core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n",
+ cte.type,
+ KPC_OLD_S2C_DMA_CH_NUM(cte),
+ KPC_OLD_C2S_DMA_CH_NUM(cte),
+ cte.offset,
+ cte.length,
+ cte.length / 8);
kudev = kzalloc(sizeof(*kudev), GFP_KERNEL);
if (!kudev)
@@ -315,10 +333,14 @@ static int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard,
kudev->uioinfo.mem[0].name = "uiomap";
kudev->uioinfo.mem[0].addr = pci_resource_start(pcard->pdev, REG_BAR) + cte.offset;
- kudev->uioinfo.mem[0].size = (cte.length + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); // Round up to nearest PAGE_SIZE boundary
+
+ // Round up to nearest PAGE_SIZE boundary
+ kudev->uioinfo.mem[0].size = (cte.length + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
kudev->uioinfo.mem[0].memtype = UIO_MEM_PHYS;
- kudev->dev = device_create(kpc_uio_class, &pcard->pdev->dev, MKDEV(0, 0), kudev, "%s.%d.%d.%d", kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num);
+ kudev->dev = device_create(kpc_uio_class,
+ &pcard->pdev->dev, MKDEV(0, 0), kudev, "%s.%d.%d.%d",
+ kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num);
if (IS_ERR(kudev->dev)) {
dev_err(&pcard->pdev->dev, "%s: device_create failed!\n",
__func__);
@@ -341,7 +363,9 @@ static int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard,
return 0;
}
-static int create_dma_engine_core(struct kp2000_device *pcard, size_t engine_regs_offset, int engine_num, int irq_num)
+static int create_dma_engine_core(struct kp2000_device *pcard,
+ size_t engine_regs_offset,
+ int engine_num, int irq_num)
{
struct mfd_cell cell = { .id = engine_num };
struct resource resources[2];
@@ -380,18 +404,28 @@ static int kp2000_setup_dma_controller(struct kp2000_device *pcard)
// S2C Engines
for (i = 0 ; i < 32 ; i++) {
- capabilities_reg = readq(pcard->dma_bar_base + KPC_DMA_S2C_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i));
+ capabilities_reg = readq(pcard->dma_bar_base +
+ KPC_DMA_S2C_BASE_OFFSET +
+ (KPC_DMA_ENGINE_SIZE * i));
+
if (capabilities_reg & ENGINE_CAP_PRESENT_MASK) {
- err = create_dma_engine_core(pcard, (KPC_DMA_S2C_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i)), i, pcard->pdev->irq);
+ err = create_dma_engine_core(pcard, (KPC_DMA_S2C_BASE_OFFSET +
+ (KPC_DMA_ENGINE_SIZE * i)),
+ i, pcard->pdev->irq);
if (err)
goto err_out;
}
}
// C2S Engines
for (i = 0 ; i < 32 ; i++) {
- capabilities_reg = readq(pcard->dma_bar_base + KPC_DMA_C2S_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i));
+ capabilities_reg = readq(pcard->dma_bar_base +
+ KPC_DMA_C2S_BASE_OFFSET +
+ (KPC_DMA_ENGINE_SIZE * i));
+
if (capabilities_reg & ENGINE_CAP_PRESENT_MASK) {
- err = create_dma_engine_core(pcard, (KPC_DMA_C2S_BASE_OFFSET + (KPC_DMA_ENGINE_SIZE * i)), 32 + i, pcard->pdev->irq);
+ err = create_dma_engine_core(pcard, (KPC_DMA_C2S_BASE_OFFSET +
+ (KPC_DMA_ENGINE_SIZE * i)),
+ 32 + i, pcard->pdev->irq);
if (err)
goto err_out;
}
@@ -433,10 +467,15 @@ int kp2000_probe_cores(struct kp2000_device *pcard)
// Then, iterate over the possible core types.
for (current_type_id = 1 ; current_type_id <= highest_core_id ; current_type_id++) {
unsigned int core_num = 0;
- // Foreach core type, iterate the whole table and instantiate subdevices for each core.
- // Yes, this is O(n*m) but the actual runtime is small enough that it's an acceptable tradeoff.
+ /*
+ * Foreach core type, iterate the whole table and instantiate
+ * subdevices for each core.
+ * Yes, this is O(n*m) but the actual runtime is small enough
+ * that it's an acceptable tradeoff.
+ */
for (i = 0 ; i < pcard->core_table_length ; i++) {
- read_val = readq(pcard->sysinfo_regs_base + ((pcard->core_table_offset + i) * 8));
+ read_val = readq(pcard->sysinfo_regs_base +
+ ((pcard->core_table_offset + i) * 8));
parse_core_table_entry(&cte, read_val, pcard->core_table_rev);
if (cte.type != current_type_id)
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index 358d7b2f4ad1..6462a3059fb0 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -124,6 +124,7 @@ static ssize_t cpld_reconfigure(struct device *dev,
writeq(wr_val, pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
return count;
}
+
static DEVICE_ATTR(cpld_reconfigure, 0220, NULL, cpld_reconfigure);
static ssize_t irq_mask_reg_show(struct device *dev,
@@ -367,7 +368,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
pcard->dma_bar_base = ioremap(dma_bar_phys_addr,
- dma_bar_phys_len);
+ dma_bar_phys_len);
if (!pcard->dma_bar_base) {
dev_err(&pcard->pdev->dev,
"probe: DMA_BAR could not remap memory to virtual space\n");
diff --git a/drivers/staging/kpc2000/kpc2000/dma_common_defs.h b/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
index 21450e3d408f..613c4898f65e 100644
--- a/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
+++ b/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
@@ -6,16 +6,15 @@
#define KPC_DMA_S2C_BASE_OFFSET 0x0000
#define KPC_DMA_C2S_BASE_OFFSET 0x2000
#define KPC_DMA_ENGINE_SIZE 0x0100
-#define ENGINE_CAP_PRESENT_MASK 0x1
+#define ENGINE_CAP_PRESENT_MASK 0x1
-
-#define KPC_DMA_CARD_IRQ_ENABLE (1 << 0)
-#define KPC_DMA_CARD_IRQ_ACTIVE (1 << 1)
-#define KPC_DMA_CARD_IRQ_PENDING (1 << 2)
-#define KPC_DMA_CARD_IRQ_MSI (1 << 3)
-#define KPC_DMA_CARD_USER_INTERRUPT_MODE (1 << 4)
-#define KPC_DMA_CARD_USER_INTERRUPT_ACTIVE (1 << 5)
-#define KPC_DMA_CARD_IRQ_MSIX_MODE (1 << 6)
+#define KPC_DMA_CARD_IRQ_ENABLE BIT(0)
+#define KPC_DMA_CARD_IRQ_ACTIVE BIT(1)
+#define KPC_DMA_CARD_IRQ_PENDING BIT(2)
+#define KPC_DMA_CARD_IRQ_MSI BIT(3)
+#define KPC_DMA_CARD_USER_INTERRUPT_MODE BIT(4)
+#define KPC_DMA_CARD_USER_INTERRUPT_ACTIVE BIT(5)
+#define KPC_DMA_CARD_IRQ_MSIX_MODE BIT(6)
#define KPC_DMA_CARD_MAX_PAYLOAD_SIZE_MASK 0x0700
#define KPC_DMA_CARD_MAX_READ_REQUEST_SIZE_MASK 0x7000
#define KPC_DMA_CARD_S2C_INTERRUPT_STATUS_MASK 0x00FF0000
diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c
index 452a3f7c835d..e169ac609ba4 100644
--- a/drivers/staging/kpc2000/kpc_dma/dma.c
+++ b/drivers/staging/kpc2000/kpc_dma/dma.c
@@ -16,7 +16,8 @@ irqreturn_t ndd_irq_handler(int irq, void *dev_id)
{
struct kpc_dma_device *ldev = (struct kpc_dma_device *)dev_id;
- if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) || (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
+ if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) ||
+ (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
schedule_work(&ldev->irq_work);
return IRQ_HANDLED;
@@ -39,7 +40,8 @@ void ndd_irq_worker(struct work_struct *ws)
cur = eng->desc_completed;
do {
cur = cur->Next;
- dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n", cur, cur->acd);
+ dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n",
+ cur, cur->acd);
BUG_ON(cur == eng->desc_next); // Ordering failure.
if (cur->DescControlFlags & DMA_DESC_CTL_SOP) {
@@ -56,7 +58,8 @@ void ndd_irq_worker(struct work_struct *ws)
if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
if (cur->acd)
- transfer_complete_cb(cur->acd, eng->accumulated_bytes, eng->accumulated_flags | ACD_FLAG_DONE);
+ transfer_complete_cb(cur->acd, eng->accumulated_bytes,
+ eng->accumulated_flags | ACD_FLAG_DONE);
}
eng->desc_completed = cur;
@@ -103,7 +106,9 @@ int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
eng->dir = DMA_TO_DEVICE;
eng->desc_pool_cnt = desc_cnt;
- eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096);
+ eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev,
+ sizeof(struct kpc_dma_descriptor),
+ DMA_DESC_ALIGNMENT, 4096);
eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
if (!eng->desc_pool_first) {
@@ -141,7 +146,8 @@ int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
INIT_WORK(&eng->irq_work, ndd_irq_worker);
// Grab IRQ line
- rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED, KP_DRIVER_NAME_DMA_CONTROLLER, eng);
+ rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED,
+ KP_DRIVER_NAME_DMA_CONTROLLER, eng);
if (rv) {
dev_err(&eng->pldev->dev, "%s: failed to request_irq: %d\n", __func__, rv);
return rv;
@@ -195,7 +201,10 @@ void stop_dma_engine(struct kpc_dma_device *eng)
}
// Clear any persistent bits just to make sure there is no residue from the reset
- SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE | ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR | ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END | ENG_CTL_DMA_WAITING_PERSIST), 0);
+ SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE |
+ ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR |
+ ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END |
+ ENG_CTL_DMA_WAITING_PERSIST), 0);
// Reset performance counters
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index e1c7c04f16fe..10dcd6646b01 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -76,7 +76,8 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
mmap_read_lock(current->mm); /* get memory map semaphore */
- rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE, acd->user_pages, NULL);
+ rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE,
+ acd->user_pages, NULL);
mmap_read_unlock(current->mm); /* release the semaphore */
if (rv != acd->page_count) {
nr_pages = rv;
@@ -89,16 +90,19 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
nr_pages = acd->page_count;
// Allocate and setup the sg_table (scatterlist entries)
- rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
+ rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count,
+ iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
if (rv) {
dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%d)\n", rv);
goto unpin_pages;
}
// Setup the DMA mapping for all the sg entries
- acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+ acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents,
+ ldev->dir);
if (acd->mapped_entry_count <= 0) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
+ dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n",
+ acd->mapped_entry_count);
goto free_table;
}
@@ -111,14 +115,21 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
// Figoure out how many descriptors are available and return an error if there aren't enough
num_descrs_avail = count_descriptors_available(ldev);
- dev_dbg(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+ dev_dbg(&priv->ldev->pldev->dev,
+ " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n",
+ acd->mapped_entry_count, desc_needed, num_descrs_avail);
+
if (desc_needed >= ldev->desc_pool_cnt) {
- dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+ dev_warn(&priv->ldev->pldev->dev,
+ " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n",
+ acd->mapped_entry_count, desc_needed, num_descrs_avail);
rv = -EAGAIN;
goto err_descr_too_many;
}
if (desc_needed > num_descrs_avail) {
- dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+ dev_warn(&priv->ldev->pldev->dev,
+ " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n",
+ acd->mapped_entry_count, desc_needed, num_descrs_avail);
rv = -EMSGSIZE;
goto err_descr_too_many;
}
@@ -163,7 +174,8 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
desc->acd = acd;
- dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd);
+ dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n",
+ desc, desc->acd);
ldev->desc_next = desc->Next;
desc = desc->Next;
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
index 624d47bae4d1..175fe8b0d055 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -50,7 +50,7 @@ static void kpc_dma_del_device(struct kpc_dma_device *ldev)
}
/********** SysFS Attributes **********/
-static ssize_t show_engine_regs(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t engine_regs_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct kpc_dma_device *ldev;
struct platform_device *pldev = to_platform_device(dev);
@@ -80,7 +80,7 @@ static ssize_t show_engine_regs(struct device *dev, struct device_attribute *at
ldev->desc_completed
);
}
-static DEVICE_ATTR(engine_regs, 0444, show_engine_regs, NULL);
+static DEVICE_ATTR_RO(engine_regs);
static const struct attribute *ndd_attr_list[] = {
&dev_attr_engine_regs.attr,
@@ -138,7 +138,8 @@ int kpc_dma_probe(struct platform_device *pldev)
// Setup miscdev struct
dev = MKDEV(assigned_major_num, pldev->id);
- ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev, "kpc_dma%d", pldev->id);
+ ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev,
+ "kpc_dma%d", pldev->id);
if (IS_ERR(ldev->kpc_dma_dev)) {
rv = PTR_ERR(ldev->kpc_dma_dev);
dev_err(&ldev->pldev->dev, "%s: device_create failed: %d\n", __func__, rv);
@@ -205,9 +206,11 @@ int __init kpc_dma_driver_init(void)
{
int err;
- err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma", &kpc_dma_fops);
+ err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS,
+ "kpc_dma", &kpc_dma_fops);
if (err < 0) {
- pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n", KPC_DMA_CHAR_MAJOR, err);
+ pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n",
+ KPC_DMA_CHAR_MAJOR, err);
goto fail_chrdev_register;
}
assigned_major_num = err;