Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 2
-rw-r--r--  drivers/block/umem.c | 2
-rw-r--r--  drivers/clocksource/timer-riscv.c | 43
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r--  drivers/crypto/omap-aes-gcm.c | 1
-rw-r--r--  drivers/crypto/omap-aes.c | 8
-rw-r--r--  drivers/crypto/omap-crypto.c | 10
-rw-r--r--  drivers/crypto/omap-sham.c | 101
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 1
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 5
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 56
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 13
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-riscv-intc.c | 138
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 46
-rw-r--r--  drivers/mailbox/Kconfig | 18
-rw-r--r--  drivers/mailbox/Makefile | 4
-rw-r--r--  drivers/mailbox/imx-mailbox.c | 117
-rw-r--r--  drivers/mailbox/pcc.c | 2
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c | 61
-rw-r--r--  drivers/mailbox/qcom-ipcc.c | 286
-rw-r--r--  drivers/mailbox/sprd-mailbox.c | 361
-rw-r--r--  drivers/mailbox/zynqmp-ipi-mailbox.c | 25
-rw-r--r--  drivers/nvme/host/core.c | 4
-rw-r--r--  drivers/nvme/host/fc.c | 5
-rw-r--r--  drivers/nvme/host/nvme.h | 3
-rw-r--r--  drivers/nvme/host/pci.c | 6
-rw-r--r--  drivers/nvme/host/tcp.c | 8
-rw-r--r--  drivers/nvme/target/core.c | 27
-rw-r--r--  drivers/nvme/target/tcp.c | 4
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 10
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 6
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 6
-rw-r--r--  drivers/vhost/vhost.c | 8
48 files changed, 1236 insertions(+), 189 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 2e96d8b8758b..c33bbbfd1bd9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1390,7 +1390,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
goto out_unfreeze;
/* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+ lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
/* For those flags, use the previous values instead */
lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
/* For flags that can't be cleared, use previous values too */
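For reference, a minimal sketch of the flag arithmetic this hunk corrects: the old `&= ~LOOP_SET_STATUS_SETTABLE_FLAGS` dropped the user-settable bits of the new value and kept everything else, the opposite of what the comments describe. The mask and values below are hypothetical.

	/* Sketch only: keep the settable bits from the request, preserve
	 * the rest from the previous flags. */
	#define SETTABLE	0x0c			/* hypothetical settable bits 2-3 */

	unsigned int prev_flags = 0x0a;			/* old state: bits 1 and 3 */
	unsigned int flags = 0x05;			/* requested: bits 0 and 2 */

	flags &= SETTABLE;				/* 0x04: settable part of request */
	flags |= prev_flags & ~SETTABLE;		/* 0x06: carry over bit 1 */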
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 0b944ac96d6b..27a33adc41e4 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1613,7 +1613,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
disc_information di;
track_information ti;
__u32 last_track;
- int ret = -1;
+ int ret;
ret = pkt_get_disc_info(pd, &di);
if (ret)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d84e8a878df2..1e2aa5ae2796 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -784,7 +784,7 @@ static const struct block_device_operations mm_fops = {
static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int ret = -ENODEV;
+ int ret;
struct cardinfo *card = &cards[num_cards];
unsigned char mem_present;
unsigned char batt_status;
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c4f15c4068c0..9de1dabfb126 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -12,8 +12,11 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
@@ -39,6 +42,7 @@ static int riscv_clock_next_event(unsigned long delta,
return 0;
}
+static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -74,30 +78,36 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
+ ce->irq = riscv_clock_event_irq;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
- csr_set(CSR_IE, IE_TIE);
+ enable_percpu_irq(riscv_clock_event_irq,
+ irq_get_trigger_type(riscv_clock_event_irq));
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
- csr_clear(CSR_IE, IE_TIE);
+ disable_percpu_irq(riscv_clock_event_irq);
return 0;
}
/* called directly from the low-level interrupt handler */
-void riscv_timer_interrupt(void)
+static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
+
+ return IRQ_HANDLED;
}
static int __init riscv_timer_init_dt(struct device_node *n)
{
int cpuid, hartid, error;
+ struct device_node *child;
+ struct irq_domain *domain;
hartid = riscv_of_processor_hartid(n);
if (hartid < 0) {
@@ -115,6 +125,25 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (cpuid != smp_processor_id())
return 0;
+ domain = NULL;
+ child = of_get_compatible_child(n, "riscv,cpu-intc");
+ if (!child) {
+ pr_err("Failed to find INTC node [%pOF]\n", n);
+ return -ENODEV;
+ }
+ domain = irq_find_host(child);
+ of_node_put(child);
+ if (!domain) {
+ pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
+ return -ENODEV;
+ }
+
+ riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
+ if (!riscv_clock_event_irq) {
+ pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
+ return -ENODEV;
+ }
+
pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
@@ -126,6 +155,14 @@ static int __init riscv_timer_init_dt(struct device_node *n)
sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
+ error = request_percpu_irq(riscv_clock_event_irq,
+ riscv_timer_interrupt,
+ "riscv-timer", &riscv_clock_event);
+ if (error) {
+ pr_err("registering percpu irq failed [%d]\n", error);
+ return error;
+ }
+
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 788c6607078b..cee2a2713038 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
- struct nitrox_device *ndev = NULL;
+ struct nitrox_device *ndev;
mutex_lock(&devlist_lock);
list_for_each_entry(ndev, &ndevlist, list) {
@@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
break;
}
mutex_unlock(&devlist_lock);
- if (!ndev)
+ if (&ndev->list == &ndevlist)
return NULL;
refcount_inc(&ndev->refcnt);
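The `!ndev` test removed here could never be true: when `list_for_each_entry()` runs to completion, the cursor is left pointing at the bogus `container_of()` of the list head itself, never at NULL. A minimal sketch of the corrected exhaustion check; the `is_ready()` predicate is hypothetical:

	struct nitrox_device *ndev;

	list_for_each_entry(ndev, &ndevlist, list) {
		if (is_ready(ndev))		/* hypothetical match test */
			break;
	}
	if (&ndev->list == &ndevlist)		/* loop ran off the end */
		return NULL;			/* no device matched */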
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 32dc00dc570b..9f937bdc53a7 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -77,7 +77,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
tag = (u8 *)rctx->auth_tag;
for (i = 0; i < dd->authsize; i++) {
if (tag[i]) {
- dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
ret = -EBADMSG;
}
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 824ddf2a66ff..b5aff20c5900 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1269,13 +1269,17 @@ static int omap_aes_remove(struct platform_device *pdev)
spin_unlock(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
- for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
+ for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
crypto_unregister_aead(aalg);
+ dd->pdata->aead_algs_info->registered--;
}
crypto_engine_exit(dd->engine);
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index cc88b7362bc2..94b2dba90f0d 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -178,11 +178,17 @@ static void omap_crypto_copy_data(struct scatterlist *src,
amt = min(src->length - srco, dst->length - dsto);
amt = min(len, amt);
- srcb = sg_virt(src) + srco;
- dstb = sg_virt(dst) + dsto;
+ srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
+ dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
memcpy(dstb, srcb, amt);
+ if (!PageSlab(sg_page(dst)))
+ flush_kernel_dcache_page(sg_page(dst));
+
+ kunmap_atomic(srcb);
+ kunmap_atomic(dstb);
+
srco += amt;
dsto += amt;
len -= amt;
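`sg_virt()` is only valid for pages with a permanent kernel mapping, so it breaks for user buffers that land in highmem; the hunk above switches to temporary mappings. A minimal sketch of the pattern, with the unmaps in LIFO order:

	void *src_va, *dst_va;

	src_va = kmap_atomic(sg_page(src)) + src->offset + srco;
	dst_va = kmap_atomic(sg_page(dst)) + dst->offset + dsto;

	memcpy(dst_va, src_va, amt);

	kunmap_atomic(dst_va);		/* unmap in reverse order of mapping */
	kunmap_atomic(src_va);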
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 063ad5d03f33..82691a057d2a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
- struct omap_sham_dev *dd;
-
unsigned long flags;
/* fallback stuff */
@@ -750,8 +748,17 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
int offset = rctx->offset;
int bufcnt = rctx->bufcnt;
- if (!sg || !sg->length || !nbytes)
+ if (!sg || !sg->length || !nbytes) {
+ if (bufcnt) {
+ bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
+ rctx->sg = rctx->sgl;
+ rctx->sg_len = 1;
+ }
+
return 0;
+ }
new_len = nbytes;
@@ -895,7 +902,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (hash_later < 0)
hash_later = 0;
- if (hash_later) {
+ if (hash_later && hash_later <= rctx->buflen) {
scatterwalk_map_and_copy(rctx->buffer,
req->src,
req->nbytes - hash_later,
@@ -925,27 +932,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
return 0;
}
+static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+{
+ struct omap_sham_dev *dd;
+
+ if (ctx->dd)
+ return ctx->dd;
+
+ spin_lock_bh(&sham.lock);
+ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
+ list_move_tail(&dd->list, &sham.dev_list);
+ ctx->dd = dd;
+ spin_unlock_bh(&sham.lock);
+
+ return dd;
+}
+
static int omap_sham_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = NULL, *tmp;
+ struct omap_sham_dev *dd;
int bs = 0;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
+ ctx->dd = NULL;
- ctx->dd = dd;
+ dd = omap_sham_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
ctx->flags = 0;
@@ -1215,8 +1230,7 @@ err1:
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct omap_sham_dev *dd = tctx->dd;
+ struct omap_sham_dev *dd = ctx->dd;
ctx->op = op;
@@ -1226,7 +1240,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
static int omap_sham_update(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = ctx->dd;
+ struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
if (!req->nbytes)
return 0;
@@ -1319,21 +1333,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
- struct omap_sham_dev *dd = NULL, *tmp;
int err, i;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
-
err = crypto_shash_setkey(tctx->fallback, key, keylen);
if (err)
return err;
@@ -1350,7 +1351,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
memset(bctx->ipad + keylen, 0, bs - keylen);
- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
memcpy(bctx->opad, bctx->ipad, bs);
for (i = 0; i < bs; i++) {
@@ -1571,7 +1572,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1592,7 +1594,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1614,7 +1617,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1637,7 +1641,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1662,7 +1667,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1683,7 +1689,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1705,7 +1712,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1728,7 +1736,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -2154,6 +2163,7 @@ static int omap_sham_probe(struct platform_device *pdev)
}
dd->flags |= dd->pdata->flags;
+ sham.flags |= dd->pdata->flags;
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
@@ -2181,6 +2191,9 @@ static int omap_sham_probe(struct platform_device *pdev)
spin_unlock(&sham.lock);
for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ if (dd->pdata->algs_info[i].registered)
+ break;
+
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
struct ahash_alg *alg;
@@ -2232,9 +2245,11 @@ static int omap_sham_remove(struct platform_device *pdev)
list_del(&dd->list);
spin_unlock(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
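The new omap_sham_find_dev() load-balances requests across accelerator instances by always taking the list head and rotating it to the tail, then caching the choice in the per-request context. A minimal sketch of that round-robin idiom (assumes the device list is known to be non-empty):

	spin_lock_bh(&sham.lock);
	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
	list_move_tail(&dd->list, &sham.dev_list);	/* head becomes tail */
	spin_unlock_bh(&sham.lock);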
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 53b4126373a5..ffe149aafc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -195,10 +196,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
- } else if (current->mm == NULL) { \
- use_mm(mmptr); \
+ } else if (current->flags & PF_KTHREAD) { \
+ kthread_use_mm(mmptr); \
valid = !get_user((dst), (wptr)); \
- unuse_mm(mmptr); \
+ kthread_unuse_mm(mmptr); \
} \
pagefault_enable(); \
} \
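The macro change above follows the kthread_use_mm() API: a kernel thread may temporarily adopt a user process's address space to dereference user pointers, and `current->flags & PF_KTHREAD` is the reliable test for that case, since a kthread's `current->mm` can be non-NULL while it is borrowing an mm. A minimal sketch of the pattern, with names like `mm`, `uptr` and `val` standing in for the macro arguments:

	u32 val;
	bool valid = false;

	pagefault_disable();
	if (mm == current->mm) {
		valid = !get_user(val, uptr);	/* already in the right context */
	} else if (current->flags & PF_KTHREAD) {
		kthread_use_mm(mm);		/* temporarily adopt the user mm */
		valid = !get_user(val, uptr);
		kthread_unuse_mm(mm);
	}
	pagefault_enable();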
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 6529caca88fe..35d4a5ab0228 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/mmu_context.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 691c89705bcd..bf927f432506 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,7 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index c6944739183a..744366c7ee85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 2f4bdc80a6b2..feab4cc6e836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index df841c2ac5e7..c7fd0c47b254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,8 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7d39b858c9f1..3a3a511670c9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -226,6 +226,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
case 3:
case 4:
color_index = TrueCModeIndex;
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b1099e1251a2..d877ddc6dc57 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -27,6 +27,7 @@
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_sysfs.h>
#include <linux/uaccess.h>
@@ -523,6 +524,10 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registration_state = DRM_CONNECTOR_REGISTERED;
+
+ /* Let userspace know we have a new connector */
+ drm_sysfs_hotplug_event(connector->dev);
+
goto unlock;
err_debugfs:
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 939f0032aab1..f0336c804639 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -291,9 +291,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return PTR_ERR(connector->kdev);
}
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 40d42dcff0b7..ed9e53c373a7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5206,6 +5206,9 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type)
{
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ return;
+
switch (type) {
case DP_SDP_VSC:
intel_read_dp_vsc_sdp(encoder, crtc_state,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3ce185670ca4..db8eb1c6afe9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1988,6 +1988,38 @@ static const struct dma_fence_work_ops eb_parse_ops = {
.release = __eb_parse_release,
};
+static inline int
+__parser_mark_active(struct i915_vma *vma,
+ struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ struct intel_gt_buffer_pool_node *node = vma->private;
+
+ return i915_active_ref(&node->active, tl, fence);
+}
+
+static int
+parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
+{
+ int err;
+
+ mutex_lock(&tl->mutex);
+
+ err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
+ if (err)
+ goto unlock;
+
+ if (pw->trampoline) {
+ err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&tl->mutex);
+ return err;
+}
+
static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct i915_vma *shadow,
struct i915_vma *trampoline)
@@ -2022,20 +2054,25 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
pw->shadow = shadow;
pw->trampoline = trampoline;
+ /* Mark active refs early for this worker, in case we get interrupted */
+ err = parser_mark_active(pw, eb->context->timeline);
+ if (err)
+ goto err_commit;
+
err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
if (err)
- goto err_trampoline;
+ goto err_commit;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
- goto err_batch_unlock;
+ goto err_commit_unlock;
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
- goto err_batch_unlock;
+ goto err_commit_unlock;
/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
@@ -2050,11 +2087,13 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_fence_work_commit_imm(&pw->base);
return 0;
-err_batch_unlock:
+err_commit_unlock:
dma_resv_unlock(pw->batch->resv);
-err_trampoline:
- if (trampoline)
- i915_active_release(&trampoline->active);
+err_commit:
+ i915_sw_fence_set_error_once(&pw->base.chain, err);
+ dma_fence_work_commit_imm(&pw->base);
+ return err;
+
err_shadow:
i915_active_release(&shadow->active);
err_batch:
@@ -2100,6 +2139,7 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err;
}
i915_gem_object_set_readonly(shadow->obj);
+ shadow->private = pool;
trampoline = NULL;
if (CMDPARSER_USES_GGTT(eb->i915)) {
@@ -2113,6 +2153,7 @@ static int eb_parse(struct i915_execbuffer *eb)
shadow = trampoline;
goto err_shadow;
}
+ shadow->private = pool;
eb->batch_flags |= I915_DISPATCH_SECURE;
}
@@ -2129,7 +2170,6 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- shadow->private = pool;
return 0;
err_trampoline:
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index eee530453aa6..ad8a9df49f29 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index a3dde770226d..02559da61e6e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -65,7 +65,7 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-i915_param_named_unsafe(reset, int, 0600,
+i915_param_named_unsafe(reset, uint, 0600,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
i915_param_named_unsafe(vbt_firmware, charp, 0400,
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 7ad3f06c127e..00ca35f07ba5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -148,7 +148,7 @@
#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
+#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3)
#define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7)
#define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 2ff780114106..12430b9d4e93 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long best_rate = 0;
u8 best_m = 0, best_n = 0, _m, _n;
- for (_m = 0; _m < 8; _m++) {
+ for (_m = 0; _m < 16; _m++) {
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 66b9a68f5e9f..29fead208cad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -493,6 +493,19 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
+config RISCV_INTC
+ bool "RISC-V Local Interrupt Controller"
+ depends on RISCV
+ default y
+ help
+ This enables support for the per-HART local interrupt controller
+ found in standard RISC-V systems. The per-HART local interrupt
+ controller handles timer interrupts, software interrupts, and
+ hardware interrupts. Without a per-HART local interrupt controller,
+ a RISC-V system will be unable to handle any interrupts.
+
+ If you don't know what to do here, say Y.
+
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 3a4ce283189a..133f9c45744a 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
+obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
new file mode 100644
index 000000000000..a6f97fa6ff69
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#define pr_fmt(fmt) "riscv-intc: " fmt
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+static struct irq_domain *intc_domain;
+
+static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+{
+ unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+
+ if (unlikely(cause >= BITS_PER_LONG))
+ panic("unexpected interrupt cause");
+
+ switch (cause) {
+#ifdef CONFIG_SMP
+ case RV_IRQ_SOFT:
+ /*
+ * We only use software interrupts to pass IPIs, so if a
+ * non-SMP system gets one, then we don't know what to do.
+ */
+ handle_IPI(regs);
+ break;
+#endif
+ default:
+ handle_domain_irq(intc_domain, cause, regs);
+ break;
+ }
+}
+
+/*
+ * On RISC-V systems local interrupts are masked or unmasked by writing
+ * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written
+ * on the local hart, these functions can only be called on the hart that
+ * corresponds to the IRQ chip.
+ */
+
+static void riscv_intc_irq_mask(struct irq_data *d)
+{
+ csr_clear(CSR_IE, BIT(d->hwirq));
+}
+
+static void riscv_intc_irq_unmask(struct irq_data *d)
+{
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static int riscv_intc_cpu_starting(unsigned int cpu)
+{
+ csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static int riscv_intc_cpu_dying(unsigned int cpu)
+{
+ csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static struct irq_chip riscv_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = riscv_intc_irq_mask,
+ .irq_unmask = riscv_intc_irq_unmask,
+};
+
+static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops riscv_intc_domain_ops = {
+ .map = riscv_intc_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init riscv_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int rc, hartid;
+
+ hartid = riscv_of_parent_hartid(node);
+ if (hartid < 0) {
+ pr_warn("unable to fine hart id for %pOF\n", node);
+ return 0;
+ }
+
+ /*
+ * The DT will have one INTC DT node under each CPU (or HART)
+ * DT node, so the riscv_intc_init() function will be called once
+ * for each INTC DT node. We only need to do INTC initialization
+ * for the INTC DT node belonging to the boot CPU (or boot HART).
+ */
+ if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
+ return 0;
+
+ intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
+ &riscv_intc_domain_ops, NULL);
+ if (!intc_domain) {
+ pr_err("unable to add IRQ domain\n");
+ return -ENXIO;
+ }
+
+ rc = set_handle_irq(&riscv_intc_irq);
+ if (rc) {
+ pr_err("failed to set irq handler\n");
+ return rc;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
+ "irqchip/riscv/intc:starting",
+ riscv_intc_cpu_starting,
+ riscv_intc_cpu_dying);
+
+ pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index d9c53f85a68e..eaa3e9fe54e9 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -76,6 +77,7 @@ struct plic_handler {
void __iomem *enable_base;
struct plic_priv *priv;
};
+static int plic_parent_irq;
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
@@ -219,15 +221,17 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
* that source ID back to the same claim register. This automatically enables
* and disables the interrupt, so there's nothing else to do.
*/
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
- csr_clear(CSR_IE, IE_EIE);
+ chained_irq_enter(chip, desc);
+
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
@@ -237,21 +241,8 @@ static void plic_handle_irq(struct pt_regs *regs)
else
generic_handle_irq(irq);
}
- csr_set(CSR_IE, IE_EIE);
-}
-
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
-{
- for (; node; node = node->parent) {
- if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hartid(node);
- }
- return -1;
+ chained_irq_exit(chip, desc);
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
@@ -262,10 +253,8 @@ static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
static int plic_dying_cpu(unsigned int cpu)
{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- csr_clear(CSR_IE, IE_EIE);
- plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+ if (plic_parent_irq)
+ disable_percpu_irq(plic_parent_irq);
return 0;
}
@@ -274,7 +263,11 @@ static int plic_starting_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- csr_set(CSR_IE, IE_EIE);
+ if (plic_parent_irq)
+ enable_percpu_irq(plic_parent_irq,
+ irq_get_trigger_type(plic_parent_irq));
+ else
+ pr_warn("cpu%d: parent irq not available\n", cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
@@ -330,7 +323,7 @@ static int __init plic_init(struct device_node *node,
if (parent.args[0] != RV_IRQ_EXT)
continue;
- hartid = plic_find_hart_id(parent.np);
+ hartid = riscv_of_parent_hartid(parent.np);
if (hartid < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
@@ -342,6 +335,14 @@ static int __init plic_init(struct device_node *node,
continue;
}
+ /* Find parent domain and register chained handler */
+ if (!plic_parent_irq && irq_find_host(parent.np)) {
+ plic_parent_irq = irq_of_parse_and_map(node, i);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq,
+ plic_handle_irq);
+ }
+
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
* little fragile if multiple clusters of harts are contained
* in a single PLIC.
*/
@@ -382,7 +383,6 @@ done:
pr_info("%pOFP: mapped %d interrupts with %d handlers for"
" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
- set_handle_irq(plic_handle_irq);
return 0;
out_iounmap:
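With the per-hart INTC in place, the PLIC no longer installs itself as the top-level trap handler; it hangs off its parent interrupt as a chained flow handler. A minimal sketch of that generic irqchip pattern (demux body elided):

	static void demux_handler(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);

		chained_irq_enter(chip, desc);	/* ack/mask the parent IRQ */
		/* claim pending child hwirqs, generic_handle_irq() each */
		chained_irq_exit(chip, desc);	/* eoi/unmask the parent IRQ */
	}

	irq_set_chained_handler(parent_irq, demux_handler);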
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 5a577a6734cf..05b1009e2820 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -236,4 +236,22 @@ config SUN6I_MSGBOX
various Allwinner SoCs. This mailbox is used for communication
between the application CPUs and the power management coprocessor.
+config SPRD_MBOX
+ tristate "Spreadtrum Mailbox"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
Mailbox driver implementation for the Spreadtrum platform. It is used
to send messages between the application processors and the MCU. Say Y
here if you want to build the Spreadtrum mailbox controller driver.
+
+config QCOM_IPCC
+ bool "Qualcomm Technologies, Inc. IPCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+ (IPCC) driver for MSM devices. The driver provides mailbox support for
sending interrupts to the clients. The driver also acts as an
interrupt controller for receiving interrupts from clients.
+ Say Y here if you want to build this driver.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e4364ef5c47..60d224b723a1 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -50,3 +50,7 @@ obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
+
+obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+
+obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 7906624a731c..7205b825c8b5 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -12,6 +12,7 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
@@ -66,6 +67,8 @@ struct imx_mu_priv {
struct clk *clk;
int irq;
+ u32 xcr;
+
bool side_b;
};
@@ -154,12 +157,17 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
switch (cp->type) {
case IMX_MU_TYPE_TX:
- if (msg->hdr.size > sizeof(*msg)) {
+ /*
+ * msg->hdr.size specifies the number of u32 words while
+ * sizeof yields bytes.
+ */
+
+ if (msg->hdr.size > sizeof(*msg) / 4) {
/*
* The real message size can be different to
* struct imx_sc_rpc_msg_max size
*/
- dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size);
+ dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
return -EINVAL;
}
@@ -198,9 +206,8 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
- if (msg.hdr.size > sizeof(msg)) {
- dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
- sizeof(msg), msg.hdr.size);
+ if (msg.hdr.size > sizeof(msg) / 4) {
+ dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
return -EINVAL;
}
@@ -285,8 +292,10 @@ static int imx_mu_startup(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ unsigned long irq_flag = IRQF_SHARED;
int ret;
+ pm_runtime_get_sync(priv->dev);
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbell don't have ACK support */
tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
@@ -294,8 +303,12 @@ static int imx_mu_startup(struct mbox_chan *chan)
return 0;
}
- ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED |
- IRQF_NO_SUSPEND, cp->irq_desc, chan);
+ /* An IPC MU not managed by a PM domain should be requested with IRQF_NO_SUSPEND set */
+ if (!priv->dev->pm_domain)
+ irq_flag |= IRQF_NO_SUSPEND;
+
+ ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
+ cp->irq_desc, chan);
if (ret) {
dev_err(priv->dev,
"Unable to acquire IRQ %d\n", priv->irq);
@@ -323,6 +336,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ pm_runtime_put_sync(priv->dev);
return;
}
@@ -341,6 +355,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
}
free_irq(priv->irq, chan);
+ pm_runtime_put_sync(priv->dev);
}
static const struct mbox_chan_ops imx_mu_ops = {
@@ -374,7 +389,7 @@ static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
break;
default:
dev_err(mbox->dev, "Invalid chan type: %d\n", type);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (chan >= mbox->num_chans) {
@@ -508,14 +523,39 @@ static int imx_mu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return devm_mbox_controller_register(dev, &priv->mbox);
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto disable_runtime_pm;
+ }
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_disable(dev);
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int imx_mu_remove(struct platform_device *pdev)
{
struct imx_mu_priv *priv = platform_get_drvdata(pdev);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_disable(priv->dev);
return 0;
}
@@ -558,12 +598,69 @@ static const struct of_device_id imx_mu_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+static int imx_mu_suspend_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv->clk)
+ priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int imx_mu_resume_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ /*
+ * Only restore the MU registers when the context was lost: TIE
+ * may legitimately be set during noirq resume while MU data
+ * communication is going on, and blindly restoring the saved
+ * value would overwrite TIE, make the MU data send fail, and
+ * possibly freeze the system. This issue was observed when
+ * testing freeze-mode suspend.
+ */
+ if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
+ imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+ imx_mu_resume_noirq)
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
static struct platform_driver imx_mu_driver = {
.probe = imx_mu_probe,
.remove = imx_mu_remove,
.driver = {
.name = "imx_mu",
.of_match_table = imx_mu_dt_ids,
+ .pm = &imx_mu_pm_ops,
},
};
module_platform_driver(imx_mu_driver);
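The size-check fix above hinges on units: `msg->hdr.size` counts 32-bit words while `sizeof()` yields bytes, so the bound must be divided by 4. A worked example with a hypothetical 20-byte message struct:

	struct msg_max {		/* hypothetical layout: 20 bytes */
		u32 hdr;
		u32 data[4];
	};

	/* Old check: size > 20 accepted up to 20 words (80 bytes) -- too big.
	 * New check: size > sizeof(struct msg_max) / 4, i.e. > 5 words. */
	if (msg->hdr.size > sizeof(struct msg_max) / 4)
		return -EINVAL;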
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 34844b7a3675..8c7fac38bb1c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -568,7 +568,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
return ret;
}
-struct platform_driver pcc_mbox_driver = {
+static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
.name = "PCCT",
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index eeebafd546e5..cec34f0af6ce 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -24,6 +24,35 @@ struct qcom_apcs_ipc {
struct platform_device *clk;
};
+struct qcom_apcs_ipc_data {
+ int offset;
+ char *clk_name;
+};
+
+static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
+ .offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
+};
+
+static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
+ .offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
+ .offset = 16, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
+ .offset = 12, .clk_name = NULL
+};
+
static const struct regmap_config apcs_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -48,17 +77,12 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
static int qcom_apcs_ipc_probe(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs;
+ const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
struct resource *res;
- unsigned long offset;
void __iomem *base;
unsigned long i;
int ret;
- const struct of_device_id apcs_clk_match_table[] = {
- { .compatible = "qcom,msm8916-apcs-kpss-global", },
- { .compatible = "qcom,qcs404-apcs-apps-global", },
- {}
- };
apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
if (!apcs)
@@ -73,10 +97,10 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+ apcs_data = of_device_get_match_data(&pdev->dev);
apcs->regmap = regmap;
- apcs->offset = offset;
+ apcs->offset = apcs_data->offset;
/* Initialize channel identifiers */
for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
@@ -93,9 +117,9 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
return ret;
}
- if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
+ if (apcs_data->clk_name) {
apcs->clk = platform_device_register_data(&pdev->dev,
- "qcom-apcs-msm8916-clk",
+ apcs_data->clk_name,
PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(apcs->clk))
@@ -119,14 +143,15 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
- { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
- { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
- { .compatible = "qcom,qcs404-apcs-apps-global", .data = (void *)8 },
- { .compatible = "qcom,sc7180-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,sm8150-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = (void *)8 },
+ { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
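The conversion above replaces integers cast through `.data` with per-SoC data structures, the usual `of_device_get_match_data()` idiom. A minimal sketch with hypothetical names:

	struct soc_data {
		int offset;
		const char *clk_name;	/* NULL when no clock child is needed */
	};

	static const struct soc_data soc_a_data = { .offset = 8, .clk_name = "a-clk" };

	static const struct of_device_id ids[] = {
		{ .compatible = "vendor,soc-a", .data = &soc_a_data },	/* hypothetical */
		{ }
	};

	static int probe(struct platform_device *pdev)
	{
		const struct soc_data *data = of_device_get_match_data(&pdev->dev);

		if (data->clk_name)	/* register the clock device only when named */
			/* ... */;
		return 0;
	}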
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
new file mode 100644
index 000000000000..2d13c72944c6
--- /dev/null
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+
+#define IPCC_MBOX_MAX_CHAN 48
+
+/* IPCC Register offsets */
+#define IPCC_REG_SEND_ID 0x0c
+#define IPCC_REG_RECV_ID 0x10
+#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
+#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18
+#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
+#define IPCC_REG_CLIENT_CLEAR 0x38
+
+#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
+#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
+
+#define IPCC_NO_PENDING_IRQ GENMASK(31, 0)
+
+/**
+ * struct qcom_ipcc_chan_info - Per-mailbox-channel info
+ * @client_id: The client-id to which the interrupt has to be triggered
+ * @signal_id: The signal-id to which the interrupt has to be triggered
+ */
+struct qcom_ipcc_chan_info {
+ u16 client_id;
+ u16 signal_id;
+};
+
+/**
+ * struct qcom_ipcc - Holder for the mailbox driver
+ * @dev: Device associated with this instance
+ * @base: Base address of the IPCC frame associated to APSS
+ * @irq_domain: The irq_domain associated with this instance
+ * @chan: The mailbox channels array
+ * @mchan: The per-mailbox channel info array
+ * @mbox: The mailbox controller
+ * @irq: Summary irq
+ */
+struct qcom_ipcc {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
+ struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+ struct mbox_controller mbox;
+ int irq;
+};
+
+static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct qcom_ipcc, mbox);
+}
+
+static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
+{
+ return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
+ FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
+}
+
+static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
+{
+ struct qcom_ipcc *ipcc = data;
+ u32 hwirq;
+ int virq;
+
+ for (;;) {
+ hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+ if (hwirq == IPCC_NO_PENDING_IRQ)
+ break;
+
+ virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
+ generic_handle_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_ipcc_mask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
+}
+
+static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
+}
+
+static struct irq_chip qcom_ipcc_irq_chip = {
+ .name = "ipcc",
+ .irq_mask = qcom_ipcc_mask_irq,
+ .irq_unmask = qcom_ipcc_unmask_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct qcom_ipcc *ipcc = d->host_data;
+
+ irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, ipcc);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static int qcom_ipcc_domain_xlate(struct irq_domain *d,
+ struct device_node *node, const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops qcom_ipcc_irq_ops = {
+ .map = qcom_ipcc_domain_map,
+ .xlate = qcom_ipcc_domain_xlate,
+};
+
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
+ struct qcom_ipcc_chan_info *mchan = chan->con_priv;
+ u32 hwirq;
+
+ hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
+ writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
+
+ return 0;
+}
+
+static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *ph)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
+ struct qcom_ipcc_chan_info *mchan;
+ struct mbox_chan *chan;
+ unsigned int i;
+
+ if (ph->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
+ chan = &ipcc->chan[i];
+ if (!chan->con_priv) {
+ mchan = &ipcc->mchan[i];
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+ .send_data = qcom_ipcc_mbox_send_data,
+};
+
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+{
+ struct mbox_controller *mbox;
+ struct device *dev = ipcc->dev;
+
+ mbox = &ipcc->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = IPCC_MBOX_MAX_CHAN;
+ mbox->chans = ipcc->chan;
+ mbox->ops = &ipcc_mbox_chan_ops;
+ mbox->of_xlate = qcom_ipcc_mbox_xlate;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = false;
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static int qcom_ipcc_probe(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc;
+ int ret;
+
+ ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ ipcc->dev = &pdev->dev;
+
+ ipcc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ipcc->base))
+ return PTR_ERR(ipcc->base);
+
+ ipcc->irq = platform_get_irq(pdev, 0);
+ if (ipcc->irq < 0)
+ return ipcc->irq;
+
+ ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
+ &qcom_ipcc_irq_ops, ipcc);
+ if (!ipcc->irq_domain)
+ return -ENOMEM;
+
+ ret = qcom_ipcc_setup_mbox(ipcc);
+ if (ret)
+ goto err_mbox;
+
+ ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
+ IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
+ goto err_mbox;
+ }
+
+ enable_irq_wake(ipcc->irq);
+ platform_set_drvdata(pdev, ipcc);
+
+ return 0;
+
+err_mbox:
+ irq_domain_remove(ipcc->irq_domain);
+
+ return ret;
+}
+
+static int qcom_ipcc_remove(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ disable_irq_wake(ipcc->irq);
+ irq_domain_remove(ipcc->irq_domain);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ipcc_of_match[] = {
+ { .compatible = "qcom,ipcc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
+
+static struct platform_driver qcom_ipcc_driver = {
+ .probe = qcom_ipcc_probe,
+ .remove = qcom_ipcc_remove,
+ .driver = {
+ .name = "qcom-ipcc",
+ .of_match_table = qcom_ipcc_of_match,
+ },
+};
+
+static int __init qcom_ipcc_init(void)
+{
+ return platform_driver_register(&qcom_ipcc_driver);
+}
+arch_initcall(qcom_ipcc_init);
+
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
+MODULE_LICENSE("GPL v2");
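The driver encodes a (client, signal) pair into a single hwirq with `FIELD_PREP()`: client id in bits 31:16, signal id in bits 15:0. A worked example:

	u32 hwirq;

	hwirq = FIELD_PREP(GENMASK(31, 16), 5) |	/* client_id = 5 */
		FIELD_PREP(GENMASK(15, 0), 2);		/* signal_id = 2 */
	/* hwirq == 0x00050002; readl(IPCC_REG_RECV_ID) returns the same
	 * encoding, so irq_find_mapping() can look it up directly. */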
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
new file mode 100644
index 000000000000..f6fab24ae8a9
--- /dev/null
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spreadtrum mailbox driver
+ *
+ * Copyright (c) 2020 Spreadtrum Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID 0x0
+#define SPRD_MBOX_MSG_LOW 0x4
+#define SPRD_MBOX_MSG_HIGH 0x8
+#define SPRD_MBOX_TRIGGER 0xc
+#define SPRD_MBOX_FIFO_RST 0x10
+#define SPRD_MBOX_FIFO_STS 0x14
+#define SPRD_MBOX_IRQ_STS 0x18
+#define SPRD_MBOX_IRQ_MSK 0x1c
+#define SPRD_MBOX_LOCK 0x20
+#define SPRD_MBOX_FIFO_DEPTH 0x24
+
+/* Bit and mask definitions for inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16
+#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0)
+
+/* Bit and mask definitions for SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR BIT(0)
+
+/* Bit and mask definitions for outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL BIT(0)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
+#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
+
+/* Bit and mask definitions for inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0)
+
+/* Bit and mask definitions for outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0)
+
+#define SPRD_MBOX_CHAN_MAX 8
+
+struct sprd_mbox_priv {
+ struct mbox_controller mbox;
+ struct device *dev;
+ void __iomem *inbox_base;
+ void __iomem *outbox_base;
+ struct clk *clk;
+ u32 outbox_fifo_depth;
+
+ struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
+};
+
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+ u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 fifo_len;
+
+ /*
+	 * If the read pointer equals the write pointer, the FIFO is either
+	 * full or empty; the FULL status flag disambiguates the two cases.
+ */
+ if (wr_pos == rd_pos) {
+ if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+ fifo_len = priv->outbox_fifo_depth;
+ else
+ fifo_len = 0;
+ } else if (wr_pos > rd_pos) {
+ fifo_len = wr_pos - rd_pos;
+ } else {
+ fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+ }
+
+ return fifo_len;
+}
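/*
 * The pointer arithmetic above is the standard circular-buffer occupancy
 * calculation. A condensed restatement with concrete values (the depth of
 * 8 is only an example; the hardware reports the real depth via the
 * SPRD_MBOX_FIFO_DEPTH register):
 */
static u32 demo_fifo_occupancy(u32 wr, u32 rd, u32 depth, bool full)
{
	if (wr == rd)			/* ambiguous: the FULL flag decides */
		return full ? depth : 0;
	if (wr > rd)			/* no wrap-around */
		return wr - rd;		/* e.g. wr = 6, rd = 2 -> 4 */
	return depth - rd + wr;		/* e.g. wr = 2, rd = 6, depth = 8 -> 4 */
}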
+
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, fifo_len, msg[2];
+ int i, id;
+
+ fifo_sts = readl(priv->outbox_base + SPRD_MBOX_FIFO_STS);
+
+ fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+ if (!fifo_len) {
+ dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < fifo_len; i++) {
+ msg[0] = readl(priv->outbox_base + SPRD_MBOX_MSG_LOW);
+ msg[1] = readl(priv->outbox_base + SPRD_MBOX_MSG_HIGH);
+ id = readl(priv->outbox_base + SPRD_MBOX_ID);
+
+ chan = &priv->chan[id];
+ mbox_chan_received_data(chan, (void *)msg);
+
+ /* Trigger to update outbox FIFO pointer */
+ writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+ }
+
+	/* Clear the irq status after reading all messages. */
+ writel(SPRD_MBOX_IRQ_CLR, priv->outbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, send_sts, busy, id;
+
+ fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+ /* Get the inbox data delivery status */
+ send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+ SPRD_INBOX_FIFO_DELIVER_SHIFT;
+ if (!send_sts) {
+ dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ while (send_sts) {
+ id = __ffs(send_sts);
+ send_sts &= (send_sts - 1);
+
+ chan = &priv->chan[id];
+
+ /*
+		 * Check if the message was fetched by the remote target; if
+		 * so, the transmission has completed.
+ */
+ busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id)))
+ mbox_chan_txdone(chan, 0);
+ }
+
+ /* Clear FIFO delivery and overflow status */
+ writel(fifo_sts &
+	       (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERFLOW_MASK),
+ priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Clear irq status */
+ writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
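/*
 * The while loop above walks the set bits of send_sts with __ffs() plus
 * the "clear lowest set bit" step (x &= x - 1). The same iteration could
 * be written with for_each_set_bit() from <linux/bitops.h>; a sketch of
 * that equivalent form (illustrative only):
 */
	unsigned long sts = send_sts;
	unsigned int bit;

	for_each_set_bit(bit, &sts, SPRD_MBOX_CHAN_MAX)
		if (!(fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK & BIT(bit)))
			mbox_chan_txdone(&priv->chan[bit], 0);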
+
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 *data = msg;
+
+	/* Write data into the inbox FIFO; only 8 bytes can be sent each time */
+ writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+ writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+ /* Set target core id */
+ writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+ /* Trigger remote request */
+ writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+ return 0;
+}
+
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 busy;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+ SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id))) {
+ mbox_chan_txdone(chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ u32 val;
+
+ /* Select outbox FIFO mode and reset the outbox FIFO status */
+ writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Enable inbox FIFO overflow and delivery interrupt */
+ val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+ writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+ /* Enable outbox FIFO not empty interrupt */
+ val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+ writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+ return 0;
+}
+
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+ /* Disable inbox & outbox interrupt */
+ writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+ .send_data = sprd_mbox_send_data,
+ .flush = sprd_mbox_flush,
+ .startup = sprd_mbox_startup,
+ .shutdown = sprd_mbox_shutdown,
+};
+
+static void sprd_mbox_disable(void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_mbox_priv *priv;
+ int ret, inbox_irq, outbox_irq;
+ unsigned long id;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ /*
+ * The Spreadtrum mailbox uses an inbox to send messages to the target
+ * core, and uses an outbox to receive messages from other cores.
+ *
+	 * Thus the mailbox controller supplies two separate register ranges
+	 * and IRQ lines, one set for the inbox and one for the outbox.
+ */
+ priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->inbox_base))
+ return PTR_ERR(priv->inbox_base);
+
+ priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->outbox_base))
+ return PTR_ERR(priv->outbox_base);
+
+ priv->clk = devm_clk_get(dev, "enable");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get mailbox clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
+ if (ret) {
+ dev_err(dev, "failed to add mailbox disable action\n");
+ return ret;
+ }
+
+ inbox_irq = platform_get_irq(pdev, 0);
+ if (inbox_irq < 0)
+ return inbox_irq;
+
+ ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ outbox_irq = platform_get_irq(pdev, 1);
+ if (outbox_irq < 0)
+ return outbox_irq;
+
+ ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ /* Get the default outbox FIFO depth */
+ priv->outbox_fifo_depth =
+ readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+ priv->mbox.dev = dev;
+ priv->mbox.chans = &priv->chan[0];
+ priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+ priv->mbox.ops = &sprd_mbox_ops;
+ priv->mbox.txdone_irq = true;
+
+ for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+ priv->chan[id].con_priv = (void *)id;
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ dev_err(dev, "failed to register mailbox: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_mbox_of_match[] = {
+ { .compatible = "sprd,sc9860-mailbox", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+static struct platform_driver sprd_mbox_driver = {
+ .driver = {
+ .name = "sprd-mailbox",
+ .of_match_table = sprd_mbox_of_match,
+ },
+ .probe = sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
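/*
 * A note on the probe above: pairing clk_prepare_enable() with
 * devm_add_action_or_reset() releases the clock both on any later probe
 * failure and at unbind, which is why the driver needs no .remove
 * callback. The pattern in isolation (names here are illustrative):
 */
static void demo_clk_off(void *data)
{
	clk_disable_unprepare(data);
}

static int demo_clk_on(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* Runs automatically, in reverse registration order, on cleanup. */
	return devm_add_action_or_reset(dev, demo_clk_off, clk);
}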
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 86887c9a349a..f44079d62b1a 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
- if (IS_ERR(mchan->req_buf)) {
+ if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->req_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
@@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
- if (IS_ERR(mchan->resp_buf)) {
+ if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->resp_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
- if (IS_ERR(mchan->req_buf)) {
+ if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->req_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
- if (IS_ERR(mchan->resp_buf)) {
+ if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->resp_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -668,10 +664,9 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
/* IPI IRQ */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "unable to find IPI IRQ.\n");
+ if (ret < 0)
goto free_mbox_dev;
- }
+
pdata->irq = ret;
ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
IRQF_SHARED, dev_name(dev), pdata);
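/*
 * The fix above matters because the two common mapping helpers signal
 * failure differently: devm_ioremap() returns NULL, while
 * devm_platform_ioremap_resource() returns an ERR_PTR(). A sketch of the
 * two correct checks side by side (illustrative only):
 */
	void __iomem *a, *b;

	a = devm_ioremap(dev, res->start, resource_size(res));
	if (!a)				/* NULL on failure, never an ERR_PTR */
		return -ENOMEM;

	b = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(b))			/* ERR_PTR on failure, never NULL */
		return PTR_ERR(b);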
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0585efa47d8f..c2c5bc4fb702 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3669,7 +3669,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->disk = disk;
if (__nvme_revalidate_disk(disk, id))
- goto out_free_disk;
+ goto out_put_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
ret = nvme_nvm_register(ns, disk_name, node);
@@ -3696,8 +3696,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
/* prevent double queue cleanup */
ns->disk->queue = NULL;
put_disk(ns->disk);
- out_free_disk:
- del_gendisk(ns->disk);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
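/*
 * The relabelled jump above follows the usual rule for goto unwinding:
 * each label undoes only what had succeeded before the jump. The disk
 * has not been added yet, so only put_disk() is valid; del_gendisk()
 * would tear down state that was never created. A skeleton of the
 * pattern, with hypothetical helpers:
 */
static int demo_setup(struct demo_dev *d)
{
	d->disk = demo_alloc_disk(d);		/* hypothetical */
	if (!d->disk)
		return -ENOMEM;

	if (demo_validate(d))			/* hypothetical */
		goto out_put_disk;		/* allocated, never added */

	demo_add_disk(d->disk);			/* visible from here on */
	return 0;

out_put_disk:
	demo_put_disk(d->disk);			/* matches the allocation only */
	return -EIO;
}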
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index cb0007592c12..e999a8c4b7e8 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2634,10 +2634,11 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- if (!(op->flags & FCOP_FLAGS_AEN))
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
- nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fa5c75501049..c0f4226d3299 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -599,8 +599,7 @@ static inline void nvme_trace_bio_complete(struct request *req,
struct nvme_ns *ns = req->q->queuedata;
if (req->cmd_flags & REQ_NVME_MPATH)
- trace_block_bio_complete(ns->head->disk->queue,
- req->bio, status);
+ trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
extern struct device_attribute dev_attr_ana_grpid;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d690d5593a80..e2bacd369a88 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2950,9 +2950,15 @@ static int nvme_suspend(struct device *dev)
* the PCI bus layer to put it into D3 in order to take the PCIe link
* down, so as to allow the platform to achieve its minimum low-power
* state (which may not be possible if the link is up).
+ *
+ * If a host memory buffer is enabled, shut down the device as the NVMe
+ * specification allows the device to access the host memory buffer in
+ * host DRAM from all power states, but hosts will fail access to DRAM
+ * during S3.
*/
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
+ ndev->nr_host_mem_descs ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
return nvme_disable_prepare_reset(ndev, true);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1843110ec34f..3345ec7efaff 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -131,8 +131,8 @@ struct nvme_tcp_ctrl {
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
-static struct blk_mq_ops nvme_tcp_mq_ops;
-static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static const struct blk_mq_ops nvme_tcp_mq_ops;
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
@@ -2301,7 +2301,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
return queue->nr_cqe;
}
-static struct blk_mq_ops nvme_tcp_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
@@ -2312,7 +2312,7 @@ static struct blk_mq_ops nvme_tcp_mq_ops = {
.poll = nvme_tcp_poll,
};
-static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
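/*
 * The constifications above (and in the target/tcp.c hunk below) move
 * these function-pointer tables into read-only memory, so a stray write
 * faults instead of silently redirecting a hook. A self-contained sketch
 * of the idiom with made-up names:
 */
struct demo_ops {
	int (*send)(void *msg);
};

static int demo_send(void *msg)
{
	return 0;
}

/* Placed in .rodata; the structure cannot be modified at runtime. */
static const struct demo_ops demo_ops = {
	.send = demo_send,
};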
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6392bcd30bd7..6e2f623e472e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -129,7 +129,22 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
-static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
+{
+ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ struct nvmet_req *req;
+
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, status);
+ mutex_lock(&ctrl->lock);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
struct nvmet_async_event *aen;
struct nvmet_req *req;
@@ -139,15 +154,14 @@ static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
aen = list_first_entry(&ctrl->async_events,
struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- if (status == 0)
- nvmet_set_result(req, nvmet_async_event_result(aen));
+ nvmet_set_result(req, nvmet_async_event_result(aen));
list_del(&aen->entry);
kfree(aen);
mutex_unlock(&ctrl->lock);
trace_nvmet_async_event(ctrl, req->cqe->result.u32);
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, 0);
mutex_lock(&ctrl->lock);
}
mutex_unlock(&ctrl->lock);
@@ -170,7 +184,7 @@ static void nvmet_async_event_work(struct work_struct *work)
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, async_event_work);
- nvmet_async_events_process(ctrl, 0);
+ nvmet_async_events_process(ctrl);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -779,7 +793,6 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_ctrl *ctrl = sq->ctrl;
/*
@@ -787,7 +800,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
* queue doesn't have outstanding requests on it.
*/
if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
- nvmet_async_events_process(ctrl, status);
+ nvmet_async_events_failall(ctrl);
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
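/*
 * nvmet_async_events_failall() above uses the drop-relock idiom: entries
 * are claimed under ctrl->lock, but each completion runs with the lock
 * released, since nvmet_req_complete() may call back into code that takes
 * the same lock. A condensed restatement with hypothetical helpers:
 */
	mutex_lock(&lock);
	while (have_pending()) {	/* re-checked after every relock */
		item = claim_next();	/* still under the lock */
		mutex_unlock(&lock);
		complete_item(item);	/* may sleep or re-enter */
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);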
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 1669177cd26c..de9217cfd22d 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -153,7 +153,7 @@ static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
-static struct nvmet_fabrics_ops nvmet_tcp_ops;
+static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
@@ -1713,7 +1713,7 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
}
}
-static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
.msdbd = 1,
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 8dd1278bec04..7719ae4e2c56 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -35,7 +35,7 @@
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
-#define L3C_PERF_CTRL_EN 0x20000
+#define L3C_PERF_CTRL_EN 0x10000
#define L3C_EVTYPE_NONE 0xff
/*
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 494f853f2206..490d353d5fde 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -32,7 +32,7 @@
#include <linux/usb/functionfs.h>
#include <linux/aio.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/poll.h>
#include <linux/eventfd.h>
@@ -824,13 +824,9 @@ static void ffs_user_copy_worker(struct work_struct *work)
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
- mm_segment_t oldfs = get_fs();
-
- set_fs(USER_DS);
- use_mm(io_data->mm);
+ kthread_use_mm(io_data->mm);
ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
- unuse_mm(io_data->mm);
- set_fs(oldfs);
+ kthread_unuse_mm(io_data->mm);
}
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
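/*
 * This hunk and the ones that follow replace the old use_mm()/unuse_mm()
 * plus get_fs()/set_fs(USER_DS) pairs with kthread_use_mm() and
 * kthread_unuse_mm(), which adopt a userspace mm from a kernel thread
 * without touching the address limit. A minimal worker on the new API
 * (illustrative; the mm is assumed to be pinned by the submitter):
 */
#include <linux/kthread.h>
#include <linux/sched/mm.h>

static int demo_worker(void *data)
{
	struct mm_struct *mm = data;

	kthread_use_mm(mm);
	/* copy_to_user()/copy_from_user() now operate on that mm. */
	kthread_unuse_mm(mm);
	return 0;
}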
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 3afddd3bea6e..9ee0bfe7bcda 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -21,7 +21,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
@@ -462,9 +462,9 @@ static void ep_user_copy_worker(struct work_struct *work)
struct kiocb *iocb = priv->iocb;
size_t ret;
- use_mm(mm);
+ kthread_use_mm(mm);
ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
- unuse_mm(mm);
+ kthread_unuse_mm(mm);
if (!ret)
ret = -EFAULT;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 186acd8675ff..5e556ac9102a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -27,7 +27,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
@@ -2817,7 +2817,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
return -EPERM;
if (kthread)
- use_mm(mm);
+ kthread_use_mm(mm);
else if (current->mm != mm)
goto out;
@@ -2844,7 +2844,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
*copied = copy_from_user(data, (void __user *)vaddr,
count) ? 0 : count;
if (kthread)
- unuse_mm(mm);
+ kthread_unuse_mm(mm);
out:
mmput(mm);
return *copied ? 0 : -EFAULT;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 062595ee1f83..d7b8df3edffc 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -14,7 +14,6 @@
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -335,10 +334,8 @@ static int vhost_worker(void *data)
struct vhost_dev *dev = data;
struct vhost_work *work, *work_next;
struct llist_node *node;
- mm_segment_t oldfs = get_fs();
- set_fs(USER_DS);
- use_mm(dev->mm);
+ kthread_use_mm(dev->mm);
for (;;) {
/* mb paired w/ kthread_stop */
@@ -366,8 +363,7 @@ static int vhost_worker(void *data)
schedule();
}
}
- unuse_mm(dev->mm);
- set_fs(oldfs);
+ kthread_unuse_mm(dev->mm);
return 0;
}