Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                 |  51
-rw-r--r--  drivers/iommu/Makefile                |   5
-rw-r--r--  drivers/iommu/amd_iommu.c             |  15
-rw-r--r--  drivers/iommu/amd_iommu_init.c        |  17
-rw-r--r--  drivers/iommu/amd_iommu_proto.h       |   3
-rw-r--r--  drivers/iommu/amd_iommu_types.h       |   2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c          |  35
-rw-r--r--  drivers/iommu/arm-smmu.c              | 935
-rw-r--r--  drivers/iommu/fsl_pamu.c              | 216
-rw-r--r--  drivers/iommu/fsl_pamu.h              |  15
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c       | 173
-rw-r--r--  drivers/iommu/intel-iommu.c           |  57
-rw-r--r--  drivers/iommu/intel_irq_remapping.c   |  96
-rw-r--r--  drivers/iommu/io-pgtable-arm.c        | 986
-rw-r--r--  drivers/iommu/io-pgtable.c            |  82
-rw-r--r--  drivers/iommu/io-pgtable.h            | 143
-rw-r--r--  drivers/iommu/iommu.c                 |   7
-rw-r--r--  drivers/iommu/iova.c                  |  53
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c            | 674
-rw-r--r--  drivers/iommu/irq_remapping.c         |  74
-rw-r--r--  drivers/iommu/irq_remapping.h         |   7
-rw-r--r--  drivers/iommu/omap-iommu.c            |   2
-rw-r--r--  drivers/iommu/rockchip-iommu.c        |   1
-rw-r--r--  drivers/iommu/tegra-gart.c            |   3
24 files changed, 2085 insertions, 1567 deletions
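
The new files io-pgtable.c, io-pgtable.h and io-pgtable-arm.c carry the bulk of
the insertions: they factor the ARM LPAE page-table code out of arm-smmu.c into
a generic allocator that other drivers (ipmmu-vmsa here) can share. The header
itself is not reproduced in this excerpt, but from its call sites in the
arm-smmu.c hunks below the interface looks roughly like the following sketch;
struct, member and function names are taken from the diff, while the exact
field types and the enum terminator are assumptions:

#include <linux/types.h>

enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	IO_PGTABLE_NUM_FMTS,	/* assumed terminator */
};

struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
			      void *cookie);
	void (*tlb_sync)(void *cookie);
	void (*flush_pgtable)(void *addr, size_t size, void *cookie);
};

struct io_pgtable_cfg {
	unsigned long	pgsize_bitmap;	/* requested, narrowed on return */
	unsigned int	ias;		/* input address size, in bits */
	unsigned int	oas;		/* output address size, in bits */
	const struct iommu_gather_ops *tlb;

	/* Format-specific output, filled in by the allocator */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;
	};
};

struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
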
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 325188eef1c1..baa0d9786f50 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -4,6 +4,7 @@ config IOMMU_API
menuconfig IOMMU_SUPPORT
bool "IOMMU Hardware Support"
+ depends on MMU
default y
---help---
Say Y here if you want to compile device drivers for IO Memory
@@ -13,13 +14,43 @@ menuconfig IOMMU_SUPPORT
if IOMMU_SUPPORT
+menu "Generic IOMMU Pagetable Support"
+
+# Selected by the actual pagetable implementations
+config IOMMU_IO_PGTABLE
+ bool
+
+config IOMMU_IO_PGTABLE_LPAE
+ bool "ARMv7/v8 Long Descriptor Format"
+ select IOMMU_IO_PGTABLE
+ help
+ Enable support for the ARM long descriptor pagetable format.
+ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
+ sizes at both stage-1 and stage-2, as well as address spaces
+ up to 48-bits in size.
+
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+ bool "LPAE selftests"
+ depends on IOMMU_IO_PGTABLE_LPAE
+ help
+ Enable self-tests for LPAE page table allocator. This performs
+ a series of page-table consistency checks during boot.
+
+ If unsure, say N here.
+
+endmenu
+
+config IOMMU_IOVA
+ bool
+
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
config FSL_PAMU
bool "Freescale IOMMU support"
- depends on PPC_E500MC
+ depends on PPC32
+ depends on PPC_E500MC || COMPILE_TEST
select IOMMU_API
select GENERIC_ALLOCATOR
help
@@ -30,7 +61,8 @@ config FSL_PAMU
# MSM IOMMU support
config MSM_IOMMU
bool "MSM IOMMU Support"
- depends on ARCH_MSM8X60 || ARCH_MSM8960
+ depends on ARM
+ depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -91,6 +123,7 @@ config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
select IOMMU_API
+ select IOMMU_IOVA
select DMAR_TABLE
help
DMA remapping (DMAR) devices support enables independent address
@@ -140,7 +173,8 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARCH_OMAP2PLUS
+ depends on ARM && MMU
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
config OMAP_IOMMU_DEBUG
@@ -187,7 +221,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && ARM
+ depends on ARCH_EXYNOS && ARM && MMU
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
@@ -216,7 +250,7 @@ config SHMOBILE_IPMMU_TLB
config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
- depends on ARM
+ depends on ARM && MMU
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -287,6 +321,7 @@ config IPMMU_VMSA
depends on ARM_LPAE
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
help
Support for the Renesas VMSA-compatible IPMMU found in the
@@ -304,13 +339,13 @@ config SPAPR_TCE_IOMMU
config ARM_SMMU
bool "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || (ARM_LPAE && OF)
+ depends on (ARM64 || ARM) && MMU
select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
help
Support for implementations of the ARM System MMU architecture
- versions 1 and 2. The driver supports both v7l and v8l table
- formats with 4k and 64k page sizes.
+ versions 1 and 2.
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
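
To see how the new Kconfig symbols are meant to be consumed: a driver selects
IOMMU_IO_PGTABLE_LPAE and then allocates its tables through the shared
allocator instead of hand-rolling page-table code. A minimal, hypothetical
init path might look like the sketch below; the foo_* names are illustrative,
while the API calls match their use in the arm-smmu.c hunks later in this
series:

#include <linux/sizes.h>
#include "io-pgtable.h"		/* the new header added by this series */

struct foo_domain {
	struct io_pgtable_ops *pgtbl_ops;
};

static const struct iommu_gather_ops foo_gather_ops;	/* TLB callbacks, elided */
static void foo_write_ttbr(struct foo_domain *dom, u64 ttbr);

static int foo_iommu_init_domain(struct foo_domain *dom)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,	/* input (virtual) address bits */
		.oas		= 40,	/* output (physical) address bits */
		.tlb		= &foo_gather_ops,
	};

	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
	if (!dom->pgtbl_ops)
		return -ENOMEM;

	/*
	 * cfg is updated in place: pgsize_bitmap is narrowed to what the
	 * format really supports, and the TTBR/TCR/MAIR values to program
	 * into the hardware come back in cfg.arm_lpae_s1_cfg.
	 */
	foo_write_ttbr(dom, cfg.arm_lpae_s1_cfg.ttbr[0]);
	return 0;
}
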
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 7b976f294a69..080ffab4ed1c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,13 +1,16 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98024856df07..48882c126245 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -843,10 +843,10 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
size_t size, u16 domid, int pde)
{
u64 pages;
- int s;
+ bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
+ s = false;
if (pages > 1) {
/*
@@ -854,7 +854,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
+ s = true;
}
address &= PAGE_MASK;
@@ -874,10 +874,10 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size)
{
u64 pages;
- int s;
+ bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
+ s = false;
if (pages > 1) {
/*
@@ -885,7 +885,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
+ s = true;
}
address &= PAGE_MASK;
@@ -4284,7 +4284,6 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id)
}
struct irq_remap_ops amd_iommu_irq_ops = {
- .supported = amd_iommu_supported,
.prepare = amd_iommu_prepare,
.enable = amd_iommu_enable,
.disable = amd_iommu_disable,
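
Note the removal of the .supported callback above: the irq remapping core is
now expected to probe a driver by calling ->prepare() and treating a non-zero
return as "not supported" (amd_iommu_prepare() returns -ENODEV in that case,
see the amd_iommu_init.c hunk below). A plausible shape for the core-side
check follows; irq_remapping.c is changed by this series but not shown in this
excerpt, so this is a sketch, not the actual code:

static struct irq_remap_ops *pick_remap_ops(void)
{
	if (amd_iommu_irq_ops.prepare() == 0)
		return &amd_iommu_irq_ops;

	return NULL;	/* fall back to non-remapped interrupts */
}
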
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b0522f15730f..450ef5001a65 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -2014,9 +2014,6 @@ static bool detect_ivrs(void)
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
- if (!disable_irq_remap)
- amd_iommu_irq_remap = true;
-
return true;
}
@@ -2123,12 +2120,14 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
- return iommu_go_to_state(IOMMU_ACPI_FINISHED);
-}
+ int ret;
-int __init amd_iommu_supported(void)
-{
- return amd_iommu_irq_remap ? 1 : 0;
+ amd_iommu_irq_remap = true;
+
+ ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
+ if (ret)
+ return ret;
+ return amd_iommu_irq_remap ? 0 : -ENODEV;
}
int __init amd_iommu_enable(void)
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 95ed6deae47f..72b0fd455e24 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -33,7 +33,6 @@ extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);
/* Needed for interrupt remapping */
-extern int amd_iommu_supported(void);
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index cec51a8ba844..c4fffb710c58 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90f70d0e1141..6d5a5c44453b 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -31,7 +31,7 @@
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
+MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
@@ -151,18 +151,6 @@ static void put_device_state(struct device_state *dev_state)
wake_up(&dev_state->wq);
}
-static void put_device_state_wait(struct device_state *dev_state)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_dec_and_test(&dev_state->count))
- schedule();
- finish_wait(&dev_state->wq, &wait);
-
- free_device_state(dev_state);
-}
-
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
@@ -278,14 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
-
- if (!atomic_dec_and_test(&pasid_state->count))
- schedule();
-
- finish_wait(&pasid_state->wq, &wait);
+ wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@@ -851,7 +832,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
- put_device_state_wait(dev_state);
+ put_device_state(dev_state);
+ /*
+ * Wait until the last reference is dropped before freeing
+ * the device state.
+ */
+ wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+ free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
@@ -921,7 +908,7 @@ static int __init amd_iommu_v2_init(void)
{
int ret;
- pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+ pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
if (!amd_iommu_v2_supported()) {
pr_info("AMD IOMMUv2 functionality not available on this system\n");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6cd47b75286f..fc13dd56953e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -23,8 +23,6 @@
* - Stream-matching and stream-indexing
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
- * - 4k and 64k pages, with contiguous pte hints.
- * - Up to 48-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/
@@ -36,7 +34,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
-#include <linux/mm.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
@@ -46,7 +44,7 @@
#include <linux/amba/bus.h>
-#include <asm/pgalloc.h>
+#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
@@ -71,40 +69,6 @@
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
? 0x400 : 0))
-/* Page table bits */
-#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
-#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
-#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
-#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
-#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
-#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
-
-#if PAGE_SIZE == SZ_4K
-#define ARM_SMMU_PTE_CONT_ENTRIES 16
-#elif PAGE_SIZE == SZ_64K
-#define ARM_SMMU_PTE_CONT_ENTRIES 32
-#else
-#define ARM_SMMU_PTE_CONT_ENTRIES 1
-#endif
-
-#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
-#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-
-/* Stage-1 PTE */
-#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
-#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
-
-/* Stage-2 PTE */
-#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
-#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2)
-#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2)
-#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2)
-
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
@@ -132,17 +96,12 @@
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
-#define ARM_SMMU_GR0_PIDR0 0xfe0
-#define ARM_SMMU_GR0_PIDR1 0xfe4
-#define ARM_SMMU_GR0_PIDR2 0xfe8
#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
-#define ID0_PTFS_SHIFT 24
-#define ID0_PTFS_MASK 0x2
-#define ID0_PTFS_V8_ONLY 0x2
+#define ID0_ATOSNS (1 << 26)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
@@ -169,11 +128,7 @@
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
-#define PIDR2_ARCH_SHIFT 4
-#define PIDR2_ARCH_MASK 0xf
-
/* Global TLB invalidation */
-#define ARM_SMMU_GR0_STLBIALL 0x60
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
@@ -231,13 +186,25 @@
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0_LO 0x20
#define ARM_SMMU_CB_TTBR0_HI 0x24
+#define ARM_SMMU_CB_TTBR1_LO 0x28
+#define ARM_SMMU_CB_TTBR1_HI 0x2c
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+#define ARM_SMMU_CB_PAR_LO 0x50
+#define ARM_SMMU_CB_PAR_HI 0x54
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR_LO 0x60
#define ARM_SMMU_CB_FAR_HI 0x64
#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_ATS1PR_LO 0x800
+#define ARM_SMMU_CB_ATS1PR_HI 0x804
+#define ARM_SMMU_CB_ATSR 0x8f0
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
@@ -249,47 +216,16 @@
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
-#define RESUME_RETRY (0 << 0)
-#define RESUME_TERMINATE (1 << 0)
-
-#define TTBCR_EAE (1 << 31)
+#define CB_PAR_F (1 << 0)
-#define TTBCR_PASIZE_SHIFT 16
-#define TTBCR_PASIZE_MASK 0x7
+#define ATSR_ACTIVE (1 << 0)
-#define TTBCR_TG0_4K (0 << 14)
-#define TTBCR_TG0_64K (1 << 14)
-
-#define TTBCR_SH0_SHIFT 12
-#define TTBCR_SH0_MASK 0x3
-#define TTBCR_SH_NS 0
-#define TTBCR_SH_OS 2
-#define TTBCR_SH_IS 3
-
-#define TTBCR_ORGN0_SHIFT 10
-#define TTBCR_IRGN0_SHIFT 8
-#define TTBCR_RGN_MASK 0x3
-#define TTBCR_RGN_NC 0
-#define TTBCR_RGN_WBWA 1
-#define TTBCR_RGN_WT 2
-#define TTBCR_RGN_WB 3
-
-#define TTBCR_SL0_SHIFT 6
-#define TTBCR_SL0_MASK 0x3
-#define TTBCR_SL0_LVL_2 0
-#define TTBCR_SL0_LVL_1 1
-
-#define TTBCR_T1SZ_SHIFT 16
-#define TTBCR_T0SZ_SHIFT 0
-#define TTBCR_SZ_MASK 0xf
+#define RESUME_RETRY (0 << 0)
+#define RESUME_TERMINATE (1 << 0)
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_MASK 0x7
-#define TTBCR2_PASIZE_SHIFT 0
-#define TTBCR2_PASIZE_MASK 0x7
-
-/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32 0
#define TTBCR2_ADDR_36 1
#define TTBCR2_ADDR_40 2
@@ -297,16 +233,7 @@
#define TTBCR2_ADDR_44 4
#define TTBCR2_ADDR_48 5
-#define TTBRn_HI_ASID_SHIFT 16
-
-#define MAIR_ATTR_SHIFT(n) ((n) << 3)
-#define MAIR_ATTR_MASK 0xff
-#define MAIR_ATTR_DEVICE 0x04
-#define MAIR_ATTR_NC 0x44
-#define MAIR_ATTR_WBRWA 0xff
-#define MAIR_ATTR_IDX_NC 0
-#define MAIR_ATTR_IDX_CACHE 1
-#define MAIR_ATTR_IDX_DEV 2
+#define TTBRn_HI_ASID_SHIFT 16
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
@@ -366,6 +293,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,10 +308,9 @@ struct arm_smmu_device {
u32 num_mapping_groups;
DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
- unsigned long s1_input_size;
- unsigned long s1_output_size;
- unsigned long s2_input_size;
- unsigned long s2_output_size;
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
u32 num_global_irqs;
u32 num_context_irqs;
@@ -397,7 +324,6 @@ struct arm_smmu_cfg {
u8 cbndx;
u8 irptndx;
u32 cbar;
- pgd_t *pgd;
};
#define INVALID_IRPTNDX 0xff
@@ -412,11 +338,15 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
+ struct io_pgtable_ops *pgtbl_ops;
+ spinlock_t pgtbl_lock;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
- spinlock_t lock;
+ struct mutex init_mutex; /* Protects smmu pointer */
};
+static struct iommu_ops arm_smmu_ops;
+
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
@@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}
/* Wait for any pending TLB invalidations to complete */
-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
int count = 0;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
@@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
}
}
-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_tlb_sync(void *cookie)
{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ __arm_smmu_tlb_sync(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *base = ARM_SMMU_GR0(smmu);
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *base;
if (stage1) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
base + ARM_SMMU_GR0_TLBIVMID);
}
- arm_smmu_tlb_sync(smmu);
+ __arm_smmu_tlb_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *reg;
+
+ if (stage1) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+ iova &= ~12UL;
+ iova |= ARM_SMMU_CB_ASID(cfg);
+ writel_relaxed(iova, reg);
+#ifdef CONFIG_64BIT
+ } else {
+ iova >>= 12;
+ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+ writeq_relaxed(iova, reg);
+#endif
+ }
+#ifdef CONFIG_64BIT
+ } else if (smmu->version == ARM_SMMU_V2) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
+ ARM_SMMU_CB_S2_TLBIIPAS2;
+ writeq_relaxed(iova >> 12, reg);
+#endif
+ } else {
+ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
+ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+ }
+}
+
+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb(ishst);
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
}
+static struct iommu_gather_ops arm_smmu_gather_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context,
+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync,
+ .flush_pgtable = arm_smmu_flush_pgtable,
+};
+
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
int flags, ret;
@@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
return IRQ_HANDLED;
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
- dsb(ishst);
- } else {
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of
- * recursion here as the SMMU table walker will not be wired
- * through another SMMU.
- */
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
- }
-}
-
-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg)
{
u32 reg;
bool stage1;
@@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
#else
reg = CBA2R_RW64_32BIT;
#endif
- writel_relaxed(reg,
- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-
- /* TTBCR2 */
- switch (smmu->s1_input_size) {
- case 32:
- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
- break;
- case 36:
- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
- break;
- case 39:
- case 40:
- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
- break;
- case 42:
- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
- break;
- case 44:
- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
- break;
- case 48:
- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
- break;
- }
-
- switch (smmu->s1_output_size) {
- case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
- break;
- case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
- break;
- case 39:
- case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
- break;
- case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
- break;
- case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
- break;
- case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
- break;
- }
-
- if (stage1)
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
- /* TTBR0 */
- arm_smmu_flush_pgtable(smmu, cfg->pgd,
- PTRS_PER_PGD * sizeof(pgd_t));
- reg = __pa(cfg->pgd);
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
- if (stage1)
+ /* TTBRs */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
- /*
- * TTBCR
- * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
- */
- if (smmu->version > ARM_SMMU_V1) {
- if (PAGE_SIZE == SZ_4K)
- reg = TTBCR_TG0_4K;
- else
- reg = TTBCR_TG0_64K;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
- if (!stage1) {
- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+ }
- switch (smmu->s2_output_size) {
+ /* TTBCR */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ if (smmu->version > ARM_SMMU_V1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+ switch (smmu->va_size) {
case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
break;
case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
break;
case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
break;
case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
break;
case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
break;
case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
break;
}
- } else {
- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
- reg = 0;
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
}
- reg |= TTBCR_EAE |
- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
-
- if (!stage1)
- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
-
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-
- /* MAIR0 (stage-1 only) */
+ /* MAIRs (stage-1 only) */
if (stage1) {
- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
}
/* SCTLR */
@@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_device *smmu)
{
int irq, start, ret = 0;
- unsigned long flags;
+ unsigned long ias, oas;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_cfg pgtbl_cfg;
+ enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- spin_lock_irqsave(&smmu_domain->lock, flags);
+ mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
goto out_unlock;
@@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
+ ias = smmu->va_size;
+ oas = smmu->ipa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S1;
+ else
+ fmt = ARM_32_LPAE_S1;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -949,6 +885,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
+ ias = smmu->ipa_size;
+ oas = smmu->pa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S2;
+ else
+ fmt = ARM_32_LPAE_S2;
break;
default:
ret = -EINVAL;
@@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
- ACCESS_ONCE(smmu_domain->smmu) = smmu;
- arm_smmu_init_context_bank(smmu_domain);
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .tlb = &arm_smmu_gather_ops,
+ };
+
+ smmu_domain->smmu = smmu;
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+ if (!pgtbl_ops) {
+ ret = -ENOMEM;
+ goto out_clear_smmu;
+ }
+
+ /* Update our support page sizes to reflect the page table format */
+ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ /* Initialise the context bank with our page table cfg */
+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+
+ /*
+ * Request context fault interrupt. Do this last to avoid the
+ * handler seeing a half-initialised domain state.
+ */
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
@@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = INVALID_IRPTNDX;
}
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+ smmu_domain->pgtbl_ops = pgtbl_ops;
return 0;
+out_clear_smmu:
+ smmu_domain->smmu = NULL;
out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+ mutex_unlock(&smmu_domain->init_mutex);
return ret;
}
@@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (!smmu)
return;
- /* Disable the context bank and nuke the TLB before freeing it. */
+ /*
+ * Disable the context bank and free the page tables before freeing
+ * it.
+ */
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- arm_smmu_tlb_inv_context(smmu_domain);
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
free_irq(irq, domain);
}
+ if (smmu_domain->pgtbl_ops)
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain;
- pgd_t *pgd;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
if (!smmu_domain)
return -ENOMEM;
- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
- if (!pgd)
- goto out_free_domain;
- smmu_domain->cfg.pgd = pgd;
-
- spin_lock_init(&smmu_domain->lock);
+ mutex_init(&smmu_domain->init_mutex);
+ spin_lock_init(&smmu_domain->pgtbl_lock);
domain->priv = smmu_domain;
return 0;
-
-out_free_domain:
- kfree(smmu_domain);
- return -ENOMEM;
-}
-
-static void arm_smmu_free_ptes(pmd_t *pmd)
-{
- pgtable_t table = pmd_pgtable(*pmd);
-
- __free_page(table);
-}
-
-static void arm_smmu_free_pmds(pud_t *pud)
-{
- int i;
- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
-
- pmd = pmd_base;
- for (i = 0; i < PTRS_PER_PMD; ++i) {
- if (pmd_none(*pmd))
- continue;
-
- arm_smmu_free_ptes(pmd);
- pmd++;
- }
-
- pmd_free(NULL, pmd_base);
-}
-
-static void arm_smmu_free_puds(pgd_t *pgd)
-{
- int i;
- pud_t *pud, *pud_base = pud_offset(pgd, 0);
-
- pud = pud_base;
- for (i = 0; i < PTRS_PER_PUD; ++i) {
- if (pud_none(*pud))
- continue;
-
- arm_smmu_free_pmds(pud);
- pud++;
- }
-
- pud_free(NULL, pud_base);
-}
-
-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
-{
- int i;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- pgd_t *pgd, *pgd_base = cfg->pgd;
-
- /*
- * Recursively free the page tables for this domain. We don't
- * care about speculative TLB filling because the tables should
- * not be active in any context bank at this point (SCTLR.M is 0).
- */
- pgd = pgd_base;
- for (i = 0; i < PTRS_PER_PGD; ++i) {
- if (pgd_none(*pgd))
- continue;
- arm_smmu_free_puds(pgd);
- pgd++;
- }
-
- kfree(pgd_base);
}
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
@@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain)
* already been detached.
*/
arm_smmu_destroy_domain_context(domain);
- arm_smmu_free_pgtables(smmu_domain);
kfree(smmu_domain);
}
@@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu, *dom_smmu;
+ struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
smmu = find_smmu_for_device(dev);
@@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EEXIST;
}
+ /* Ensure that the domain is finalised */
+ ret = arm_smmu_init_domain_context(domain, smmu);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
/*
* Sanity check the domain. We don't support domains across
* different SMMUs.
*/
- dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
- if (!dom_smmu) {
- /* Now that we have a master, we can finalise the domain */
- ret = arm_smmu_init_domain_context(domain, smmu);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- dom_smmu = smmu_domain->smmu;
- }
-
- if (dom_smmu != smmu) {
+ if (smmu_domain->smmu != smmu) {
dev_err(dev,
"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
@@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_domain_remove_master(smmu_domain, cfg);
}
-static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
- unsigned long end)
-{
- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
- (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
-}
-
-static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned long pfn, int prot, int stage)
-{
- pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
-
- if (pmd_none(*pmd)) {
- /* Allocate a new set of tables */
- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
-
- if (!table)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
- pmd_populate(NULL, pmd, table);
- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
- }
-
- if (stage == 1) {
- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
- pteval |= ARM_SMMU_PTE_AP_RDONLY;
-
- if (prot & IOMMU_CACHE)
- pteval |= (MAIR_ATTR_IDX_CACHE <<
- ARM_SMMU_PTE_ATTRINDX_SHIFT);
- } else {
- pteval |= ARM_SMMU_PTE_HAP_FAULT;
- if (prot & IOMMU_READ)
- pteval |= ARM_SMMU_PTE_HAP_READ;
- if (prot & IOMMU_WRITE)
- pteval |= ARM_SMMU_PTE_HAP_WRITE;
- if (prot & IOMMU_CACHE)
- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
- else
- pteval |= ARM_SMMU_PTE_MEMATTR_NC;
- }
-
- if (prot & IOMMU_NOEXEC)
- pteval |= ARM_SMMU_PTE_XN;
-
- /* If no access, create a faulting entry to avoid TLB fills */
- if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- pteval &= ~ARM_SMMU_PTE_PAGE;
-
- pteval |= ARM_SMMU_PTE_SH_IS;
- start = pmd_page_vaddr(*pmd) + pte_index(addr);
- pte = start;
-
- /*
- * Install the page table entries. This is fairly complicated
- * since we attempt to make use of the contiguous hint in the
- * ptes where possible. The contiguous hint indicates a series
- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
- * contiguous region with the following constraints:
- *
- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
- * - Each pte in the region has the contiguous hint bit set
- *
- * This complicates unmapping (also handled by this code, when
- * neither IOMMU_READ or IOMMU_WRITE are set) because it is
- * possible, yet highly unlikely, that a client may unmap only
- * part of a contiguous range. This requires clearing of the
- * contiguous hint bits in the range before installing the new
- * faulting entries.
- *
- * Note that re-mapping an address range without first unmapping
- * it is not supported, so TLB invalidation is not required here
- * and is instead performed at unmap and domain-init time.
- */
- do {
- int i = 1;
-
- pteval &= ~ARM_SMMU_PTE_CONT;
-
- if (arm_smmu_pte_is_contiguous_range(addr, end)) {
- i = ARM_SMMU_PTE_CONT_ENTRIES;
- pteval |= ARM_SMMU_PTE_CONT;
- } else if (pte_val(*pte) &
- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
- int j;
- pte_t *cont_start;
- unsigned long idx = pte_index(addr);
-
- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
- cont_start = pmd_page_vaddr(*pmd) + idx;
- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
- pte_val(*(cont_start + j)) &=
- ~ARM_SMMU_PTE_CONT;
-
- arm_smmu_flush_pgtable(smmu, cont_start,
- sizeof(*pte) *
- ARM_SMMU_PTE_CONT_ENTRIES);
- }
-
- do {
- *pte = pfn_pte(pfn, __pgprot(pteval));
- } while (pte++, pfn++, addr += PAGE_SIZE, --i);
- } while (addr != end);
-
- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
- return 0;
-}
-
-static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
- unsigned long addr, unsigned long end,
- phys_addr_t phys, int prot, int stage)
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
{
int ret;
- pmd_t *pmd;
- unsigned long next, pfn = __phys_to_pfn(phys);
-
-#ifndef __PAGETABLE_PMD_FOLDED
- if (pud_none(*pud)) {
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pmd)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
-
- pmd += pmd_index(addr);
- } else
-#endif
- pmd = pmd_offset(pud, addr);
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- do {
- next = pmd_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
- prot, stage);
- phys += next - addr;
- pfn = __phys_to_pfn(phys);
- } while (pmd++, addr = next, addr < end);
+ if (!ops)
+ return -ENODEV;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->map(ops, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
}
-static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- phys_addr_t phys, int prot, int stage)
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
- int ret = 0;
- pud_t *pud;
- unsigned long next;
-
-#ifndef __PAGETABLE_PUD_FOLDED
- if (pgd_none(*pgd)) {
- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pud)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
- pgd_populate(NULL, pgd, pud);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
-
- pud += pud_index(addr);
- } else
-#endif
- pud = pud_offset(pgd, addr);
+ size_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- do {
- next = pud_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
- prot, stage);
- phys += next - addr;
- } while (pud++, addr = next, addr < end);
+ if (!ops)
+ return 0;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->unmap(ops, iova, size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
}
-static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ dma_addr_t iova)
{
- int ret, stage;
- unsigned long end;
- phys_addr_t input_mask, output_mask;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- pgd_t *pgd = cfg->pgd;
- unsigned long flags;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct device *dev = smmu->dev;
+ void __iomem *cb_base;
+ u32 tmp;
+ u64 phys;
- if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
- stage = 2;
- input_mask = (1ULL << smmu->s2_input_size) - 1;
- output_mask = (1ULL << smmu->s2_output_size) - 1;
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+ if (smmu->version == 1) {
+ u32 reg = iova & ~0xfff;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
} else {
- stage = 1;
- input_mask = (1ULL << smmu->s1_input_size) - 1;
- output_mask = (1ULL << smmu->s1_output_size) - 1;
+ u32 reg = iova & ~0xfff;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
+ reg = ((u64)iova & ~0xfff) >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
}
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- if ((phys_addr_t)iova & ~input_mask)
- return -ERANGE;
-
- if (paddr & ~output_mask)
- return -ERANGE;
-
- spin_lock_irqsave(&smmu_domain->lock, flags);
- pgd += pgd_index(iova);
- end = iova + size;
- do {
- unsigned long next = pgd_addr_end(iova, end);
-
- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
- prot, stage);
- if (ret)
- goto out_unlock;
-
- paddr += next - iova;
- iova = next;
- } while (pgd++, iova != end);
-
-out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
-
- return ret;
-}
-
-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- struct arm_smmu_domain *smmu_domain = domain->priv;
-
- if (!smmu_domain)
- return -ENODEV;
+ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
+ !(tmp & ATSR_ACTIVE), 5, 50)) {
+ dev_err(dev,
+ "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
+ &iova);
+ return ops->iova_to_phys(ops, iova);
+ }
- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
-}
+ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
+ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
-{
- int ret;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ if (phys & CB_PAR_F) {
+ dev_err(dev, "translation fault!\n");
+ dev_err(dev, "PAR = 0x%llx\n", phys);
+ return 0;
+ }
- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
- arm_smmu_tlb_inv_context(smmu_domain);
- return ret ? 0 : size;
+ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+ dma_addr_t iova)
{
- pgd_t *pgdp, pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
+ phys_addr_t ret;
+ unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- pgdp = cfg->pgd;
- if (!pgdp)
+ if (!ops)
return 0;
- pgd = *(pgdp + pgd_index(iova));
- if (pgd_none(pgd))
- return 0;
-
- pud = *pud_offset(&pgd, iova);
- if (pud_none(pud))
- return 0;
-
- pmd = *pmd_offset(&pud, iova);
- if (pmd_none(pmd))
- return 0;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+ ret = arm_smmu_iova_to_phys_hard(domain, iova);
+ else
+ ret = ops->iova_to_phys(ops, iova);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
- if (pte_none(pte))
- return 0;
-
- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+ return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
@@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
+ int ret = 0;
struct arm_smmu_domain *smmu_domain = domain->priv;
+ mutex_lock(&smmu_domain->init_mutex);
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu)
- return -EPERM;
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- return 0;
+ break;
default:
- return -ENODEV;
+ ret = -ENODEV;
}
+
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
}
-static const struct iommu_ops arm_smmu_ops = {
+static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
@@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = {
.remove_device = arm_smmu_remove_device,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
- .pgsize_bitmap = (SECTION_SIZE |
- ARM_SMMU_PTE_CONT_SIZE |
- PAGE_SIZE),
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
@@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
/* Push the button */
- arm_smmu_tlb_sync(smmu);
+ __arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
@@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID0 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
-#ifndef CONFIG_64BIT
- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
- dev_err(smmu->dev, "\tno v7 descriptor support!\n");
- return -ENODEV;
- }
-#endif
/* Restrict available stages based on module parameter */
if (force_stage == 1)
@@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
+ if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+ dev_notice(smmu->dev, "\taddress translation ops\n");
+ }
+
if (id & ID0_CTTW) {
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
dev_notice(smmu->dev, "\tcoherent table walk\n");
@@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
/* Check for size mismatch of SMMU address space from mapped region */
- size = 1 <<
- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
size *= 2 << smmu->pgshift;
if (smmu->size != size)
dev_warn(smmu->dev,
"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
size, smmu->size);
- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
- ID1_NUMS2CB_MASK;
+ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
if (smmu->num_s2_context_banks > smmu->num_context_banks) {
dev_err(smmu->dev, "impossible number of S2 context banks!\n");
@@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID2 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
-
- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
-#ifdef CONFIG_64BIT
- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
-#else
- smmu->s2_input_size = min(32UL, size);
-#endif
+ smmu->ipa_size = size;
- /* The stage-2 output mask is also applied for bypass */
+ /* The output mask is also applied for bypass */
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
+ smmu->pa_size = size;
if (smmu->version == ARM_SMMU_V1) {
- smmu->s1_input_size = 32;
+ smmu->va_size = smmu->ipa_size;
+ size = SZ_4K | SZ_2M | SZ_1G;
} else {
-#ifdef CONFIG_64BIT
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
- size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
-#else
- size = 32;
+ smmu->va_size = arm_smmu_id_size_to_bits(size);
+#ifndef CONFIG_64BIT
+ smmu->va_size = min(32UL, smmu->va_size);
#endif
- smmu->s1_input_size = size;
-
- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
- PAGE_SIZE);
- return -ENODEV;
- }
+ size = 0;
+ if (id & ID2_PTFS_4K)
+ size |= SZ_4K | SZ_2M | SZ_1G;
+ if (id & ID2_PTFS_16K)
+ size |= SZ_16K | SZ_32M;
+ if (id & ID2_PTFS_64K)
+ size |= SZ_64K | SZ_512M;
}
+ arm_smmu_ops.pgsize_bitmap &= size;
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+
if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
- smmu->s1_input_size, smmu->s1_output_size);
+ smmu->va_size, smmu->ipa_size);
if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
- smmu->s2_input_size, smmu->s2_output_size);
+ smmu->ipa_size, smmu->pa_size);
return 0;
}
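
Two patterns in the arm-smmu.c rework above are worth calling out. First, all
page-table updates now go through smmu_domain->pgtbl_ops under pgtbl_lock,
with TLB maintenance delegated to the arm_smmu_gather_ops callbacks. Second,
the driver no longer hard-codes its supported page sizes:
arm_smmu_ops.pgsize_bitmap starts as -1UL, is masked by the ID2 register at
probe time, and is narrowed again by the page-table allocator at domain init.
A standalone miniature of the V2 probe-time half of that negotiation, using
the ID2_PTFS_* masks defined earlier in the file:

#include <linux/sizes.h>

static unsigned long foo_probe_pgsizes(u32 id2)
{
	unsigned long sizes = -1UL;	/* "anything", as in arm_smmu_ops */
	unsigned long hw = 0;

	if (id2 & ID2_PTFS_4K)
		hw |= SZ_4K | SZ_2M | SZ_1G;
	if (id2 & ID2_PTFS_16K)
		hw |= SZ_16K | SZ_32M;
	if (id2 & ID2_PTFS_64K)
		hw |= SZ_64K | SZ_512M;

	sizes &= hw;	/* probe-time restriction from the hardware */
	return sizes;	/* alloc_io_pgtable_ops() narrows this once more */
}
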
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 80ac68d884c5..abeedc9a78c2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -18,22 +18,13 @@
#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
-#include <linux/init.h>
-#include <linux/iommu.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
+#include "fsl_pamu.h"
+
#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/of_platform.h>
-#include <linux/bootmem.h>
#include <linux/genalloc.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-#include <asm/fsl_guts.h>
-#include "fsl_pamu.h"
+#include <asm/mpc85xx.h>
+#include <asm/fsl_guts.h>
/* define indexes for each operation mapping scenario */
#define OMI_QMAN 0x00
@@ -44,13 +35,13 @@
#define make64(high, low) (((u64)(high) << 32) | (low))
struct pamu_isr_data {
- void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
+ void __iomem *pamu_reg_base; /* Base address of PAMU regs */
unsigned int count; /* The number of PAMUs */
};
static struct paace *ppaact;
static struct paace *spaact;
-static struct ome *omt;
+static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@@ -58,14 +49,13 @@ static struct ome *omt;
* "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
-*/
-static const struct of_device_id guts_device_ids[] = {
+ */
+static const struct of_device_id guts_device_ids[] __initconst = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
};
-
/*
* Table for matching compatible strings, for device tree
* L3 cache controller node.
@@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = {
* "fsl,b4860-l3-cache-controller" corresponds to B4 &
* "fsl,p4080-l3-cache-controller" corresponds to other,
* SOCs.
-*/
+ */
static const struct of_device_id l3_device_ids[] = {
{ .compatible = "fsl,t4240-l3-cache-controller", },
{ .compatible = "fsl,b4860-l3-cache-controller", },
@@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = {
static u32 max_subwindow_count;
/* Pool for fspi allocation */
-struct gen_pool *spaace_pool;
+static struct gen_pool *spaace_pool;
/**
* pamu_get_max_subwin_cnt() - Return the maximum supported
@@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
- BUG_ON((addrspace_size & (addrspace_size - 1)));
+ BUG_ON(addrspace_size & (addrspace_size - 1));
/* window size is 2^(WSE+1) bytes */
return fls64(addrspace_size) - 2;
@@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
/* Derive the PAACE window count encoding for the subwindow count */
static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
{
- /* window count is 2^(WCE+1) bytes */
- return __ffs(subwindow_cnt) - 1;
+ /* window count is 2^(WCE+1) bytes */
+ return __ffs(subwindow_cnt) - 1;
}
/*
@@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
* If no SPAACE entry is available or the allocator can not reserve the required
* number of contiguous entries function returns ULONG_MAX indicating a failure.
*
-*/
+ */
static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
{
unsigned long spaace_addr;
@@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace) {
+ if (!paace)
return -ENOENT;
- }
}
set_bf(paace->impl_attr, PAACE_IA_CID, value);
@@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace) {
+ if (!paace)
return -ENOENT;
- }
- set_bf(paace->addr_bitfields, PAACE_AF_V,
- PAACE_V_INVALID);
+ set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
} else {
set_bf(paace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_DENIED);
+ PAACE_AP_PERMS_DENIED);
}
mb();
@@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin)
return 0;
}
-
/**
* pamu_config_paace() - Sets up PPAACE entry for specified liodn
*
@@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
unsigned long fspi;
if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
- pr_debug("window size too small or not a power of two %llx\n", win_size);
+ pr_debug("window size too small or not a power of two %pa\n",
+ &win_size);
return -EINVAL;
}
@@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
}
ppaace = pamu_get_ppaace(liodn);
- if (!ppaace) {
+ if (!ppaace)
return -ENOENT;
- }
/* window size is 2^(WSE+1) bytes */
set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
- map_addrspace_size_to_wse(win_size));
+ map_addrspace_size_to_wse(win_size));
pamu_init_ppaace(ppaace);
@@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
{
struct paace *paace;
-
/* setup sub-windows */
if (!subwin_cnt) {
pr_debug("Invalid subwindow count\n");
@@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
}
/**
-* get_ome_index() - Returns the index in the operation mapping table
-* for device.
-* @*omi_index: pointer for storing the index value
-*
-*/
+ * get_ome_index() - Returns the index in the operation mapping table
+ * for device.
+ * @*omi_index: pointer for storing the index value
+ *
+ */
void get_ome_index(u32 *omi_index, struct device *dev)
{
if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
@@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
node = of_find_matching_node(NULL, l3_device_ids);
if (node) {
- prop = of_get_property(node, "cache-stash-id", 0);
+ prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
- pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ pr_debug("missing cache-stash-id at %s\n",
+ node->full_name);
of_node_put(node);
return ~(u32)0;
}
@@ -570,9 +556,10 @@ found_cpu_node:
/* find the hwnode that represents the cache */
for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
if (stash_dest_hint == cache_level) {
- prop = of_get_property(node, "cache-stash-id", 0);
+ prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
- pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ pr_debug("missing cache-stash-id at %s\n",
+ node->full_name);
of_node_put(node);
return ~(u32)0;
}
@@ -580,10 +567,10 @@ found_cpu_node:
return be32_to_cpup(prop);
}
- prop = of_get_property(node, "next-level-cache", 0);
+ prop = of_get_property(node, "next-level-cache", NULL);
if (!prop) {
pr_debug("can't find next-level-cache at %s\n",
- node->full_name);
+ node->full_name);
of_node_put(node);
return ~(u32)0; /* can't traverse any further */
}
@@ -598,7 +585,7 @@ found_cpu_node:
}
pr_debug("stash dest not found for %d on vcpu %d\n",
- stash_dest_hint, vcpu);
+ stash_dest_hint, vcpu);
return ~(u32)0;
}
@@ -612,7 +599,7 @@ found_cpu_node:
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
case QMAN_PORTAL_PAACE:
set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
ppaace->op_encode.index_ot.omi = OMI_QMAN;
- /*Set DQRR and Frame stashing for the L3 cache */
+ /* Set DQRR and Frame stashing for the L3 cache */
set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
break;
case BMAN_PAACE:
@@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void get_pamu_cap_values(unsigned long pamu_reg_base)
+static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
*/
out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
- PAMU_ACCESS_VIOLATION_ENABLE);
+ PAMU_ACCESS_VIOLATION_ENABLE);
out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
return 0;
}
@@ -757,9 +744,9 @@ static void __init setup_liodns(void)
ppaace->wbah = 0;
set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
set_bf(ppaace->impl_attr, PAACE_IA_ATM,
- PAACE_ATM_NO_XLATE);
+ PAACE_ATM_NO_XLATE);
set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_ALL);
+ PAACE_AP_PERMS_ALL);
if (of_device_is_compatible(node, "fsl,qman-portal"))
setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
if (of_device_is_compatible(node, "fsl,qman"))
@@ -772,7 +759,7 @@ static void __init setup_liodns(void)
}
}
-irqreturn_t pamu_av_isr(int irq, void *arg)
+static irqreturn_t pamu_av_isr(int irq, void *arg)
{
struct pamu_isr_data *data = arg;
phys_addr_t phys;
@@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
pr_emerg("AVS1=%08x\n", avs1);
pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
- pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
- in_be32(p + PAMU_AVAL)));
+ pr_emerg("AVA=%016llx\n",
+ make64(in_be32(p + PAMU_AVAH),
+ in_be32(p + PAMU_AVAL)));
pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
- pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
- in_be32(p + PAMU_POEAL)));
+ pr_emerg("POEA=%016llx\n",
+ make64(in_be32(p + PAMU_POEAH),
+ in_be32(p + PAMU_POEAL)));
phys = make64(in_be32(p + PAMU_POEAH),
- in_be32(p + PAMU_POEAL));
+ in_be32(p + PAMU_POEAL));
/* Assume that POEA points to a PAACE */
if (phys) {
@@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Only the first four words are relevant */
for (j = 0; j < 4; j++)
- pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
+ pr_emerg("PAACE[%u]=%08x\n",
+ j, in_be32(paace + j));
}
/* clear access violation condition */
- out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
+ out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(!paace);
/* check if we got a violation for a disabled LIODN */
@@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Disable the LIODN */
ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(ret);
- pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
+ pr_emerg("Disabling liodn %x\n",
+ avs1 >> PAMU_AVS1_LIODN_SHIFT);
}
out_be32((p + PAMU_PICS), pics);
}
}
-
return IRQ_HANDLED;
}
@@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
}
if (i == 0 || i == num_laws) {
- /* This should never happen*/
+ /* This should never happen */
ret = -ENOENT;
goto error;
}
@@ -998,26 +988,27 @@ error:
static const struct {
u32 svr;
u32 port_id;
-} port_id_map[] = {
- {0x82100010, 0xFF000000}, /* P2040 1.0 */
- {0x82100011, 0xFF000000}, /* P2040 1.1 */
- {0x82100110, 0xFF000000}, /* P2041 1.0 */
- {0x82100111, 0xFF000000}, /* P2041 1.1 */
- {0x82110310, 0xFF000000}, /* P3041 1.0 */
- {0x82110311, 0xFF000000}, /* P3041 1.1 */
- {0x82010020, 0xFFF80000}, /* P4040 2.0 */
- {0x82000020, 0xFFF80000}, /* P4080 2.0 */
- {0x82210010, 0xFC000000}, /* P5010 1.0 */
- {0x82210020, 0xFC000000}, /* P5010 2.0 */
- {0x82200010, 0xFC000000}, /* P5020 1.0 */
- {0x82050010, 0xFF800000}, /* P5021 1.0 */
- {0x82040010, 0xFF800000}, /* P5040 1.0 */
+} port_id_map[] __initconst = {
+ {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
+ {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
+ {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
+ {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */
+ {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */
+ {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */
+ {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */
+ {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */
+ {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */
+ {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */
+ {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */
+ {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */
+ {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */
};
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
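The rewritten table encodes each entry as the SoC's SVR part number (upper bits) shifted over the 8-bit revision, so the probe-time lookup reduces to a masked compare; illustratively:

	u32 svr = mfspr(SPRN_SVR) & ~SVR_SECURITY;

	/* e.g. a P4080 rev 2.0 part reports svr == (SVR_P4080 << 8) | 0x20 */
	if (svr == ((SVR_P4080 << 8) | 0x20))
		csd_port_id = 0xFFF80000;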
static int __init fsl_pamu_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
struct ccsr_guts __iomem *guts_regs = NULL;
u32 pamubypenr, pamu_counter;
@@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
* NOTE : All PAMUs share the same LIODN tables.
*/
- pamu_regs = of_iomap(pdev->dev.of_node, 0);
+ pamu_regs = of_iomap(dev->of_node, 0);
if (!pamu_regs) {
- dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
+ dev_err(dev, "ioremap of PAMU node failed\n");
return -ENOMEM;
}
- of_get_address(pdev->dev.of_node, 0, &size, NULL);
+ of_get_address(dev->of_node, 0, &size, NULL);
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (irq == NO_IRQ) {
- dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
+ dev_warn(dev, "no interrupts listed in PAMU node\n");
goto error;
}
- data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
- dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
ret = -ENOMEM;
goto error;
}
@@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* The ISR needs access to the regs, so we won't iounmap them */
ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
if (ret < 0) {
- dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
- ret, irq);
+ dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
goto error;
}
guts_node = of_find_matching_node(NULL, guts_device_ids);
if (!guts_node) {
- dev_err(&pdev->dev, "could not find GUTS node %s\n",
- pdev->dev.of_node->full_name);
+ dev_err(dev, "could not find GUTS node %s\n",
+ dev->of_node->full_name);
ret = -ENODEV;
goto error;
}
@@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
guts_regs = of_iomap(guts_node, 0);
of_node_put(guts_node);
if (!guts_regs) {
- dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
+ dev_err(dev, "ioremap of GUTS node failed\n");
ret = -ENODEV;
goto error;
}
@@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
- dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
+ dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
ret = -ENOMEM;
goto error;
}
@@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* Make sure the memory is naturally aligned */
if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
- dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
+ dev_err(dev, "PAACT/OMT block is unaligned\n");
ret = -ENOMEM;
goto error;
}
@@ -1121,8 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
- dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
- (unsigned long long) ppaact_phys);
+ dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
/* Check to see if we need to implement the work-around on this SOC */
@@ -1130,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
csd_port_id = port_id_map[i].port_id;
- dev_dbg(&pdev->dev, "found matching SVR %08x\n",
+ dev_dbg(dev, "found matching SVR %08x\n",
port_id_map[i].svr);
break;
}
}
if (csd_port_id) {
- dev_dbg(&pdev->dev, "creating coherency subdomain at address "
- "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
- mem_size, csd_port_id);
+ dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
+ &ppaact_phys, mem_size, csd_port_id);
ret = create_csd(ppaact_phys, mem_size, csd_port_id);
if (ret) {
- dev_err(&pdev->dev, "could not create coherence "
- "subdomain\n");
+ dev_err(dev, "could not create coherence subdomain\n");
return ret;
}
}
@@ -1155,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
if (!spaace_pool) {
ret = -ENOMEM;
- dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
+ dev_err(dev, "Failed to allocate spaace gen pool\n");
goto error;
}
@@ -1168,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
- pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
+ pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
- spaact_phys, omt_phys);
+ spaact_phys, omt_phys);
/* Disable PAMU bypass for this PAMU */
pamubypenr &= ~pamu_counter;
}
@@ -1182,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
iounmap(guts_regs);
- /* Enable DMA for the LIODNs in the device tree*/
+ /* Enable DMA for the LIODNs in the device tree */
setup_liodns();
@@ -1214,17 +1200,7 @@ error:
return ret;
}
-static const struct of_device_id fsl_of_pamu_ids[] = {
- {
- .compatible = "fsl,p4080-pamu",
- },
- {
- .compatible = "fsl,pamu",
- },
- {},
-};
-
-static struct platform_driver fsl_of_pamu_driver = {
+static struct platform_driver fsl_of_pamu_driver __initdata = {
.driver = {
.name = "fsl-of-pamu",
},
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index 8fc1a125b16e..aab723f91f12 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -19,13 +19,15 @@
#ifndef __FSL_PAMU_H
#define __FSL_PAMU_H
+#include <linux/iommu.h>
+
#include <asm/fsl_pamu_stash.h>
/* Bit Field macros
* v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
*/
-#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
-#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
+#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
+#define get_bf(v, m) (((v) & (m)) >> m##_SHIFT)
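The token-pasted m##_SHIFT means every mask passed to these macros must come with a companion *_SHIFT constant; typical usage, as an illustrative sketch with the PAACE field macros defined in this header:

	struct paace entry = { 0 };

	/* Load PAACE_AP_PERMS_ALL into the AP field of addr_bitfields... */
	set_bf(entry.addr_bitfields, PAACE_AF_AP, PAACE_AP_PERMS_ALL);
	/* ...and read it back: (v & PAACE_AF_AP) >> PAACE_AF_AP_SHIFT */
	WARN_ON(get_bf(entry.addr_bitfields, PAACE_AF_AP) != PAACE_AP_PERMS_ALL);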
/* PAMU CCSR space */
#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
@@ -65,7 +67,7 @@ struct pamu_mmap_regs {
#define PAMU_AVS1_GCV 0x2000
#define PAMU_AVS1_PDV 0x4000
#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
- | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
+ | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
#define PAMU_AVS1_LIODN_SHIFT 16
#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
@@ -198,8 +200,7 @@ struct pamu_mmap_regs {
#define PAACE_ATM_NO_XLATE 0x00
#define PAACE_ATM_WINDOW_XLATE 0x01
#define PAACE_ATM_PAGE_XLATE 0x02
-#define PAACE_ATM_WIN_PG_XLATE \
- (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
+#define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
#define PAACE_OTM_NO_XLATE 0x00
#define PAACE_OTM_IMMEDIATE 0x01
#define PAACE_OTM_INDEXED 0x02
@@ -219,7 +220,7 @@ struct pamu_mmap_regs {
#define PAACE_TCEF_FORMAT0_8B 0x00
#define PAACE_TCEF_FORMAT1_RSVD 0x01
/*
- * Hard coded value for the PAACT size to accomodate
+ * Hard coded value for the PAACT size to accommodate
* maximum LIODN value generated by u-boot.
*/
#define PAACE_NUMBER_ENTRIES 0x500
@@ -332,7 +333,7 @@ struct paace {
#define NUM_MOE 128
struct ome {
u8 moe[NUM_MOE];
-} __attribute__((packed));
+} __packed;
#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c828f80d48b0..ceebd287b660 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -19,26 +19,10 @@
#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
-#include <linux/init.h>
-#include <linux/iommu.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/of_platform.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-
-#include <asm/pci-bridge.h>
-#include <sysdev/fsl_pci.h>
-
#include "fsl_pamu_domain.h"
+#include <sysdev/fsl_pci.h>
+
/*
* Global spinlock that needs to be held while
* configuring PAMU.
@@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock);
static int __init iommu_init_mempool(void)
{
-
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
- sizeof(struct fsl_dma_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
-
- NULL);
+ sizeof(struct fsl_dma_domain),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
if (!fsl_pamu_domain_cache) {
pr_debug("Couldn't create fsl iommu_domain cache\n");
return -ENOMEM;
}
iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
+ sizeof(struct device_domain_info),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
if (!iommu_devinfo_cache) {
pr_debug("Couldn't create devinfo cache\n");
kmem_cache_destroy(fsl_pamu_domain_cache);
@@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void)
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
u32 win_cnt = dma_domain->win_cnt;
- struct dma_window *win_ptr =
- &dma_domain->win_arr[0];
+ struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
geom = &dma_domain->iommu_domain->geometry;
@@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
}
if (win_ptr->valid)
- return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
+ return win_ptr->paddr + (iova & (win_ptr->size - 1));
return 0;
}
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
- struct dma_window *sub_win_ptr =
- &dma_domain->win_arr[0];
+ struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
int i, ret;
unsigned long rpn, flags;
for (i = 0; i < dma_domain->win_cnt; i++) {
if (sub_win_ptr[i].valid) {
- rpn = sub_win_ptr[i].paddr >>
- PAMU_PAGE_SHIFT;
+ rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
sub_win_ptr[i].size,
@@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
sub_win_ptr[i].prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
+ pr_debug("SPAACE configuration failed for liodn %d\n",
liodn);
return ret;
}
@@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
0, wnd->prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret)
- pr_debug("PAMU PAACE configuration failed for liodn %d\n",
- liodn);
+ pr_debug("PAACE configuration failed for liodn %d\n", liodn);
return ret;
}
@@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
return map_subwins(liodn, dma_domain);
else
return map_win(liodn, dma_domain);
-
}
/* Update window/subwindow mapping for the LIODN */
@@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
(wnd_nr > 0) ? 1 : 0,
wnd->prot);
if (ret)
- pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
+ pr_debug("Subwindow reconfiguration failed for liodn %d\n",
+ liodn);
} else {
phys_addr_t wnd_addr;
@@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
wnd->size,
~(u32)0,
wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
+ dma_domain->snoop_id, dma_domain->stash_id,
+ 0, wnd->prot);
if (ret)
- pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
+ pr_debug("Window reconfiguration failed for liodn %d\n",
+ liodn);
}
spin_unlock_irqrestore(&iommu_lock, flags);
@@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
}
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
- u32 val)
+ u32 val)
{
int ret = 0, i;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
if (!dma_domain->win_arr) {
- pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
+ pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
+ liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return -EINVAL;
}
@@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
for (i = 0; i < dma_domain->win_cnt; i++) {
ret = pamu_update_paace_stash(liodn, i, val);
if (ret) {
- pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
+ pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
+ i, liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
@@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
- struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
+ struct fsl_dma_domain *dma_domain,
+ struct iommu_domain_geometry *geom_attr,
+ u32 win_cnt)
{
phys_addr_t window_addr, window_size;
phys_addr_t subwin_size;
@@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
dma_domain->stash_id, win_cnt, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
+ pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
+ liodn, win_cnt);
return ret;
}
@@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
0, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
+ pr_debug("SPAACE configuration failed for liodn %d\n",
+ liodn);
return ret;
}
}
@@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova)
* to PAMU page size.
*/
if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
- pr_debug("%s: size too small or not a power of two\n", __func__);
+ pr_debug("Size too small or not a power of two\n");
return -EINVAL;
}
- /* iova must be page size aligned*/
+ /* iova must be page size aligned */
if (iova & (size - 1)) {
- pr_debug("%s: address is not aligned with window size\n", __func__);
+ pr_debug("Address is not aligned with window size\n");
return -EINVAL;
}
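Concretely, using the size constants from linux/sizes.h (illustrative calls):

	check_size(SZ_16M, 0x20000000);		/* 0: power-of-two size, aligned iova */
	check_size(SZ_16M, 0x21000000);		/* -EINVAL: iova not 16 MiB aligned */
	check_size(SZ_16M - 1, 0x20000000);	/* -EINVAL: size not a power of two */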
@@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
-
}
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+ dma_addr_t iova)
{
struct fsl_dma_domain *dma_domain = domain->priv;
- if ((iova < domain->geometry.aperture_start) ||
- iova > (domain->geometry.aperture_end))
+ if (iova < domain->geometry.aperture_start ||
+ iova > domain->geometry.aperture_end)
return 0;
return get_phys_addr(dma_domain, iova);
@@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
list_for_each_entry(info, &dma_domain->devices, link) {
ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
- geom_attr, win_cnt);
+ geom_attr, win_cnt);
if (ret)
break;
}
@@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
}
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
if (size > win_size) {
- pr_debug("Invalid window size \n");
+ pr_debug("Invalid window size\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
@@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
* and window mappings.
*/
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
- struct device *dev, const u32 *liodn,
- int num)
+ struct device *dev, const u32 *liodn,
+ int num)
{
unsigned long flags;
struct iommu_domain *domain = dma_domain->iommu_domain;
@@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&dma_domain->domain_lock, flags);
for (i = 0; i < num; i++) {
-
/* Ensure that LIODN value is valid */
if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
pr_debug("Invalid liodn %d, attach device failed for %s\n",
- liodn[i], dev->of_node->full_name);
+ liodn[i], dev->of_node->full_name);
ret = -EINVAL;
break;
}
@@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
*/
if (dma_domain->win_arr) {
u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
+
ret = pamu_set_liodn(liodn[i], dev, dma_domain,
- &domain->geometry,
- win_cnt);
+ &domain->geometry, win_cnt);
if (ret)
break;
if (dma_domain->mapped) {
@@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
if (liodn) {
liodn_cnt = len / sizeof(u32);
- ret = handle_attach_device(dma_domain, dev,
- liodn, liodn_cnt);
+ ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
} else {
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
- ret = -EINVAL;
+ dev->of_node->full_name);
+ ret = -EINVAL;
}
return ret;
}
static void fsl_pamu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
const u32 *prop;
@@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
+ dev->of_node->full_name);
}
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data)
* DMA outside of the geometry.
*/
if (check_size(geom_size, geom_attr->aperture_start) ||
- !geom_attr->force_aperture) {
- pr_debug("Invalid PAMU geometry attributes\n");
- return -EINVAL;
- }
+ !geom_attr->force_aperture) {
+ pr_debug("Invalid PAMU geometry attributes\n");
+ return -EINVAL;
+ }
spin_lock_irqsave(&dma_domain->domain_lock, flags);
if (dma_domain->enabled) {
@@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
spin_lock_irqsave(&dma_domain->domain_lock, flags);
memcpy(&dma_domain->dma_stash, stash_attr,
- sizeof(struct pamu_stash_attribute));
+ sizeof(struct pamu_stash_attribute));
dma_domain->stash_id = get_stash_id(stash_attr->cache,
stash_attr->cpu);
@@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
return ret;
}
-/* Configure domain dma state i.e. enable/disable DMA*/
+/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
struct device_domain_info *info;
@@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
dma_domain->enabled = enable;
- list_for_each_entry(info, &dma_domain->devices,
- link) {
+ list_for_each_entry(info, &dma_domain->devices, link) {
ret = (enable) ? pamu_enable_liodn(info->liodn) :
pamu_disable_liodn(info->liodn);
if (ret)
@@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
+ enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
-
switch (attr_type) {
case DOMAIN_ATTR_GEOMETRY:
ret = configure_domain_geometry(domain, data);
@@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
- };
+ }
return ret;
}
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
+ enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
-
switch (attr_type) {
case DOMAIN_ATTR_FSL_PAMU_STASH:
- memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
- sizeof(struct pamu_stash_attribute));
+ memcpy(data, &dma_domain->dma_stash,
+ sizeof(struct pamu_stash_attribute));
break;
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
*(int *)data = dma_domain->enabled;
@@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
- };
+ }
return ret;
}
@@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
/* Check the PCI controller version number by reading the BRR1 register */
version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
version &= PCI_FSL_BRR1_VER;
- /* If PCI controller version is >= 0x204 we can partition endpoints*/
- if (version >= 0x204)
- return 1;
-
- return 0;
+ /* If PCI controller version is >= 0x204 we can partition endpoints */
+ return version >= 0x204;
}
/* Get iommu group information from peer devices or devices on the parent bus */
@@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
if (pci_ctl->parent->iommu_group) {
group = get_device_iommu_group(pci_ctl->parent);
iommu_group_remove_device(pci_ctl->parent);
- } else
+ } else {
group = get_shared_pci_device_group(pdev);
+ }
}
if (!group)
@@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
}
ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- ((w_count > 1) ? w_count : 0));
+ w_count > 1 ? w_count : 0);
if (!ret) {
kfree(dma_domain->win_arr);
- dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
- w_count, GFP_ATOMIC);
+ dma_domain->win_arr = kcalloc(w_count,
+ sizeof(*dma_domain->win_arr),
+ GFP_ATOMIC);
if (!dma_domain->win_arr) {
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -ENOMEM;
@@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.remove_device = fsl_pamu_remove_device,
};
-int pamu_domain_init(void)
+int __init pamu_domain_init(void)
{
int ret = 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1232336b960e..ae4c1a854e57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -71,6 +71,9 @@
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
@@ -485,7 +488,6 @@ __setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
-static struct kmem_cache *iommu_iova_cache;
static inline void *alloc_pgtable_page(int node)
{
@@ -523,16 +525,6 @@ static inline void free_devinfo_mem(void *vaddr)
kmem_cache_free(iommu_devinfo_cache, vaddr);
}
-struct iova *alloc_iova_mem(void)
-{
- return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
- kmem_cache_free(iommu_iova_cache, iova);
-}
-
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -1643,7 +1635,8 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
- init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
+ DMA_32BIT_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1701,7 +1694,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
+ DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@@ -3427,23 +3421,6 @@ static inline int iommu_devinfo_cache_init(void)
return ret;
}
-static inline int iommu_iova_cache_init(void)
-{
- int ret = 0;
-
- iommu_iova_cache = kmem_cache_create("iommu_iova",
- sizeof(struct iova),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_iova_cache) {
- printk(KERN_ERR "Couldn't create iova cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
static int __init iommu_init_mempool(void)
{
int ret;
@@ -3461,7 +3438,7 @@ static int __init iommu_init_mempool(void)
kmem_cache_destroy(iommu_domain_cache);
domain_error:
- kmem_cache_destroy(iommu_iova_cache);
+ iommu_iova_cache_destroy();
return -ENOMEM;
}
@@ -3470,8 +3447,7 @@ static void __init iommu_exit_mempool(void)
{
kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache);
- kmem_cache_destroy(iommu_iova_cache);
-
+ iommu_iova_cache_destroy();
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
@@ -4029,14 +4005,6 @@ static int device_notifier(struct notifier_block *nb,
if (action != BUS_NOTIFY_REMOVED_DEVICE)
return 0;
- /*
- * If the device is still attached to a device driver we can't
- * tear down the domain yet as DMA mappings may still be in use.
- * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
- */
- if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
- return 0;
-
domain = find_domain(dev);
if (!domain)
return 0;
@@ -4350,7 +4318,8 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
+ DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@@ -4428,6 +4397,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
}
}
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index a55b207b9425..14de1ab223c8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -32,8 +32,9 @@ struct hpet_scope {
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
-#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
+#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
+static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
@@ -481,11 +482,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (iommu->ir_table)
return 0;
- ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+ ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
if (!ir_table)
return -ENOMEM;
- pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
INTR_REMAP_PAGE_ORDER);
if (!pages) {
@@ -566,13 +567,27 @@ static int __init dmar_x2apic_optout(void)
return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
-static int __init intel_irq_remapping_supported(void)
+static void __init intel_cleanup_irq_remapping(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ for_each_iommu(iommu, drhd) {
+ if (ecap_ir_support(iommu->ecap)) {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ }
+ }
+
+ if (x2apic_supported())
+ pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+}
+
+static int __init intel_prepare_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- if (disable_irq_remap)
- return 0;
if (irq_remap_broken) {
printk(KERN_WARNING
"This system BIOS has enabled interrupt remapping\n"
@@ -581,38 +596,45 @@ static int __init intel_irq_remapping_supported(void)
"interrupt remapping is being disabled. Please\n"
"contact your BIOS vendor for an update\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- disable_irq_remap = 1;
- return 0;
+ return -ENODEV;
}
+ if (dmar_table_init() < 0)
+ return -ENODEV;
+
if (!dmar_ir_support())
- return 0;
+ return -ENODEV;
+
+ if (parse_ioapics_under_ir() != 1) {
+ printk(KERN_INFO "Not enabling interrupt remapping\n");
+ goto error;
+ }
+ /* First make sure all IOMMUs support IRQ remapping */
for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
- return 0;
+ goto error;
- return 1;
+ /* Do the allocations early */
+ for_each_iommu(iommu, drhd)
+ if (intel_setup_irq_remapping(iommu))
+ goto error;
+
+ return 0;
+
+error:
+ intel_cleanup_irq_remapping();
+ return -ENODEV;
}
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- bool x2apic_present;
int setup = 0;
int eim = 0;
- x2apic_present = x2apic_supported();
-
- if (parse_ioapics_under_ir() != 1) {
- printk(KERN_INFO "Not enable interrupt remapping\n");
- goto error;
- }
-
- if (x2apic_present) {
- pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
+ if (x2apic_supported()) {
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
@@ -646,16 +668,15 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
- for_each_iommu(iommu, drhd) {
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
+ for_each_iommu(iommu, drhd)
if (eim && !ecap_eim_support(iommu->ecap)) {
printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
" ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
- goto error;
+ eim = 0;
}
- }
+ eim_mode = eim;
+ if (eim)
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
/*
* Enable queued invalidation for all the DRHD's.
@@ -675,12 +696,6 @@ static int __init intel_enable_irq_remapping(void)
* Setup Interrupt-remapping for all the DRHD's now.
*/
for_each_iommu(iommu, drhd) {
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
- if (intel_setup_irq_remapping(iommu))
- goto error;
-
iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
@@ -702,15 +717,7 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
- for_each_iommu(iommu, drhd)
- if (ecap_ir_support(iommu->ecap)) {
- iommu_disable_irq_remapping(iommu);
- intel_teardown_irq_remapping(iommu);
- }
-
- if (x2apic_present)
- pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
-
+ intel_cleanup_irq_remapping();
return -1;
}
@@ -1199,8 +1206,7 @@ static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
}
struct irq_remap_ops intel_irq_remap_ops = {
- .supported = intel_irq_remapping_supported,
- .prepare = dmar_table_init,
+ .prepare = intel_prepare_irq_remapping,
.enable = intel_enable_irq_remapping,
.disable = disable_irq_remapping,
.reenable = reenable_irq_remapping,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..5a500edf00cc
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -0,0 +1,986 @@
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+#define ARM_LPAE_MAX_ADDR_BITS 48
+#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
+#define ARM_LPAE_MAX_LEVELS 4
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct arm_lpae_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+ container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+/*
+ * For consistency with the architecture, we always consider
+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
+ */
+#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
+
+/*
+ * Calculate the right shift amount to get to the portion describing level l
+ * in a virtual address mapped by the pagetable in d.
+ */
+#define ARM_LPAE_LVL_SHIFT(l,d) \
+ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
+ * (d)->bits_per_level) + (d)->pg_shift)
+
+#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
+
+/*
+ * Calculate the index at level l used to map virtual address a using the
+ * pagetable in d.
+ */
+#define ARM_LPAE_PGD_IDX(l,d) \
+ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+
+#define ARM_LPAE_LVL_IDX(a,l,d) \
+ (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
+ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+
+/* Calculate the block/page mapping size at level l for pagetable in d. */
+#define ARM_LPAE_BLOCK_SIZE(l,d) \
+ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+
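Plugging in the common 4 KiB granule with a 48-bit input address size makes these macros concrete (a worked example, not part of the patch):

	/*
	 * pg_shift = 12, bits_per_level = 12 - ilog2(sizeof(arm_lpae_iopte)) = 9,
	 * levels = 4, so ARM_LPAE_START_LVL() = 0 and:
	 *
	 *   ARM_LPAE_LVL_SHIFT(0,d) = 39    ARM_LPAE_BLOCK_SIZE(1,d) = 1 GiB
	 *   ARM_LPAE_LVL_SHIFT(1,d) = 30    ARM_LPAE_BLOCK_SIZE(2,d) = 2 MiB
	 *   ARM_LPAE_LVL_SHIFT(2,d) = 21    ARM_LPAE_BLOCK_SIZE(3,d) = 4 KiB
	 *   ARM_LPAE_LVL_SHIFT(3,d) = 12
	 *
	 * matching the VMSAv8-64 4K translation regime.
	 */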
+/* Page table bits */
+#define ARM_LPAE_PTE_TYPE_SHIFT 0
+#define ARM_LPAE_PTE_TYPE_MASK 0x3
+
+#define ARM_LPAE_PTE_TYPE_BLOCK 1
+#define ARM_LPAE_PTE_TYPE_TABLE 3
+#define ARM_LPAE_PTE_TYPE_PAGE 3
+
+#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
+#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
+#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
+#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
+#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
+#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
+#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
+#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
+
+#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
+/* Ignore the contiguous bit for block splitting */
+#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
+#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
+ ARM_LPAE_PTE_ATTR_HI_MASK)
+
+/* Stage-1 PTE */
+#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
+#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
+
+/* Stage-2 PTE */
+#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
+#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
+#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
+#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
+
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE (1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+
+#define ARM_LPAE_TCR_TG0_4K (0 << 14)
+#define ARM_LPAE_TCR_TG0_64K (1 << 14)
+#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+
+#define ARM_LPAE_TCR_SH0_SHIFT 12
+#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH_NS 0
+#define ARM_LPAE_TCR_SH_OS 2
+#define ARM_LPAE_TCR_SH_IS 3
+
+#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_RGN_MASK 0x3
+#define ARM_LPAE_TCR_RGN_NC 0
+#define ARM_LPAE_TCR_RGN_WBWA 1
+#define ARM_LPAE_TCR_RGN_WT 2
+#define ARM_LPAE_TCR_RGN_WB 3
+
+#define ARM_LPAE_TCR_SL0_SHIFT 6
+#define ARM_LPAE_TCR_SL0_MASK 0x3
+
+#define ARM_LPAE_TCR_T0SZ_SHIFT 0
+#define ARM_LPAE_TCR_SZ_MASK 0xf
+
+#define ARM_LPAE_TCR_PS_SHIFT 16
+#define ARM_LPAE_TCR_PS_MASK 0x7
+
+#define ARM_LPAE_TCR_IPS_SHIFT 32
+#define ARM_LPAE_TCR_IPS_MASK 0x7
+
+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+
+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
+#define ARM_LPAE_MAIR_ATTR_MASK 0xff
+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
+#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+
+/* IOPTE accessors */
+#define iopte_deref(pte,d) \
+ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
+ & ~((1ULL << (d)->pg_shift) - 1)))
+
+#define iopte_type(pte,l) \
+ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
+
+#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
+
+#define iopte_leaf(pte,l) \
+ (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
+
+#define iopte_to_pfn(pte,d) \
+ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
+
+#define pfn_to_iopte(pfn,d) \
+ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
+
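These accessors mask against the architectural 48-bit output address limit rather than carrying a per-format mask; a round trip looks like this (illustrative, assuming a 4K-granule data with pg_shift = 12):

	arm_lpae_iopte pte;

	/* Encode the frame for PA 0x8_8020_0000... */
	pte = pfn_to_iopte(0x880200000ULL >> 12, data) | ARM_LPAE_PTE_TYPE_PAGE;
	/* ...and decode it; the type bits sit below pg_shift and shift out */
	WARN_ON(iopte_to_pfn(pte, data) != (0x880200000ULL >> 12));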
+struct arm_lpae_io_pgtable {
+ struct io_pgtable iop;
+
+ int levels;
+ size_t pgd_size;
+ unsigned long pg_shift;
+ unsigned long bits_per_level;
+
+ void *pgd;
+};
+
+typedef u64 arm_lpae_iopte;
+
+static bool selftest_running = false;
+
+static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte = prot;
+
+ /* We require an unmap first */
+ if (iopte_leaf(*ptep, lvl)) {
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ }
+
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NS;
+
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ pte |= ARM_LPAE_PTE_TYPE_PAGE;
+ else
+ pte |= ARM_LPAE_PTE_TYPE_BLOCK;
+
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
+
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+ return 0;
+}
+
+static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
+ phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
+ int lvl, arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *cptep, pte;
+ void *cookie = data->iop.cookie;
+ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ /* Find our entry at the current level */
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+
+ /* If we can install a leaf entry at this level, then do so */
+ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+
+ /* We can't allocate tables at the final level */
+ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
+ return -EINVAL;
+
+ /* Grab a pointer to the next level */
+ pte = *ptep;
+ if (!pte) {
+ cptep = alloc_pages_exact(1UL << data->pg_shift,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!cptep)
+ return -ENOMEM;
+
+ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
+ cookie);
+ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NSTABLE;
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ } else {
+ cptep = iopte_deref(pte, data);
+ }
+
+ /* Rinse, repeat */
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+}
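A worked trace of the recursion, assuming the 4K/48-bit configuration sketched earlier and SZ_2M present in pgsize_bitmap:

	/*
	 * ops->map(ops, 0x40200000, paddr, SZ_2M, prot): levels 0 and 1 fail
	 * the size test and install or follow table descriptors; at level 2,
	 * ARM_LPAE_BLOCK_SIZE() == SZ_2M == size, so arm_lpae_init_pte()
	 * writes a TYPE_BLOCK leaf and the recursion terminates.
	 */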
+
+static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
+ int prot)
+{
+ arm_lpae_iopte pte;
+
+ if (data->iop.fmt == ARM_64_LPAE_S1 ||
+ data->iop.fmt == ARM_32_LPAE_S1) {
+ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+
+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+ pte |= ARM_LPAE_PTE_AP_RDONLY;
+
+ if (prot & IOMMU_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ } else {
+ pte = ARM_LPAE_PTE_HAP_FAULT;
+ if (prot & IOMMU_READ)
+ pte |= ARM_LPAE_PTE_HAP_READ;
+ if (prot & IOMMU_WRITE)
+ pte |= ARM_LPAE_PTE_HAP_WRITE;
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
+ else
+ pte |= ARM_LPAE_PTE_MEMATTR_NC;
+ }
+
+ if (prot & IOMMU_NOEXEC)
+ pte |= ARM_LPAE_PTE_XN;
+
+ return pte;
+}
+
+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+ arm_lpae_iopte prot;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+}
+
+static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *start, *end;
+ unsigned long table_size;
+
+ /* Only leaf entries at the last level */
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ return;
+
+ if (lvl == ARM_LPAE_START_LVL(data))
+ table_size = data->pgd_size;
+ else
+ table_size = 1UL << data->pg_shift;
+
+ start = ptep;
+ end = (void *)ptep + table_size;
+
+ while (ptep != end) {
+ arm_lpae_iopte pte = *ptep++;
+
+ if (!pte || iopte_leaf(pte, lvl))
+ continue;
+
+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+ }
+
+ free_pages_exact(start, table_size);
+}
+
+static void arm_lpae_free_pgtable(struct io_pgtable *iop)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ kfree(data);
+}
+
+static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep, size_t blk_size)
+{
+ unsigned long blk_start, blk_end;
+ phys_addr_t blk_paddr;
+ arm_lpae_iopte table = 0;
+ void *cookie = data->iop.cookie;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+
+ blk_start = iova & ~(blk_size - 1);
+ blk_end = blk_start + blk_size;
+ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
+
+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
+ arm_lpae_iopte *tablep;
+
+ /* Unmap! */
+ if (blk_start == iova)
+ continue;
+
+ /* __arm_lpae_map expects a pointer to the start of the table */
+ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
+ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
+ tablep) < 0) {
+ if (table) {
+ /* Free the table we allocated */
+ tablep = iopte_deref(table, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, tablep);
+ }
+ return 0; /* Bytes unmapped */
+ }
+ }
+
+ *ptep = table;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ iova &= ~(blk_size - 1);
+ tlb->tlb_add_flush(iova, blk_size, true, cookie);
+ return size;
+}
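For example, unmapping a single 4 KiB page out of a live 2 MiB block entry at level 2 (a worked case, not in the patch):

	/*
	 * The loop re-maps the other 511 pages through a freshly allocated
	 * level-3 table, skipping only the iova being unmapped; the new table
	 * descriptor then replaces the old block entry in *ptep and the stale
	 * block translation is flushed from the TLB.
	 */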
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+ void *cookie = data->iop.cookie;
+ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ pte = *ptep;
+
+ /* Something went horribly wrong and we ran out of page table */
+ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
+ return 0;
+
+ /* If the size matches this level, we're in the right place */
+ if (size == blk_size) {
+ *ptep = 0;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+
+ if (!iopte_leaf(pte, lvl)) {
+ /* Also flush any partial walks */
+ tlb->tlb_add_flush(iova, size, false, cookie);
+ tlb->tlb_sync(data->iop.cookie);
+ ptep = iopte_deref(pte, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else {
+ tlb->tlb_add_flush(iova, size, true, cookie);
+ }
+
+ return size;
+ } else if (iopte_leaf(pte, lvl)) {
+ /*
+ * Insert a table at the next level to map the old region,
+ * minus the part we want to unmap
+ */
+ return arm_lpae_split_blk_unmap(data, iova, size,
+ iopte_prot(pte), lvl, ptep,
+ blk_size);
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte, data);
+ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+}
+
+static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped;
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
+ if (unmapped)
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+
+ return unmapped;
+}
+
+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte, *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ do {
+ /* Valid IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ /* Grab the IOPTE we're interested in */
+ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
+
+ /* Valid entry? */
+ if (!pte)
+ return 0;
+
+ /* Leaf entry? */
+ if (iopte_leaf(pte,lvl))
+ goto found_translation;
+
+ /* Take it to the next level */
+ ptep = iopte_deref(pte, data);
+ } while (++lvl < ARM_LPAE_MAX_LEVELS);
+
+ /* Ran out of page tables to walk */
+ return 0;
+
+found_translation:
+ iova &= ((1 << data->pg_shift) - 1);
+ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+}
+
+static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
+{
+ unsigned long granule;
+
+ /*
+ * We need to restrict the supported page sizes to match the
+ * translation regime for a particular granule. Aim to match
+ * the CPU page size if possible, otherwise prefer smaller sizes.
+ * While we're at it, restrict the block sizes to match the
+ * chosen granule.
+ */
+ if (cfg->pgsize_bitmap & PAGE_SIZE)
+ granule = PAGE_SIZE;
+ else if (cfg->pgsize_bitmap & ~PAGE_MASK)
+ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
+ else if (cfg->pgsize_bitmap & PAGE_MASK)
+ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
+ else
+ granule = 0;
+
+ switch (granule) {
+ case SZ_4K:
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ break;
+ case SZ_16K:
+ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
+ break;
+ case SZ_64K:
+ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
+ break;
+ default:
+ cfg->pgsize_bitmap = 0;
+ }
+}
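A worked case (not part of the patch):

	/*
	 * With a 4 KiB PAGE_SIZE and an incoming pgsize_bitmap of
	 * SZ_4K | SZ_64K | SZ_2M, the CPU page size wins: granule = SZ_4K and
	 * the bitmap narrows to SZ_4K | SZ_2M. Were SZ_4K absent, the smallest
	 * remaining size >= PAGE_SIZE (SZ_64K) would be picked and the bitmap
	 * would narrow to SZ_64K alone, since SZ_2M is not a 64K-granule block.
	 */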
+
+static struct arm_lpae_io_pgtable *
+arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ unsigned long va_bits, pgd_bits;
+ struct arm_lpae_io_pgtable *data;
+
+ arm_lpae_restrict_pgsizes(cfg);
+
+ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
+ return NULL;
+
+ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+
+ va_bits = cfg->ias - data->pg_shift;
+ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+
+ /* Calculate the actual size of our pgd (without concatenation) */
+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = arm_lpae_map,
+ .unmap = arm_lpae_unmap,
+ .iova_to_phys = arm_lpae_iova_to_phys,
+ };
+
+ return data;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /* TCR */
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ cfg->arm_lpae_s1_cfg.tcr = reg;
+
+ /* MAIRs */
+ reg = (ARM_LPAE_MAIR_ATTR_NC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_LPAE_MAIR_ATTR_WBRWA
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_LPAE_MAIR_ATTR_DEVICE
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+
+ cfg->arm_lpae_s1_cfg.mair[0] = reg;
+ cfg->arm_lpae_s1_cfg.mair[1] = 0;
+
+ /* Looking good; allocate a pgd */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* TTBRs */
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg, sl;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /*
+ * Concatenate PGDs at level 1 if possible in order to reduce
+ * the depth of the stage-2 walk.
+ */
+ if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ unsigned long pgd_pages;
+
+ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
+ data->pgd_size = pgd_pages << data->pg_shift;
+ data->levels--;
+ }
+ }
+
+ /* VTCR */
+ reg = ARM_64_LPAE_S2_TCR_RES1 |
+ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ sl = ARM_LPAE_START_LVL(data);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ sl++; /* SL0 format is different for 4K granule size */
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
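+	/*
+	 * SL0 encodes the starting level inversely (and the encoding
+	 * shifts by one for the 4K granule): e.g. a 4K stage-2 walk
+	 * starting at level 1 reaches this point with sl == 2, giving
+	 * SL0 == 1.
+	 */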
+ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
+ cfg->arm_lpae_s2_cfg.vtcr = reg;
+
+ /* Allocate pgd pages */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* VTTBR */
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 32 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
+ if (iop) {
+ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
+ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
+ }
+
+ return iop;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 40 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
+ if (iop)
+ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
+
+ return iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_sync = dummy_tlb_sync,
+ .flush_pgtable = dummy_flush_pgtable,
+};
+
+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+ cfg->pgsize_bitmap, cfg->ias);
+ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
+ data->levels, data->pgd_size, data->pg_shift,
+ data->bits_per_level, data->pgd);
+}
+
+#define __FAIL(ops, i) ({ \
+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
+ arm_lpae_dump_ops(ops); \
+ selftest_running = false; \
+ -EFAULT; \
+})
+
+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+{
+ static const enum io_pgtable_fmt fmts[] = {
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ };
+
+ int i, j;
+ unsigned long iova;
+ size_t size;
+ struct io_pgtable_ops *ops;
+
+ selftest_running = true;
+
+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+ cfg_cookie = cfg;
+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+ if (!ops) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(ops, i);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->map(ops, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+ return __FAIL(ops, i);
+
+ /* Overlapping mappings */
+ if (!ops->map(ops, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ /* Partial unmap */
+ size = 1UL << __ffs(cfg->pgsize_bitmap);
+ if (ops->unmap(ops, SZ_1G + size, size) != size)
+ return __FAIL(ops, i);
+
+ /* Remap of partial unmap */
+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+ return __FAIL(ops, i);
+
+ /* Full unmap */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->unmap(ops, iova, size) != size)
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(ops, i);
+
+ /* Remap full block */
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ free_io_pgtable_ops(ops);
+ }
+
+ selftest_running = false;
+ return 0;
+}
+
+static int __init arm_lpae_do_selftests(void)
+{
+ static const unsigned long pgsize[] = {
+ SZ_4K | SZ_2M | SZ_1G,
+ SZ_16K | SZ_32M,
+ SZ_64K | SZ_512M,
+ };
+
+ static const unsigned int ias[] = {
+ 32, 36, 40, 42, 44, 48,
+ };
+
+ int i, j, pass = 0, fail = 0;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .oas = 48,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
+ for (j = 0; j < ARRAY_SIZE(ias); ++j) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = ias[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
+ pgsize[i], ias[j]);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
+ }
+
+ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ return fail ? -EFAULT : 0;
+}
+subsys_initcall(arm_lpae_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
new file mode 100644
index 000000000000..6436fe24bc2f
--- /dev/null
+++ b/drivers/iommu/io-pgtable.c
@@ -0,0 +1,82 @@
+/*
+ * Generic page table allocator for IOMMUs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
+static const struct io_pgtable_init_fns *
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
+{
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
+ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
+ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
+ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
+ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
+#endif
+};
+
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
+{
+ struct io_pgtable *iop;
+ const struct io_pgtable_init_fns *fns;
+
+ if (fmt >= IO_PGTABLE_NUM_FMTS)
+ return NULL;
+
+ fns = io_pgtable_init_table[fmt];
+ if (!fns)
+ return NULL;
+
+ iop = fns->alloc(cfg, cookie);
+ if (!iop)
+ return NULL;
+
+ iop->fmt = fmt;
+ iop->cookie = cookie;
+ iop->cfg = *cfg;
+
+ return &iop->ops;
+}
+
+/*
+ * It is the IOMMU driver's responsibility to ensure that the page table
+ * is no longer accessible to the walker by this point.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops)
+{
+ struct io_pgtable *iop;
+
+ if (!ops)
+ return;
+
+ iop = container_of(ops, struct io_pgtable, ops);
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_init_table[iop->fmt]->free(iop);
+}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
new file mode 100644
index 000000000000..10e32f69c668
--- /dev/null
+++ b/drivers/iommu/io-pgtable.h
@@ -0,0 +1,143 @@
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
+ * @tlb_sync: Ensure any queued TLB invalidation has taken effect.
+ * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_gather_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
+ void *cookie);
+ void (*tlb_sync)(void *cookie);
+ void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
+};
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @tlb: TLB management callbacks for this set of tables.
+ */
+struct io_pgtable_cfg {
+ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
+ int quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ const struct iommu_gather_ops *tlb;
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr[2];
+ u64 tcr;
+ u64 mair[2];
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ u64 vtcr;
+ } arm_lpae_s2_cfg;
+ };
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map: Map a physically contiguous memory region.
+ * @unmap: Unmap a physically contiguous memory region.
+ * @iova_to_phys: Translate iova to physical address.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
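+
+/*
+ * Sketch of the expected calling sequence (illustrative only: my_tlb_ops
+ * and my_cookie stand in for driver-provided objects):
+ *
+ *	struct io_pgtable_cfg cfg = {
+ *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
+ *		.ias		= 32,
+ *		.oas		= 40,
+ *		.tlb		= &my_tlb_ops,
+ *	};
+ *	struct io_pgtable_ops *ops;
+ *
+ *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_cookie);
+ *	if (!ops)
+ *		return -ENOMEM;
+ *
+ *	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
+ *	ops->unmap(ops, iova, SZ_4K);
+ *	free_io_pgtable_ops(ops);
+ */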
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+};
+
+#endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f7718d73e984..72e683df0731 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -1084,7 +1084,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
- trace_map(iova, paddr, size);
+ trace_map(orig_iova, paddr, orig_size);
return ret;
}
@@ -1094,6 +1094,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
+ unsigned long orig_iova = iova;
if (unlikely(domain->ops->unmap == NULL ||
domain->ops->pgsize_bitmap == 0UL))
@@ -1133,7 +1134,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}
- trace_unmap(iova, 0, size);
+ trace_unmap(orig_iova, size, unmapped);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f6b17e6af2fb..9dd8208312c2 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,13 +18,58 @@
*/
#include <linux/iova.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *iommu_iova_cache;
+
+int iommu_iova_cache_init(void)
+{
+ int ret = 0;
+
+ iommu_iova_cache = kmem_cache_create("iommu_iova",
+ sizeof(struct iova),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu_iova_cache) {
+ pr_err("Couldn't create iova cache\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+void iommu_iova_cache_destroy(void)
+{
+ kmem_cache_destroy(iommu_iova_cache);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+ return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+ kmem_cache_free(iommu_iova_cache, iova);
+}
void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ unsigned long start_pfn, unsigned long pfn_32bit)
{
+ /*
+ * IOVA granularity will normally be equal to the smallest
+ * supported IOMMU page size; both *must* be capable of
+ * representing individual CPU pages exactly.
+ */
+ BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
+ iovad->granule = granule;
+ iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
}
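+
+/*
+ * e.g. a caller managing a 32-bit IOVA space at 4K granularity, with
+ * pfn 1 as the lowest allocatable page (values illustrative):
+ *
+ *	init_iova_domain(&iovad, SZ_4K, 1, DMA_BIT_MASK(32) >> 12);
+ */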
@@ -127,7 +172,7 @@ move_left:
if (!curr) {
if (size_aligned)
pad_size = iova_get_pad_size(size, limit_pfn);
- if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+ if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return -ENOMEM;
}
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
* @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
* flag is set then the allocated address iova->pfn_lo will be naturally
* aligned on roundup_power_of_two(size).
*/
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 68dfb0fd5ee9..10186cac7716 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/platform_data/ipmmu-vmsa.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -24,12 +24,13 @@
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
+#include "io-pgtable.h"
+
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct list_head list;
- const struct ipmmu_vmsa_platform_data *pdata;
unsigned int num_utlbs;
struct dma_iommu_mapping *mapping;
@@ -39,14 +40,17 @@ struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
struct iommu_domain *io_domain;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+
unsigned int context_id;
spinlock_t lock; /* Protects mappings */
- pgd_t *pgd;
};
struct ipmmu_vmsa_archdata {
struct ipmmu_vmsa_device *mmu;
- unsigned int utlb;
+ unsigned int *utlbs;
+ unsigned int num_utlbs;
};
static DEFINE_SPINLOCK(ipmmu_devices_lock);
@@ -58,6 +62,8 @@ static LIST_HEAD(ipmmu_devices);
* Registers Definition
*/
+#define IM_NS_ALIAS_OFFSET 0x800
+
#define IM_CTX_SIZE 0x40
#define IMCTR 0x0000
@@ -171,52 +177,6 @@ static LIST_HEAD(ipmmu_devices);
#define IMUASID_ASID0_SHIFT 0
/* -----------------------------------------------------------------------------
- * Page Table Bits
- */
-
-/*
- * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
- * Long-descriptor format" that the NStable bit being set in a table descriptor
- * will result in the NStable and NS bits of all child entries being ignored and
- * considered as being set. The IPMMU seems not to comply with this, as it
- * generates a secure access page fault if any of the NStable and NS bits isn't
- * set when running in non-secure mode.
- */
-#ifndef PMD_NSTABLE
-#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63)
-#endif
-
-#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53)
-#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52)
-#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10)
-#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8)
-#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8)
-#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5)
-#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0)
-
-/* Stage-1 PTE */
-#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11)
-#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6)
-#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6)
-#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6)
-#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2)
-#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2
-
-#define ARM_VMSA_PTE_ATTRS_MASK \
- (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
- ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
- ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
-
-#define ARM_VMSA_PTE_CONT_ENTRIES 16
-#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
-
-#define IPMMU_PTRS_PER_PTE 512
-#define IPMMU_PTRS_PER_PMD 512
-#define IPMMU_PTRS_PER_PGD 4
-
-/* -----------------------------------------------------------------------------
* Read/Write Access
*/
@@ -305,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
ipmmu_write(mmu, IMUCTR(utlb), 0);
}
-static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
- size_t size)
+static void ipmmu_tlb_flush_all(void *cookie)
+{
+ struct ipmmu_vmsa_domain *domain = cookie;
+
+ ipmmu_tlb_invalidate(domain);
+}
+
+static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ /* The hardware doesn't support selective TLB flush. */
+}
+
+static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct ipmmu_vmsa_domain *domain = cookie;
/*
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling.
*/
- dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
+ dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
+ DMA_TO_DEVICE);
}
+static struct iommu_gather_ops ipmmu_gather_ops = {
+ .tlb_flush_all = ipmmu_tlb_flush_all,
+ .tlb_add_flush = ipmmu_tlb_add_flush,
+ .tlb_sync = ipmmu_tlb_flush_all,
+ .flush_pgtable = ipmmu_flush_pgtable,
+};
+
/* -----------------------------------------------------------------------------
* Domain/Context Management
*/
@@ -324,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
phys_addr_t ttbr;
- u32 reg;
+
+ /*
+ * Allocate the page table operations.
+ *
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+ * access, Long-descriptor format" that the NStable bit being set in a
+ * table descriptor will result in the NStable and NS bits of all child
+ * entries being ignored and considered as being set. The IPMMU seems
+ * not to comply with this, as it generates a secure access page fault
+ * if any of the NStable and NS bits isn't set when running in
+ * non-secure mode.
+ */
+ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+ domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+ domain->cfg.ias = 32;
+ domain->cfg.oas = 40;
+ domain->cfg.tlb = &ipmmu_gather_ops;
+
+ domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+ domain);
+ if (!domain->iop)
+ return -EINVAL;
/*
* TODO: When adding support for multiple contexts, find an unused
@@ -333,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->context_id = 0;
/* TTBR0 */
- ipmmu_flush_pgtable(domain->mmu, domain->pgd,
- IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
- ttbr = __pa(domain->pgd);
+ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
@@ -348,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
- /*
- * MAIR0
- * We need three attributes only, non-cacheable, write-back read/write
- * allocate and device memory.
- */
- reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
- | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
- | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
- ipmmu_ctx_write(domain, IMMAIR0, reg);
+ /* MAIR0 */
+ ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
/* IMBUSCR */
ipmmu_ctx_write(domain, IMBUSCR,
@@ -461,396 +454,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
}
/* -----------------------------------------------------------------------------
- * Page Table Management
- */
-
-#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-
-static void ipmmu_free_ptes(pmd_t *pmd)
-{
- pgtable_t table = pmd_pgtable(*pmd);
- __free_page(table);
-}
-
-static void ipmmu_free_pmds(pud_t *pud)
-{
- pmd_t *pmd = pmd_offset(pud, 0);
- pgtable_t table;
- unsigned int i;
-
- for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
- if (!pmd_table(*pmd))
- continue;
-
- ipmmu_free_ptes(pmd);
- pmd++;
- }
-
- table = pud_pgtable(*pud);
- __free_page(table);
-}
-
-static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
-{
- pgd_t *pgd, *pgd_base = domain->pgd;
- unsigned int i;
-
- /*
- * Recursively free the page tables for this domain. We don't care about
- * speculative TLB filling, because the TLB will be nuked next time this
- * context bank is re-allocated and no devices currently map to these
- * tables.
- */
- pgd = pgd_base;
- for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
- if (pgd_none(*pgd))
- continue;
- ipmmu_free_pmds((pud_t *)pgd);
- pgd++;
- }
-
- kfree(pgd_base);
-}
-
-/*
- * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
- * functions as they would flush the CPU TLB.
- */
-
-static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova)
-{
- pte_t *pte;
-
- if (!pmd_none(*pmd))
- return pte_offset_kernel(pmd, iova);
-
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pte)
- return NULL;
-
- ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
- *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return pte + pte_index(iova);
-}
-
-static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
- unsigned long iova)
-{
- pud_t *pud = (pud_t *)pgd;
- pmd_t *pmd;
-
- if (!pud_none(*pud))
- return pmd_offset(pud, iova);
-
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pmd)
- return NULL;
-
- ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
- *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-
- return pmd + pmd_index(iova);
-}
-
-static u64 ipmmu_page_prot(unsigned int prot, u64 type)
-{
- u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
- | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
- | ARM_VMSA_PTE_NS | type;
-
- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
- pgprot |= ARM_VMSA_PTE_AP_RDONLY;
-
- if (prot & IOMMU_CACHE)
- pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
-
- if (prot & IOMMU_EXEC)
- pgprot &= ~ARM_VMSA_PTE_XN;
- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- /* If no access create a faulting entry to avoid TLB fills. */
- pgprot &= ~ARM_VMSA_PTE_PAGE;
-
- return pgprot;
-}
-
-static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova, unsigned long pfn,
- size_t size, int prot)
-{
- pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
- unsigned int num_ptes = 1;
- pte_t *pte, *start;
- unsigned int i;
-
- pte = ipmmu_alloc_pte(mmu, pmd, iova);
- if (!pte)
- return -ENOMEM;
-
- start = pte;
-
- /*
- * Install the page table entries. We can be called both for a single
- * page or for a block of 16 physically contiguous pages. In the latter
- * case set the PTE contiguous hint.
- */
- if (size == SZ_64K) {
- pteval |= ARM_VMSA_PTE_CONT;
- num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
- }
-
- for (i = num_ptes; i; --i)
- *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
- ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
-
- return 0;
-}
-
-static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova, unsigned long pfn,
- int prot)
-{
- pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
-
- *pmd = pfn_pmd(pfn, __pgprot(pmdval));
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return 0;
-}
-
-static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
- pgd_t *pgd = domain->pgd;
- unsigned long flags;
- unsigned long pfn;
- pmd_t *pmd;
- int ret;
-
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- if (paddr & ~((1ULL << 40) - 1))
- return -ERANGE;
-
- pfn = __phys_to_pfn(paddr);
- pgd += pgd_index(iova);
-
- /* Update the page tables. */
- spin_lock_irqsave(&domain->lock, flags);
-
- pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
- if (!pmd) {
- ret = -ENOMEM;
- goto done;
- }
-
- switch (size) {
- case SZ_2M:
- ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
- break;
- case SZ_64K:
- case SZ_4K:
- ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
-done:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (!ret)
- ipmmu_tlb_invalidate(domain);
-
- return ret;
-}
-
-static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
-{
- /* Free the page table. */
- pgtable_t table = pud_pgtable(*pud);
- __free_page(table);
-
- /* Clear the PUD. */
- *pud = __pud(0);
- ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-}
-
-static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
- pmd_t *pmd)
-{
- unsigned int i;
-
- /* Free the page table. */
- if (pmd_table(*pmd)) {
- pgtable_t table = pmd_pgtable(*pmd);
- __free_page(table);
- }
-
- /* Clear the PMD. */
- *pmd = __pmd(0);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- /* Check whether the PUD is still needed. */
- pmd = pmd_offset(pud, 0);
- for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
- if (!pmd_none(pmd[i]))
- return;
- }
-
- /* Clear the parent PUD. */
- ipmmu_clear_pud(mmu, pud);
-}
-
-static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
- pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
-{
- unsigned int i;
-
- /* Clear the PTE. */
- for (i = num_ptes; i; --i)
- pte[i-1] = __pte(0);
-
- ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
-
- /* Check whether the PMD is still needed. */
- pte = pte_offset_kernel(pmd, 0);
- for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
- if (!pte_none(pte[i]))
- return;
- }
-
- /* Clear the parent PMD. */
- ipmmu_clear_pmd(mmu, pud, pmd);
-}
-
-static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
-{
- pte_t *pte, *start;
- pteval_t pteval;
- unsigned long pfn;
- unsigned int i;
-
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pte)
- return -ENOMEM;
-
- /* Copy the PMD attributes. */
- pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
- | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
-
- pfn = pmd_pfn(*pmd);
- start = pte;
-
- for (i = IPMMU_PTRS_PER_PTE; i; --i)
- *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
- ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
- *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return 0;
-}
-
-static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
-{
- unsigned int i;
-
- for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
- pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
-
- ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
-}
-
-static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
- unsigned long iova, size_t size)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
- unsigned long flags;
- pgd_t *pgd = domain->pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int ret = 0;
-
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- pgd += pgd_index(iova);
- pud = (pud_t *)pgd;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- /* If there's no PUD or PMD we're done. */
- if (pud_none(*pud))
- goto done;
-
- pmd = pmd_offset(pud, iova);
- if (pmd_none(*pmd))
- goto done;
-
- /*
- * When freeing a 2MB block just clear the PMD. In the unlikely case the
- * block is mapped as individual pages this will free the corresponding
- * PTE page table.
- */
- if (size == SZ_2M) {
- ipmmu_clear_pmd(mmu, pud, pmd);
- goto done;
- }
-
- /*
- * If the PMD has been mapped as a section remap it as pages to allow
- * freeing individual pages.
- */
- if (pmd_sect(*pmd))
- ipmmu_split_pmd(mmu, pmd);
-
- pte = pte_offset_kernel(pmd, iova);
-
- /*
- * When freeing a 64kB block just clear the PTE entries. We don't have
- * to care about the contiguous hint of the surrounding entries.
- */
- if (size == SZ_64K) {
- ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
- goto done;
- }
-
- /*
- * If the PTE has been mapped with the contiguous hint set remap it and
- * its surrounding PTEs to allow unmapping a single page.
- */
- if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
- ipmmu_split_pte(mmu, pte);
-
- /* Clear the PTE. */
- ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
-
-done:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (ret)
- ipmmu_tlb_invalidate(domain);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* IOMMU Operations
*/
@@ -864,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain)
spin_lock_init(&domain->lock);
- domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- if (!domain->pgd) {
- kfree(domain);
- return -ENOMEM;
- }
-
io_domain->priv = domain;
domain->io_domain = io_domain;
@@ -885,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
* been detached.
*/
ipmmu_domain_destroy_context(domain);
- ipmmu_free_pgtables(domain);
+ free_io_pgtable_ops(domain->iop);
kfree(domain);
}
@@ -896,6 +493,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
unsigned long flags;
+ unsigned int i;
int ret = 0;
if (!mmu) {
@@ -924,7 +522,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
if (ret < 0)
return ret;
- ipmmu_utlb_enable(domain, archdata->utlb);
+ for (i = 0; i < archdata->num_utlbs; ++i)
+ ipmmu_utlb_enable(domain, archdata->utlbs[i]);
return 0;
}
@@ -934,8 +533,10 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ unsigned int i;
- ipmmu_utlb_disable(domain, archdata->utlb);
+ for (i = 0; i < archdata->num_utlbs; ++i)
+ ipmmu_utlb_disable(domain, archdata->utlbs[i]);
/*
* TODO: Optimize by disabling the context when no device is attached.
@@ -950,76 +551,61 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
- return ipmmu_create_mapping(domain, iova, paddr, size, prot);
+ return domain->iop->map(domain->iop, iova, paddr, size, prot);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
- int ret;
- ret = ipmmu_clear_mapping(domain, iova, size);
- return ret ? 0 : size;
+ return domain->iop->unmap(domain->iop, iova, size);
}
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
- pgd_t pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
/* TODO: Is locking needed ? */
- if (!domain->pgd)
- return 0;
-
- pgd = *(domain->pgd + pgd_index(iova));
- if (pgd_none(pgd))
- return 0;
-
- pud = *pud_offset(&pgd, iova);
- if (pud_none(pud))
- return 0;
+ return domain->iop->iova_to_phys(domain->iop, iova);
+}
- pmd = *pmd_offset(&pud, iova);
- if (pmd_none(pmd))
- return 0;
+static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
+ unsigned int *utlbs, unsigned int num_utlbs)
+{
+ unsigned int i;
- if (pmd_sect(pmd))
- return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
+ for (i = 0; i < num_utlbs; ++i) {
+ struct of_phandle_args args;
+ int ret;
- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
- if (pte_none(pte))
- return 0;
+ ret = of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells", i, &args);
+ if (ret < 0)
+ return ret;
- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
-}
+ of_node_put(args.np);
-static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev)
-{
- const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
- const char *devname = dev_name(dev);
- unsigned int i;
+ if (args.np != mmu->dev->of_node || args.args_count != 1)
+ return -EINVAL;
- for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) {
- if (strcmp(master->name, devname) == 0)
- return master->utlb;
+ utlbs[i] = args.args[0];
}
- return -1;
+ return 0;
}
static int ipmmu_add_device(struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata;
struct ipmmu_vmsa_device *mmu;
- struct iommu_group *group;
- int utlb = -1;
- int ret;
+ struct iommu_group *group = NULL;
+ unsigned int *utlbs;
+ unsigned int i;
+ int num_utlbs;
+ int ret = -ENODEV;
if (dev->archdata.iommu) {
dev_warn(dev, "IOMMU driver already assigned to device %s\n",
@@ -1028,11 +614,21 @@ static int ipmmu_add_device(struct device *dev)
}
/* Find the master corresponding to the device. */
+
+ num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells");
+ if (num_utlbs < 0)
+ return -ENODEV;
+
+ utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
+ if (!utlbs)
+ return -ENOMEM;
+
spin_lock(&ipmmu_devices_lock);
list_for_each_entry(mmu, &ipmmu_devices, list) {
- utlb = ipmmu_find_utlb(mmu, dev);
- if (utlb >= 0) {
+ ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
+ if (!ret) {
/*
* TODO Take a reference to the MMU to protect
* against device removal.
@@ -1043,17 +639,22 @@ static int ipmmu_add_device(struct device *dev)
spin_unlock(&ipmmu_devices_lock);
- if (utlb < 0)
+ if (ret < 0)
return -ENODEV;
- if (utlb >= mmu->num_utlbs)
- return -EINVAL;
+ for (i = 0; i < num_utlbs; ++i) {
+ if (utlbs[i] >= mmu->num_utlbs) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
/* Create a device group and add the device to it. */
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
+ ret = PTR_ERR(group);
+ goto error;
}
ret = iommu_group_add_device(group, dev);
@@ -1061,7 +662,8 @@ static int ipmmu_add_device(struct device *dev)
if (ret < 0) {
dev_err(dev, "Failed to add device to IPMMU group\n");
- return ret;
+ group = NULL;
+ goto error;
}
archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
@@ -1071,7 +673,8 @@ static int ipmmu_add_device(struct device *dev)
}
archdata->mmu = mmu;
- archdata->utlb = utlb;
+ archdata->utlbs = utlbs;
+ archdata->num_utlbs = num_utlbs;
dev->archdata.iommu = archdata;
/*
@@ -1090,7 +693,8 @@ static int ipmmu_add_device(struct device *dev)
SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
- return PTR_ERR(mapping);
+ ret = PTR_ERR(mapping);
+ goto error;
}
mmu->mapping = mapping;
@@ -1106,17 +710,29 @@ static int ipmmu_add_device(struct device *dev)
return 0;
error:
+ arm_iommu_release_mapping(mmu->mapping);
+
kfree(dev->archdata.iommu);
+ kfree(utlbs);
+
dev->archdata.iommu = NULL;
- iommu_group_remove_device(dev);
+
+ if (!IS_ERR_OR_NULL(group))
+ iommu_group_remove_device(dev);
+
return ret;
}
static void ipmmu_remove_device(struct device *dev)
{
+ struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+
arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
- kfree(dev->archdata.iommu);
+
+ kfree(archdata->utlbs);
+ kfree(archdata);
+
dev->archdata.iommu = NULL;
}
@@ -1131,7 +747,7 @@ static const struct iommu_ops ipmmu_ops = {
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
- .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+ .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};
/* -----------------------------------------------------------------------------
@@ -1154,7 +770,7 @@ static int ipmmu_probe(struct platform_device *pdev)
int irq;
int ret;
- if (!pdev->dev.platform_data) {
+ if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
dev_err(&pdev->dev, "missing platform data\n");
return -EINVAL;
}
@@ -1166,7 +782,6 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
- mmu->pdata = pdev->dev.platform_data;
mmu->num_utlbs = 32;
/* Map I/O memory and request IRQ. */
@@ -1175,6 +790,20 @@ static int ipmmu_probe(struct platform_device *pdev)
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
+ /*
+ * The IPMMU has two register banks, for secure and non-secure modes.
+ * The bank mapped at the beginning of the IPMMU address space
+ * corresponds to the running mode of the CPU. When running in secure
+ * mode the non-secure register bank is also available at an offset.
+ *
+ * Secure mode operation isn't clearly documented and is thus currently
+ * not implemented in the driver. Furthermore, preliminary tests of
+ * non-secure operation with the main register bank were not successful.
+ * Offset the register base unconditionally to point to the non-secure
+ * alias space for now.
+ */
+ mmu->base += IM_NS_ALIAS_OFFSET;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
@@ -1220,9 +849,14 @@ static int ipmmu_remove(struct platform_device *pdev)
return 0;
}
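+
+/*
+ * Illustrative DT fragment matching what ipmmu_find_utlbs() parses
+ * (node names and the micro-TLB numbers are placeholders):
+ *
+ *	ipmmu: iommu@e67b0000 {
+ *		compatible = "renesas,ipmmu-vmsa";
+ *		#iommu-cells = <1>;
+ *	};
+ *
+ *	some-device {
+ *		iommus = <&ipmmu 12>, <&ipmmu 13>;
+ *	};
+ */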
+static const struct of_device_id ipmmu_of_ids[] = {
+ { .compatible = "renesas,ipmmu-vmsa", },
+	{ /* sentinel */ }
+};
+
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
+ .of_match_table = of_match_ptr(ipmmu_of_ids),
},
.probe = ipmmu_probe,
.remove = ipmmu_remove,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 89c4846683be..390079ee1350 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -17,12 +17,11 @@
#include "irq_remapping.h"
int irq_remapping_enabled;
-
-int disable_irq_remap;
int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;
+static int disable_irq_remap;
static struct irq_remap_ops *remap_ops;
static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
@@ -194,45 +193,32 @@ static __init int setup_irqremap(char *str)
}
early_param("intremap", setup_irqremap);
-void __init setup_irq_remapping_ops(void)
-{
- remap_ops = &intel_irq_remap_ops;
-
-#ifdef CONFIG_AMD_IOMMU
- if (amd_iommu_irq_ops.prepare() == 0)
- remap_ops = &amd_iommu_irq_ops;
-#endif
-}
-
void set_irq_remapping_broken(void)
{
irq_remap_broken = 1;
}
-int irq_remapping_supported(void)
+int __init irq_remapping_prepare(void)
{
if (disable_irq_remap)
- return 0;
-
- if (!remap_ops || !remap_ops->supported)
- return 0;
-
- return remap_ops->supported();
-}
+ return -ENOSYS;
-int __init irq_remapping_prepare(void)
-{
- if (!remap_ops || !remap_ops->prepare)
- return -ENODEV;
+ if (intel_irq_remap_ops.prepare() == 0)
+ remap_ops = &intel_irq_remap_ops;
+ else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
+ amd_iommu_irq_ops.prepare() == 0)
+ remap_ops = &amd_iommu_irq_ops;
+ else
+ return -ENOSYS;
- return remap_ops->prepare();
+ return 0;
}
int __init irq_remapping_enable(void)
{
int ret;
- if (!remap_ops || !remap_ops->enable)
+ if (!remap_ops->enable)
return -ENODEV;
ret = remap_ops->enable();
@@ -245,22 +231,16 @@ int __init irq_remapping_enable(void)
void irq_remapping_disable(void)
{
- if (!irq_remapping_enabled ||
- !remap_ops ||
- !remap_ops->disable)
- return;
-
- remap_ops->disable();
+ if (irq_remapping_enabled && remap_ops->disable)
+ remap_ops->disable();
}
int irq_remapping_reenable(int mode)
{
- if (!irq_remapping_enabled ||
- !remap_ops ||
- !remap_ops->reenable)
- return 0;
+ if (irq_remapping_enabled && remap_ops->reenable)
+ return remap_ops->reenable(mode);
- return remap_ops->reenable(mode);
+ return 0;
}
int __init irq_remap_enable_fault_handling(void)
@@ -268,7 +248,7 @@ int __init irq_remap_enable_fault_handling(void)
if (!irq_remapping_enabled)
return 0;
- if (!remap_ops || !remap_ops->enable_faulting)
+ if (!remap_ops->enable_faulting)
return -ENODEV;
return remap_ops->enable_faulting();
@@ -279,7 +259,7 @@ int setup_ioapic_remapped_entry(int irq,
unsigned int destination, int vector,
struct io_apic_irq_attr *attr)
{
- if (!remap_ops || !remap_ops->setup_ioapic_entry)
+ if (!remap_ops->setup_ioapic_entry)
return -ENODEV;
return remap_ops->setup_ioapic_entry(irq, entry, destination,
@@ -289,8 +269,7 @@ int setup_ioapic_remapped_entry(int irq,
static int set_remapped_irq_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- if (!config_enabled(CONFIG_SMP) || !remap_ops ||
- !remap_ops->set_affinity)
+ if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity)
return 0;
return remap_ops->set_affinity(data, mask, force);
@@ -300,10 +279,7 @@ void free_remapped_irq(int irq)
{
struct irq_cfg *cfg = irq_cfg(irq);
- if (!remap_ops || !remap_ops->free_irq)
- return;
-
- if (irq_remapped(cfg))
+ if (irq_remapped(cfg) && remap_ops->free_irq)
remap_ops->free_irq(irq);
}
@@ -315,13 +291,13 @@ void compose_remapped_msi_msg(struct pci_dev *pdev,
if (!irq_remapped(cfg))
native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
- else if (remap_ops && remap_ops->compose_msi_msg)
+ else if (remap_ops->compose_msi_msg)
remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
}
static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
{
- if (!remap_ops || !remap_ops->msi_alloc_irq)
+ if (!remap_ops->msi_alloc_irq)
return -ENODEV;
return remap_ops->msi_alloc_irq(pdev, irq, nvec);
@@ -330,7 +306,7 @@ static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
int index, int sub_handle)
{
- if (!remap_ops || !remap_ops->msi_setup_irq)
+ if (!remap_ops->msi_setup_irq)
return -ENODEV;
return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
@@ -340,7 +316,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
{
int ret;
- if (!remap_ops || !remap_ops->alloc_hpet_msi)
+ if (!remap_ops->alloc_hpet_msi)
return -ENODEV;
ret = remap_ops->alloc_hpet_msi(irq, id);
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index fde250f86e60..7c70cc29ffe6 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -31,16 +31,12 @@ struct cpumask;
struct pci_dev;
struct msi_msg;
-extern int disable_irq_remap;
extern int irq_remap_broken;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
extern int irq_remapping_enabled;
struct irq_remap_ops {
- /* Check whether Interrupt Remapping is supported */
- int (*supported)(void);
-
/* Initializes hardware and makes it ready for remapping interrupts */
int (*prepare)(void);
@@ -89,7 +85,6 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
#else /* CONFIG_IRQ_REMAP */
#define irq_remapping_enabled 0
-#define disable_irq_remap 1
#define irq_remap_broken 0
#endif /* CONFIG_IRQ_REMAP */
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bbb7dcef02d3..f59f857b702e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1126,7 +1126,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return -EINVAL;
}
- dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+ dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
iotlb_init_entry(&e, da, pa, omap_pgsz);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b2023af384b9..6a8b1ec4a48a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rk_iommu_dt_ids),
},
};
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
+ .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
return 0;
}