Diffstat (limited to 'arch/arm64')
34 files changed, 149 insertions, 94 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df350f4e1e7a..69a59a5d1143 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -13,13 +13,15 @@ config ARM64
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_DMA_COHERENT_TO_PFN
 	select ARCH_HAS_DMA_MMAP_PGPROT
+	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_KEEPINITRD
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SETUP_DMA_OPS
@@ -58,6 +60,7 @@ config ARM64
 	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_KEEP_MEMBLOCK
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index a2cec6218211..fe107ce115ef 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -393,7 +393,7 @@
 		};

 		sysmgr: sysmgr@ffd12000 {
-			compatible = "altr,sys-mgr", "syscon";
+			compatible = "altr,sys-mgr-s10","altr,sys-mgr";
 			reg = <0xffd12000 0x228>;
 		};

diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 976d92a94738..43307bad3f0d 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -819,7 +819,6 @@
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges;
-			num-lanes = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0 0 0 1 &pcie_intc0 0>,
 					<0 0 0 2 &pcie_intc0 1>,
@@ -840,7 +839,6 @@
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges;
-			num-lanes = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0 0 0 1 &pcie_intc1 0>,
 					<0 0 0 2 &pcie_intc1 1>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2d9c39033c1a..74f0a199166d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -206,7 +206,7 @@ CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
 CONFIG_MTD_NAND_DENALI_DT=y
 CONFIG_MTD_NAND_MARVELL=y
 CONFIG_MTD_NAND_QCOM=y
@@ -432,6 +432,7 @@ CONFIG_MESON_WATCHDOG=m
 CONFIG_RENESAS_WDT=y
 CONFIG_UNIPHIER_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
+CONFIG_MFD_ALTERA_SYSMGR=y
 CONFIG_MFD_BD9571MWV=y
 CONFIG_MFD_AXP20X_I2C=y
 CONFIG_MFD_AXP20X_RSB=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 036ea77f83bc..cb89c80800b5 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -14,6 +14,7 @@
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>

@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
 			   u32 abytes, u32 *macp)
 {
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
 				     num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)

 	err = skcipher_walk_aead_encrypt(&walk, req, false);

-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)

 	err = skcipher_walk_aead_decrypt(&walk, req, false);

-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
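The may_use_simd() -> crypto_simd_usable() conversion seen in aes-ce-ccm-glue.c above repeats through every crypto glue file in this diff. The new helper lives in <crypto/internal/simd.h> and, unlike the raw asm/simd.h check, can also report false when the crypto self-tests are deliberately exercising the no-SIMD fallback path. A minimal sketch of the calling pattern; do_scalar() and do_neon() are hypothetical stand-ins, not functions from this diff:

	#include <asm/neon.h>
	#include <crypto/internal/simd.h>

	static void do_crypt(u8 dst[], u8 const src[])
	{
		if (!crypto_simd_usable()) {
			do_scalar(dst, src);	/* C fallback, safe in any context */
			return;
		}

		kernel_neon_begin();		/* claim the NEON register file */
		do_neon(dst, src);
		kernel_neon_end();
	}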
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index e6b3227bbf57..3213843fcb46 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
 		return;
 	}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
 		return;
 	}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 1e676625ef33..f0ceb545bd1e 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(ctx, req);

 	return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
 {
 	int rounds = 6 + ctx->key_length / 4;

-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg,
 			       enc_before, enc_after);
@@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

-	mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
+	mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);

 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index bf1b321ff4c1..02b65d9eb947 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(&ctx->fallback, req);

 	return ctr_encrypt(req);
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;

 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;

 	kernel_neon_begin();
 	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
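Two small fixes ride along with the conversion above: cbcmac_final() in aes-glue.c now requests the extra pre-encryption pass only when partial data is actually buffered (enc_before becomes ctx->len != 0 instead of an unconditional 1), and __xts_crypt() in aes-neonbs-glue.c stops touching the walk state when skcipher_walk_virt() fails. A sketch of the latter guard, with neon_do_iv() as a hypothetical stand-in for the NEON tweak computation:

	static int xts_crypt_sketch(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		int err;

		err = skcipher_walk_virt(&walk, req, false);
		if (err)
			return err;	/* walk.iv is not valid on failure */

		kernel_neon_begin();
		neon_do_iv(walk.iv);	/* hypothetical NEON IV transform */
		kernel_neon_end();
		/* ... per-block processing would continue here ... */
		return err;
	}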
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index cb054f51c917..82029cda2e77 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@

 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);

 	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
 	u32 state[16];
 	u8 real_iv[16];

-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);

 	crypto_chacha_init(state, ctx, req->iv);
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index e81d5bd555c0..2e0a7d2eee24 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>

 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>

 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);

-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p8(*crc, data, length);
 		kernel_neon_end();
@@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);

-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p64(*crc, data, length);
 		kernel_neon_end();
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 4e69bb78ea89..b39ed99b06fb 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -17,6 +17,7 @@
 #include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
 						  struct ghash_key const *k,
 						  const char *head))
 {
-	if (likely(may_use_simd())) {
+	if (likely(crypto_simd_usable())) {
 		kernel_neon_begin();
 		simd_update(blocks, dg, src, key, head);
 		kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)

 	err = skcipher_walk_aead_encrypt(&walk, req, false);

-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;

 		kernel_neon_begin();
@@ -473,9 +474,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);

 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;

 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +488,9 @@ static int gcm_encrypt(struct aead_request *req)

 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);

-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL, pmull_ghash_update_p64);

@@ -563,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)

 	err = skcipher_walk_aead_decrypt(&walk, req, false);

-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;

 		kernel_neon_begin();
@@ -609,7 +612,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);

 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
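In the gcm_encrypt()/gcm_decrypt() hunks above, the scalar fallback loop runs only while walk.nbytes >= 2 * AES_BLOCK_SIZE, so it must not consume every complete block: whatever it leaves behind is what the code after the loop finalizes. Rounding the block count down to an even number keeps at least one block in reserve. Worked numbers, assuming AES_BLOCK_SIZE is 16:

	/* walk.nbytes = 80 */
	int old_blocks = 80 / 16;		/* 5: consumes all 80 bytes   */
	int new_blocks = 80 / (2 * 16) * 2;	/* 4: leaves 16 bytes for the
						 *    tail path after the loop */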
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index 38a589044b6c..895d3727c1fb 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>

@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !may_use_simd())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);

 	do {
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 17fac2889f56..eaa7a8258f1c 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_update(desc, data, len);

 	sctx->finalize = 0;
@@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);

 	/*
@@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, NULL, 0, out);

 	sctx->finalize = 0;
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 261f5195cab7..a725997e55f2 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);

@@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);

-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
@@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);

-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha256_base_do_finalize(desc,
 				(sha256_block_fn *)sha256_block_data_order);
 		return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 0cccdb9cc2c0..e62298740e31 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -14,6 +14,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cryptohash.h>
@@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);

@@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index a336feac0f59..9a4bbfc45f40 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -14,6 +14,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha3.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
 	struct sha3_state *sctx = shash_desc_ctx(desc);
 	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_update(desc, data, len);

 	if ((sctx->partial + len) >= sctx->rsiz) {
@@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
 	__le64 *digest = (__le64 *)out;
 	int i;

-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_final(desc, out);

 	sctx->buf[sctx->partial++] = 0x06;
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index f2c5f28c622a..2369540040aa 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -13,6 +13,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/cpufeature.h>
@@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src,
 					int blocks);

 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);

@@ -46,7 +47,7 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
@@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha512_base_do_finalize(desc,
 				(sha512_block_fn *)sha512_block_data_order);
 		return sha512_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 88938a20d9b2..5d15533799a2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sm3.h>
 #include <crypto/sm3_base.h>
 #include <linux/cpufeature.h>
@@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
 static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_update(desc, data, len);

 	kernel_neon_begin();
@@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
 static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_finup(desc, NULL, 0, out);

 	kernel_neon_begin();
@@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_finup(desc, data, len, out);

 	kernel_neon_begin();
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 0c4fc223f225..2754c875d39c 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -3,6 +3,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);

-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		crypto_sm4_encrypt(tfm, out, in);
 	} else {
 		kernel_neon_begin();
@@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);

-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		crypto_sm4_decrypt(tfm, out, in);
 	} else {
 		kernel_neon_begin();
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index eb0df239a759..9e977dedf193 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += qspinlock.h
 generic-y += segment.h
 generic-y += serial.h
 generic-y += set_memory.h
-generic-y += sizes.h
 generic-y += switch_to.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index 355e552a9175..c7f67da13cd9 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -3,7 +3,7 @@
 #ifndef __ASM_BOOT_H
 #define __ASM_BOOT_H

-#include <asm/sizes.h>
+#include <linux/sizes.h>

 /*
  * arm64 requires the DTB to be 8 byte aligned and
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f210bcf096f7..bc895c869892 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -401,7 +401,7 @@ unsigned long cpu_get_elf_hwcap2(void);
 #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))

 /* System capability check for constant caps */
-static inline bool __cpus_have_const_cap(int num)
+static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -415,7 +415,7 @@ static inline bool cpus_have_cap(unsigned int num)
 	return test_bit(num, cpu_hwcaps);
 }

-static inline bool cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_const_cap(int num)
 {
 	if (static_branch_likely(&arm64_const_caps_ready))
 		return __cpus_have_const_cap(num);
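Marking the const-cap helpers __always_inline matters because static_branch_likely() and friends expand to asm goto whose static-key operand must fold to a compile-time constant; with plain `inline` the compiler may decline to inline (for example under CONFIG_OPTIMIZE_INLINING), leaving `num` a runtime value. An illustrative sketch, not the kernel's exact definitions (keys[] is hypothetical):

	/* Only works if inlined into callers passing a constant cap number,
	 * so that &keys[num] is a link-time constant for the asm goto. */
	static __always_inline bool have_cap_sketch(int num)
	{
		return static_branch_unlikely(&keys[num]);
	}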
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index c6a07a3b433e..4aad6382f631 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -70,8 +70,4 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,

 #include <asm-generic/hugetlb.h>

-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 9c01f04db64d..ec894de0ed4e 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -277,6 +277,7 @@ __AARCH64_INSN_FUNCS(adrp,	0x9F000000, 0x90000000)
 __AARCH64_INSN_FUNCS(prfm,	0x3FC00000, 0x39800000)
 __AARCH64_INSN_FUNCS(prfm_lit,	0xFF000000, 0xD8000000)
 __AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldadd,	0x3F20FC00, 0xB8200000)
 __AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
 __AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
 __AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
@@ -394,6 +395,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 				   enum aarch64_insn_register state,
 				   enum aarch64_insn_size_type size,
 				   enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+			   enum aarch64_insn_register address,
+			   enum aarch64_insn_register value,
+			   enum aarch64_insn_size_type size);
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+			   enum aarch64_insn_register value,
+			   enum aarch64_insn_size_type size);
 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
 				 enum aarch64_insn_register src,
 				 int imm, enum aarch64_insn_variant variant,
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 2cb8248fa2c8..8ffcf5a512bb 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -26,7 +26,7 @@
 #include <linux/types.h>
 #include <asm/bug.h>
 #include <asm/page-def.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>

 /*
  * Size of the PCI I/O space. This must remain a power of two so that
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index a179df3674a1..a65167f5cded 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -87,9 +87,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
  * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
  * AArch64 has the same system calls both on little- and big- endian.
  */
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
 {
-	if (is_compat_task())
+	if (is_compat_thread(task_thread_info(task)))
 		return AUDIT_ARCH_ARM;

 	return AUDIT_ARCH_AARCH64;
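syscall_get_arch() now takes the traced task explicitly, so callers such as the audit plumbing can report the ABI of the task being examined rather than implicitly testing current. A caller-side sketch; report_syscall_abi() is illustrative, not from this diff:

	/* AUDIT_ARCH_ARM for compat (AArch32) tasks, AUDIT_ARCH_AARCH64 otherwise */
	static void report_syscall_abi(struct task_struct *tsk)
	{
		int arch = syscall_get_arch(tsk);

		pr_info("pid %d: syscall ABI 0x%x\n", task_pid_nr(tsk), arch);
	}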
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index eac1d0cc595c..7ff800045434 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -45,7 +45,7 @@ static inline int get_cpu_for_acpi_id(u32 uid)
 	return -EINVAL;
 }

-static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header,
+static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
 				      const unsigned long end)
 {
 	struct acpi_srat_gicc_affinity *pa;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7820a4a688fa..9e2b5882cdeb 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -734,6 +734,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 					    state);
 }

+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+			   enum aarch64_insn_register address,
+			   enum aarch64_insn_register value,
+			   enum aarch64_insn_size_type size)
+{
+	u32 insn = aarch64_insn_get_ldadd_value();
+
+	switch (size) {
+	case AARCH64_INSN_SIZE_32:
+	case AARCH64_INSN_SIZE_64:
+		break;
+	default:
+		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_encode_ldst_size(size, insn);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+					    result);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+					    address);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+					    value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+			   enum aarch64_insn_register value,
+			   enum aarch64_insn_size_type size)
+{
+	/*
+	 * STADD is simply encoded as an alias for LDADD with XZR as
+	 * the destination register.
+	 */
+	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+				      value, size);
+}
+
 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
 					enum aarch64_insn_prfm_target target,
 					enum aarch64_insn_prfm_policy policy,
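The new generators only need the LDADD opcode because STADD is an alias. Working one encoding through by hand, assuming the standard A64 atomic-memory-operation field layout the helpers above encode (Rs at bits 20:16, Rn at bits 9:5, Rt at bits 4:0):

	/* stadd w1, [x2]  ==  ldadd w1, wzr, [x2] */
	u32 insn = 0xB8200000		/* 32-bit LDADD base, from insn.h above */
		 | (1 << 16)		/* Rs = w1, the addend               */
		 | (2 << 5)		/* Rn = x2, the address              */
		 | 31;			/* Rt = wzr: result discarded, STADD */
	/* insn == 0xB821005F, matching aarch64_insn_gen_stadd(
	 *	AARCH64_INSN_REG_2, AARCH64_INSN_REG_1, AARCH64_INSN_SIZE_32) */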
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 824de7038967..bb4b3f07761a 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -586,7 +586,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 }

 static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
 			     const unsigned long end)
 {
 	struct acpi_madt_generic_interrupt *processor;
@@ -595,7 +595,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
 	if (BAD_MADT_GICC_ENTRY(processor, end))
 		return -EINVAL;

-	acpi_table_print_madt_entry(header);
+	acpi_table_print_madt_entry(&header->common);

 	acpi_map_gic_cpu_interface(processor);
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a3f85624313e..a67121d419a2 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -23,7 +23,6 @@ config KVM
 	depends on OF
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
-	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 40e2d7e5efcb..d2adffb81b5d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -48,7 +48,7 @@
 #include <asm/numa.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <asm/tlb.h>
 #include <asm/alternative.h>
@@ -578,24 +578,11 @@ void free_initmem(void)
 }

 #ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd __initdata;
-
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd) {
-		free_reserved_area((void *)start, (void *)end, 0, "initrd");
-		memblock_free(__virt_to_phys(start), end - start);
-	}
-}
-
-static int __init keepinitrd_setup(char *__unused)
-{
-	keep_initrd = 1;
-	return 1;
+	free_reserved_area((void *)start, (void *)end, 0, "initrd");
+	memblock_free(__virt_to_phys(start), end - start);
 }
-
-__setup("keepinitrd", keepinitrd_setup);
 #endif

 /*
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ef82312860ac..a170c6369a68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
 #include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 #include <asm/ptdump.h>
@@ -1065,8 +1065,8 @@ int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
 }

 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		    bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		    struct mhp_restrictions *restrictions)
 {
 	int flags = 0;

@@ -1077,6 +1077,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 			     size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);

 	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
-			   altmap, want_memblock);
+			   restrictions);
 }
 #endif
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 783de51a6c4e..76606e87233f 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -100,11 +100,9 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
 	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)

-/* Prefetch */
-#define A64_PRFM(Rn, type, target, policy) \
-	aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
-				  AARCH64_INSN_PRFM_TARGET_##target, \
-				  AARCH64_INSN_PRFM_POLICY_##policy)
+/* LSE atomics */
+#define A64_STADD(sf, Rn, Rs) \
+	aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))

 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index aaddc0217e73..df845cee438e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -365,7 +365,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
 			  BPF_CLASS(code) == BPF_JMP;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
-	u8 jmp_cond;
+	u8 jmp_cond, reg;
 	s32 jmp_offset;

 #define check_imm(bits, imm) do {				\
@@ -756,19 +756,28 @@ emit_cond_jmp:
 			break;
 		}
 		break;

+	/* STX XADD: lock *(u32 *)(dst + off) += src */
 	case BPF_STX | BPF_XADD | BPF_W:
 	/* STX XADD: lock *(u64 *)(dst + off) += src */
 	case BPF_STX | BPF_XADD | BPF_DW:
-		emit_a64_mov_i(1, tmp, off, ctx);
-		emit(A64_ADD(1, tmp, tmp, dst), ctx);
-		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
-		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
-		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
-		jmp_offset = -3;
-		check_imm19(jmp_offset);
-		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+		if (!off) {
+			reg = dst;
+		} else {
+			emit_a64_mov_i(1, tmp, off, ctx);
+			emit(A64_ADD(1, tmp, tmp, dst), ctx);
+			reg = tmp;
+		}
+		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+			emit(A64_STADD(isdw, reg, src), ctx);
+		} else {
+			emit(A64_LDXR(isdw, tmp2, reg), ctx);
+			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+			jmp_offset = -3;
+			check_imm19(jmp_offset);
+			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+		}
 		break;

 	default:
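For reference, the two JIT strategies above correspond to roughly the following A64 sequences (register numbers illustrative, not what the JIT necessarily allocates). The jmp_offset of -3 in the fallback is the CBNZ branching back over the STXR and ADD to the LDXR:

	/* LSE path (ARM64_HAS_LSE_ATOMICS): one far atomic, no retry loop
	 *	stadd	w10, [x25]
	 *
	 * LL/SC fallback: exclusive-monitor retry loop
	 * 1:	ldxr	w11, [x25]		// load-exclusive
	 *	add	w11, w11, w10
	 *	stxr	w12, w11, [x25]		// store-exclusive, w12 = status
	 *	cbnz	w12, 1b			// retry if the store failed
	 */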