Diffstat (limited to 'include')
-rw-r--r--  include/Kbuild                   |  14
-rw-r--r--  include/linux/compiler-gcc.h     |   2
-rw-r--r--  include/linux/compiler.h         |   5
-rw-r--r--  include/linux/compiler_types.h   |   4
-rw-r--r--  include/linux/dma-direct.h       |   9
-rw-r--r--  include/linux/dma-mapping.h      |  14
-rw-r--r--  include/linux/fs_pin.h           |   1
-rw-r--r--  include/linux/kvm_host.h         |   1
-rw-r--r--  include/linux/msi.h              |   8
-rw-r--r--  include/linux/ntb.h              | 200
-rw-r--r--  include/linux/pci.h              |   9
-rw-r--r--  include/linux/sched/isolation.h  |   6
-rw-r--r--  include/scsi/scsi_host.h         |   3
13 files changed, 264 insertions(+), 12 deletions(-)
diff --git a/include/Kbuild b/include/Kbuild
index 7e9f1acb9dd5..c38f0d46b267 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -31,7 +31,7 @@ header-test- += acpi/platform/acintel.h
header-test- += acpi/platform/aclinux.h
header-test- += acpi/platform/aclinuxex.h
header-test- += acpi/processor.h
-header-test- += clocksource/hyperv_timer.h
+header-test-$(CONFIG_X86) += clocksource/hyperv_timer.h
header-test- += clocksource/timer-sp804.h
header-test- += crypto/cast_common.h
header-test- += crypto/internal/cryptouser.h
@@ -246,6 +246,7 @@ header-test- += linux/intel-pti.h
header-test- += linux/intel-svm.h
header-test- += linux/interconnect-provider.h
header-test- += linux/ioc3.h
+header-test-$(CONFIG_BLOCK) += linux/iomap.h
header-test- += linux/ipack.h
header-test- += linux/irq_cpustat.h
header-test- += linux/irq_poll.h
@@ -454,9 +455,6 @@ header-test- += linux/phy/omap_control_phy.h
header-test- += linux/phy/tegra/xusb.h
header-test- += linux/phy/ulpi_phy.h
header-test- += linux/phy_fixed.h
-header-test- += linux/pinctrl/pinconf-generic.h
-header-test- += linux/pinctrl/pinconf.h
-header-test- += linux/pinctrl/pinctrl.h
header-test- += linux/pipe_fs_i.h
header-test- += linux/pktcdvd.h
header-test- += linux/pl320-ipc.h
@@ -905,10 +903,11 @@ header-test- += net/netfilter/nf_nat_redirect.h
header-test- += net/netfilter/nf_queue.h
header-test- += net/netfilter/nf_reject.h
header-test- += net/netfilter/nf_synproxy.h
-header-test- += net/netfilter/nf_tables.h
-header-test- += net/netfilter/nf_tables_core.h
-header-test- += net/netfilter/nf_tables_ipv4.h
+header-test-$(CONFIG_NF_TABLES) += net/netfilter/nf_tables.h
+header-test-$(CONFIG_NF_TABLES) += net/netfilter/nf_tables_core.h
+header-test-$(CONFIG_NF_TABLES) += net/netfilter/nf_tables_ipv4.h
header-test- += net/netfilter/nf_tables_ipv6.h
+header-test-$(CONFIG_NF_TABLES) += net/netfilter/nf_tables_offload.h
header-test- += net/netfilter/nft_fib.h
header-test- += net/netfilter/nft_meta.h
header-test- += net/netfilter/nft_reject.h
@@ -949,7 +948,6 @@ header-test- += pcmcia/ds.h
header-test- += rdma/ib.h
header-test- += rdma/iw_portmap.h
header-test- += rdma/opa_port_info.h
-header-test- += rdma/rdma_counter.h
header-test- += rdma/rdmavt_cq.h
header-test- += rdma/restrack.h
header-test- += rdma/signature.h
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e8579412ad21..d7ee4c6bad48 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -170,3 +170,5 @@
#else
#define __diag_GCC_8(s)
#endif
+
+#define __no_fgcse __attribute__((optimize("-fno-gcse")))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8aaf7cd026b0..f0fd5636fddb 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -116,9 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
".pushsection .discard.unreachable\n\t" \
".long 999b - .\n\t" \
".popsection\n\t"
+
+/* Annotate a C jump table to allow objtool to follow the code flow */
+#define __annotate_jump_table __section(".rodata..c_jump_table")
+
#else
#define annotate_reachable()
#define annotate_unreachable()
+#define __annotate_jump_table
#endif
#ifndef ASM_UNREACHABLE
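
The new __annotate_jump_table marker above places a C jump table in a dedicated section so objtool can follow the indirect jumps through it. Below is a minimal, hypothetical sketch of a computed-goto interpreter using the annotation; the interpreter and its opcodes are made up for illustration and are not part of this diff:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* Hypothetical byte-code interpreter using GCC computed gotos. */
	static u32 demo_interp(const u8 *prog, u32 acc)
	{
		/* The table of label addresses lives in .rodata..c_jump_table
		 * so objtool can resolve the gotos below. */
		static const void * const jumptable[3] __annotate_jump_table = {
			&&op_halt, &&op_inc, &&op_dec,
		};

		goto *jumptable[*prog];
	op_inc:
		acc++;
		goto *jumptable[*++prog];
	op_dec:
		acc--;
		goto *jumptable[*++prog];
	op_halt:
		return acc;
	}
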
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 095d55c3834d..599c27b56c29 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -189,6 +189,10 @@ struct ftrace_likely_data {
#define asm_volatile_goto(x...) asm goto(x)
#endif
+#ifndef __no_fgcse
+# define __no_fgcse
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
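
Together, the compiler-gcc.h definition and the empty fallback above let a single function opt out of GCC's global common subexpression elimination. A hedged sketch of how a hot computed-goto loop might be annotated; the function name and body are illustrative only:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/*
	 * Hypothetical: -fgcse can merge the computed-goto targets of a large
	 * interpreter into a single indirect jump, which hurts branch
	 * prediction, so this one function disables that pass.
	 */
	static u64 __no_fgcse demo_run_prog(u64 *regs, const u32 *insns)
	{
		/* ... dispatch loop over insns ... */
		return regs[0];
	}
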
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index b7338702592a..adf993a3bd58 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
* bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
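
An architecture that selects CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED is expected to supply the out-of-line force_dma_unencrypted() declared above. A minimal sketch of what such an arch-side implementation could look like, modeled loosely on the x86 SEV case; sev_active() stands in for whatever "is memory encryption active" query the architecture actually provides:

	#include <linux/dma-direct.h>
	#include <linux/mem_encrypt.h>

	/*
	 * Sketch: when memory encryption is active, device-visible DMA
	 * buffers must be unencrypted, so tell the dma-direct code to
	 * force unencrypted mappings.
	 */
	bool force_dma_unencrypted(struct device *dev)
	{
		return sev_active();
	}
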
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 8d13e28a8e07..e11b115dd0e4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
+ dma_get_required_mask(dev);
+}
+
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
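
dma_addressing_limited() gives drivers a single check for "can this device reach all of system memory". A hedged probe-path sketch follows; the 64/32-bit mask fallback is the usual pattern, while the reaction to a limited mask (the dev_info() and any queue-depth tuning) is illustrative only:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int demo_probe_dma(struct pci_dev *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret)
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		if (dma_addressing_limited(dev))
			/* bounce buffering is likely; a real driver might
			 * shrink its queue depth here */
			dev_info(dev, "DMA mask cannot cover all memory\n");

		return 0;
	}
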
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index 7cab74d66f85..bdd09fd2520c 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -20,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
}
void pin_remove(struct fs_pin *);
-void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
void pin_insert(struct fs_pin *, struct vfsmount *);
void pin_kill(struct fs_pin *);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c5da875f19e3..5c5b5867024c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -318,6 +318,7 @@ struct kvm_vcpu {
} spin_loop;
#endif
bool preempted;
+ bool ready;
struct kvm_vcpu_arch arch;
struct dentry *debugfs_dentry;
};
diff --git a/include/linux/msi.h b/include/linux/msi.h
index d48e919d55ae..8ad679e9d9c0 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -64,6 +64,10 @@ struct ti_sci_inta_msi_desc {
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
*
+ * @write_msi_msg: Callback that may be called when the MSI message
+ * address or data changes
+ * @write_msi_msg_data: Data parameter for the callback.
+ *
* @masked: [PCI MSI/X] Mask bits
* @is_msix: [PCI MSI/X] True if MSI-X
* @multiple: [PCI MSI/X] log2 num of messages allocated
@@ -90,6 +94,9 @@ struct msi_desc {
const void *iommu_cookie;
#endif
+ void (*write_msi_msg)(struct msi_desc *entry, void *data);
+ void *write_msi_msg_data;
+
union {
/* PCI MSI/X specific data */
struct {
@@ -100,6 +107,7 @@ struct msi_desc {
u8 multi_cap : 3;
u8 maskbit : 1;
u8 is_64 : 1;
+ u8 is_virtual : 1;
u16 entry_nr;
unsigned default_irq;
} msi_attrib;
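
The per-descriptor write_msi_msg hook added above is what lets a consumer (for instance the NTB MSI library added further down in linux/ntb.h) notice when the kernel rewrites an MSI message, e.g. after an affinity change. A small, hypothetical sketch of installing the callback on a PCI device's descriptors; demo_msi_rewritten() and demo_hook_msi() are made-up names:

	#include <linux/device.h>
	#include <linux/msi.h>
	#include <linux/pci.h>

	static void demo_msi_rewritten(struct msi_desc *entry, void *data)
	{
		struct device *dev = data;

		/* entry->msg now holds the new address/data pair; a real
		 * user would re-publish it to whoever cached the old one. */
		dev_info(dev, "MSI %u moved to %#x/%#x\n", entry->irq,
			 entry->msg.address_lo, entry->msg.data);
	}

	static void demo_hook_msi(struct pci_dev *pdev)
	{
		struct msi_desc *entry;

		for_each_pci_msi_entry(entry, pdev) {
			entry->write_msi_msg = demo_msi_rewritten;
			entry->write_msi_msg_data = &pdev->dev;
		}
	}
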
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 56a92e3ae3ae..8c13538aeffe 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -58,9 +58,11 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
struct ntb_client;
struct ntb_dev;
+struct ntb_msi;
struct pci_dev;
/**
@@ -205,7 +207,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
}
/**
- * struct ntb_ctx_ops - ntb device operations
+ * struct ntb_dev_ops - ntb device operations
* @port_number: See ntb_port_number().
* @peer_port_count: See ntb_peer_port_count().
* @peer_port_number: See ntb_peer_port_number().
@@ -404,7 +406,7 @@ struct ntb_client {
#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
/**
- * struct ntb_device - ntb device
+ * struct ntb_dev - ntb device
* @dev: Linux device object.
* @pdev: PCI device entry of the ntb.
* @topo: Detected topology of the ntb.
@@ -426,6 +428,10 @@ struct ntb_dev {
spinlock_t ctx_lock;
/* block unregister until device is fully released */
struct completion released;
+
+#ifdef CONFIG_NTB_MSI
+ struct ntb_msi *msi;
+#endif
};
#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
@@ -616,7 +622,6 @@ static inline int ntb_port_number(struct ntb_dev *ntb)
return ntb->ops->port_number(ntb);
}
-
/**
* ntb_peer_port_count() - get the number of peer device ports
* @ntb: NTB device context.
@@ -654,6 +659,58 @@ static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
}
/**
+ * ntb_logical_port_number() - get the logical port number of the local port
+ * @ntb: NTB device context.
+ *
+ * The Logical Port Number is defined to be a unique number for each
+ * port starting from zero through to the number of ports minus one.
+ * This is in contrast to the Port Number where each port can be assigned
+ * any unique physical number by the hardware.
+ *
+ * The logical port number is useful for calculating the resource indexes
+ * used by peers.
+ *
+ * Return: the logical port number or negative value indicating an error
+ */
+static inline int ntb_logical_port_number(struct ntb_dev *ntb)
+{
+ int lport = ntb_port_number(ntb);
+ int pidx;
+
+ if (lport < 0)
+ return lport;
+
+ for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++)
+ if (lport <= ntb_peer_port_number(ntb, pidx))
+ return pidx;
+
+ return pidx;
+}
+
+/**
+ * ntb_peer_logical_port_number() - get the logical peer port by given index
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * The Logical Port Number is defined to be a unique number for each
+ * port starting from zero through to the number of ports minus one.
+ * This is in contrast to the Port Number where each port can be assigned
+ * any unique physical number by the hardware.
+ *
+ * The logical port number is useful for calculating the resource indexes
+ * used by peers.
+ *
+ * Return: the peer's logical port number or negative value indicating an error
+ */
+static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx)
+{
+ if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb))
+ return pidx;
+ else
+ return pidx + 1;
+}
+
+/**
* ntb_peer_port_idx() - get the peer device port index by given port number
* @ntb: NTB device context.
* @port: Peer port number.
@@ -1506,4 +1563,141 @@ static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
}
+/**
+ * ntb_peer_resource_idx() - get a resource index for a given peer idx
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * When constructing a graph of peers, each remote peer must use a different
+ * resource index (mw, doorbell, etc) to communicate with each other
+ * peer.
+ *
+ * In a two peer system, this function should always return 0 such that
+ * resource 0 points to the remote peer on both ports.
+ *
+ * In a 5 peer system, this function will return the following matrix
+ *
+ * pidx \ port     0     1     2     3     4
+ * 0               0     0     1     2     3
+ * 1               0     1     1     2     3
+ * 2               0     1     2     2     3
+ * 3               0     1     2     3     3
+ *
+ * For example, if this function is used to program the peers' memory
+ * windows, port 0 will program MW 0 on all of its peers to point to itself.
+ * Port 1 will program MW 0 on port 0 to point to itself and MW 1 on all
+ * other ports, and so on.
+ *
+ * For the legacy two host case, ntb_port_number() and ntb_peer_port_number()
+ * both return zero and therefore this function will always return zero.
+ * So MW 0 on each host would be programmed to point to the other host.
+ *
+ * Return: the resource index to use for that peer.
+ */
+static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx)
+{
+ int local_port, peer_port;
+
+ if (pidx >= ntb_peer_port_count(ntb))
+ return -EINVAL;
+
+ local_port = ntb_logical_port_number(ntb);
+ peer_port = ntb_peer_logical_port_number(ntb, pidx);
+
+ if (peer_port < local_port)
+ return local_port - 1;
+ else
+ return local_port;
+}
+
+/**
+ * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx
+ * using the highest index memory windows first
+ *
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * Like ntb_peer_resource_idx(), except it returns indexes starting with
+ * last memory window index.
+ *
+ * Return: the resource index to use for that peer.
+ */
+static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx)
+{
+ int ret;
+
+ ret = ntb_peer_resource_idx(ntb, pidx);
+ if (ret < 0)
+ return ret;
+
+ return ntb_mw_count(ntb, pidx) - ret - 1;
+}
+
+struct ntb_msi_desc {
+ u32 addr_offset;
+ u32 data;
+};
+
+#ifdef CONFIG_NTB_MSI
+
+int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx));
+int ntb_msi_setup_mws(struct ntb_dev *ntb);
+void ntb_msi_clear_mws(struct ntb_dev *ntb);
+int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc);
+void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id);
+int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc);
+int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc,
+ phys_addr_t *msi_addr);
+
+#else /* not CONFIG_NTB_MSI */
+
+static inline int ntb_msi_init(struct ntb_dev *ntb,
+ void (*desc_changed)(void *ctx))
+{
+ return -EOPNOTSUPP;
+}
+static inline int ntb_msi_setup_mws(struct ntb_dev *ntb)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {}
+static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb,
+ irq_handler_t handler,
+ irq_handler_t thread_fn,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq,
+ void *dev_id) {}
+static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc,
+ phys_addr_t *msi_addr)
+{
+ return -EOPNOTSUPP;
+
+}
+
+#endif /* CONFIG_NTB_MSI */
+
+static inline int ntbm_msi_request_irq(struct ntb_dev *ntb,
+ irq_handler_t handler,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc)
+{
+ return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name,
+ dev_id, msi_desc);
+}
+
#endif
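
Taken together, the additions above form the NTB MSI interrupt facility plus the peer resource-index helpers. A hedged sketch of how a client driver might wire them up; everything named demo_* is hypothetical and error handling is abbreviated:

	#include <linux/interrupt.h>
	#include <linux/ntb.h>

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		/* the peer triggered our MSI "doorbell" */
		return IRQ_HANDLED;
	}

	static int demo_setup_msi(struct ntb_dev *ntb, struct ntb_msi_desc *my_desc)
	{
		int ret, irq;

		ret = ntb_msi_init(ntb, NULL);	/* no desc_changed callback */
		if (ret)
			return ret;

		/* Expose the local MSI table through a memory window. */
		ret = ntb_msi_setup_mws(ntb);
		if (ret)
			return ret;

		irq = ntbm_msi_request_irq(ntb, demo_isr, "demo_ntb", ntb, my_desc);
		if (irq < 0) {
			ntb_msi_clear_mws(ntb);
			return irq;
		}

		/*
		 * *my_desc (addr_offset/data) would now be handed to the peer,
		 * e.g. via scratchpads, so the peer can call
		 * ntb_msi_peer_trigger() on it to interrupt us. The
		 * ntb_peer_resource_idx()/ntb_peer_highest_mw_idx() helpers
		 * pick which resource each peer pair uses so peers never
		 * collide.
		 */
		return 0;
	}
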
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2972793e3028..9e700d9f9f28 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1412,6 +1412,15 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
+
+/*
+ * Virtual interrupts allow for more interrupts to be allocated
+ * than the device has interrupts for. These are not programmed
+ * into the device's MSI-X table and must be handled by some
+ * other driver means.
+ */
+#define PCI_IRQ_VIRTUAL (1 << 4)
+
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
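
PCI_IRQ_VIRTUAL is aimed at callers, such as the NTB MSI code above, that want more vectors than the device's MSI-X table can hold; the extra vectors get an is_virtual descriptor and are never written to the hardware table, so the caller must deliver them by other means. A small, hypothetical allocation sketch:

	#include <linux/pci.h>

	static int demo_alloc_vectors(struct pci_dev *pdev, unsigned int want)
	{
		/*
		 * Ask for up to 'want' vectors; anything beyond the real
		 * MSI-X table size comes back as virtual vectors.
		 */
		return pci_alloc_irq_vectors(pdev, 1, want,
					     PCI_IRQ_MSIX | PCI_IRQ_VIRTUAL);
	}
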
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index b0fb1446fe04..6c8512d3be88 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -19,6 +19,7 @@ enum hk_flags {
DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
extern int housekeeping_any_cpu(enum hk_flags flags);
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
+extern bool housekeeping_enabled(enum hk_flags flags);
extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
extern void __init housekeeping_init(void);
@@ -35,6 +36,11 @@ static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
return cpu_possible_mask;
}
+static inline bool housekeeping_enabled(enum hk_flags flags)
+{
+ return false;
+}
+
static inline void housekeeping_affine(struct task_struct *t,
enum hk_flags flags) { }
static inline void housekeeping_init(void) { }
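
housekeeping_enabled() complements the existing cpumask helpers with a cheap "was CPU isolation configured for this purpose at all" query. A hedged usage sketch; HK_FLAG_MISC is an existing flag chosen here purely for illustration:

	#include <linux/cpumask.h>
	#include <linux/sched/isolation.h>

	static void demo_pick_cpus(const struct cpumask **mask)
	{
		*mask = cpu_online_mask;

		/* Only consult the housekeeping mask when isolation
		 * (nohz_full/isolcpus) was actually set up. */
		if (housekeeping_enabled(HK_FLAG_MISC))
			*mask = housekeeping_cpumask(HK_FLAG_MISC);
	}
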
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a5fcdad4a03e..cc139dbd71e5 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -369,6 +369,8 @@ struct scsi_host_template {
*/
unsigned long dma_boundary;
+ unsigned long virt_boundary_mask;
+
/*
* This specifies "machine infinity" for host templates which don't
* limit the transfer size. Note this limit represents an absolute
@@ -587,6 +589,7 @@ struct Scsi_Host {
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned long dma_boundary;
+ unsigned long virt_boundary_mask;
/*
* In scsi-mq mode, the number of hardware queues supported by the LLD.
*