Diffstat (limited to 'include')
2023 files changed, 92575 insertions, 32936 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 3e8d969b22fe..8cbfcbca7b7e 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
  *
  * Name: acbuffer.h - Support for buffers returned by ACPI predefined names
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 0362cbb72359..c3ae3ea88e17 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
  *
  * Name: acconfig.h - Global configuration constants
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index ea3b1c41bc79..28943c900be7 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
  *
  * Name: acexcep.h - Exception codes returned by the ACPI subsystem
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 30869ab77fba..6f22e92b1744 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
  *
  * Name: acnames.h - Global names and strings
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 5a3875744678..73781aae2119 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
  *
  * Name: acoutput.h -- debug output
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index 6f6282a862bc..416e59bcf149 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
  *
  * Name: acpi.h - Master public include file used to interface to ACPICA
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 44a3082a92a4..c09d72986968 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -344,8 +344,9 @@ struct acpi_device_physical_node {
 
 struct acpi_device_properties {
 	const guid_t *guid;
-	const union acpi_object *properties;
+	union acpi_object *properties;
 	struct list_head list;
+	void **bufs;
 };
 
 /* ACPI Device Specific Data (_DSD) */
@@ -360,12 +361,10 @@ struct acpi_gpio_mapping;
 
 /* Device */
 
 struct acpi_device {
+	u32 pld_crc;
 	int device_type;
 	acpi_handle handle;		/* no handle for fixed hardware */
 	struct fwnode_handle fwnode;
-	struct acpi_device *parent;
-	struct list_head children;
-	struct list_head node;
 	struct list_head wakeup_list;
 	struct list_head del_list;
 	struct acpi_device_status status;
@@ -378,7 +377,6 @@ struct acpi_device {
 	struct acpi_device_data data;
 	struct acpi_scan_handler *handler;
 	struct acpi_hotplug_context *hp;
-	struct acpi_driver *driver;
 	const struct acpi_gpio_mapping *driver_gpios;
 	void *driver_data;
 	struct device dev;
@@ -459,6 +457,14 @@ static inline void *acpi_driver_data(struct acpi_device *d)
 #define to_acpi_device(d)	container_of(d, struct acpi_device, dev)
 #define to_acpi_driver(d)	container_of(d, struct acpi_driver, drv)
 
+static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
+{
+	if (adev->dev.parent)
+		return to_acpi_device(adev->dev.parent);
+
+	return NULL;
+}
+
 static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
 {
 	*((u32 *)&adev->status) = sta;
@@ -479,6 +485,13 @@ void acpi_initialize_hp_context(struct acpi_device *adev,
 /* acpi_device.dev.bus == &acpi_bus_type */
 extern struct bus_type acpi_bus_type;
 
+int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data);
+int acpi_dev_for_each_child(struct acpi_device *adev,
+			    int (*fn)(struct acpi_device *, void *), void *data);
+int acpi_dev_for_each_child_reverse(struct acpi_device *adev,
+				    int (*fn)(struct acpi_device *, void *),
+				    void *data);
+
 /*
  * Events
  * ------
@@ -506,8 +519,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  * External Functions
  */
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
 				       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
@@ -517,9 +528,11 @@ const char *acpi_power_state_string(int state);
 int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
 int acpi_device_fix_up_power(struct acpi_device *device);
+void acpi_device_fix_up_power_extended(struct acpi_device *adev);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 int acpi_device_update_power(struct acpi_device *device, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
+void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
 int acpi_device_power_add_dependent(struct acpi_device *adev,
 				    struct device *dev);
 void acpi_device_power_remove_dependent(struct acpi_device *adev,
@@ -580,14 +593,22 @@ int unregister_acpi_bus_type(struct acpi_bus_type *);
 int acpi_bind_one(struct device *dev, struct acpi_device *adev);
 int acpi_unbind_one(struct device *dev);
 
+enum acpi_bridge_type {
+	ACPI_BRIDGE_TYPE_PCIE = 1,
+	ACPI_BRIDGE_TYPE_CXL,
+};
+
 struct acpi_pci_root {
 	struct acpi_device * device;
 	struct pci_bus *bus;
 	u16 segment;
+	int bridge_type;
 	struct resource secondary;	/* downstream bus range */
 
-	u32 osc_support_set;	/* _OSC state of support bits */
-	u32 osc_control_set;	/* _OSC state of control bits */
+	u32 osc_support_set;		/* _OSC state of support bits */
+	u32 osc_control_set;		/* _OSC state of control bits */
+	u32 osc_ext_support_set;	/* _OSC state of extended support bits */
+	u32 osc_ext_control_set;	/* _OSC state of extended control bits */
 	phys_addr_t mcfg_addr;
 };
 
@@ -598,8 +619,7 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
 int acpi_iommu_fwspec_init(struct device *dev, u32 id,
 			   struct fwnode_handle *fwnode,
 			   const struct iommu_ops *ops);
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
-		       u64 *size);
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
 int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
 			  const u32 *input_id);
 static inline int acpi_dma_configure(struct device *dev,
@@ -609,6 +629,8 @@ static inline int acpi_dma_configure(struct device *dev,
 }
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
 					   u64 address, bool check_children);
+struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
+					   acpi_bus_address adr);
 int acpi_is_root_bridge(acpi_handle);
 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
 
@@ -716,10 +738,24 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
 }
 
 bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
 
 void acpi_dev_clear_dependencies(struct acpi_device *supplier);
 bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
-struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier);
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+						   struct acpi_device *start);
+
+/**
+ * for_each_acpi_consumer_dev - iterate over the consumer ACPI devices for a
+ *				given supplier
+ * @supplier: Pointer to the supplier's ACPI device
+ * @consumer: Pointer to &struct acpi_device to hold the consumer, initially NULL
+ */
+#define for_each_acpi_consumer_dev(supplier, consumer)			\
+	for (consumer = acpi_dev_get_next_consumer_dev(supplier, NULL);	\
+	     consumer;							\
+	     consumer = acpi_dev_get_next_consumer_dev(supplier, consumer))
+
 struct acpi_device *
 acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
 struct acpi_device *
@@ -750,9 +786,10 @@ static inline void acpi_dev_put(struct acpi_device *adev)
 	put_device(&adev->dev);
 }
 
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle);
 
-static inline void acpi_bus_put_acpi_device(struct acpi_device *adev)
+static inline void acpi_put_acpi_dev(struct acpi_device *adev)
 {
 	acpi_dev_put(adev);
 }
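Usage sketch for the new for_each_acpi_consumer_dev() iterator above; walk_consumers() and the loop body are illustrative, not part of the patch. acpi_dev_get_next_consumer_dev() hands back each consumer with a reference held and drops it on the next iteration, so a caller that breaks out of the loop early should call acpi_dev_put() on the current consumer itself:

static void walk_consumers(struct acpi_device *supplier)
{
	struct acpi_device *consumer;

	for_each_acpi_consumer_dev(supplier, consumer)
		dev_info(&consumer->dev, "consumer of %s\n",
			 dev_name(&supplier->dev));
}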
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 690c369b717a..52844cc5eeb5 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
  * interfaces must be implemented by OSL to interface the
  * ACPI components to the host operating system.
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 7417731472b7..67c0b9e734b6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
  *
  * Name: acpixf.h - External interfaces to the ACPI subsystem
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20211217
+#define ACPI_CA_VERSION                 0x20220331
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 8e2319bbd0a2..a7fb8ddb3dc6 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
  *
  * Name: acrestyp.h - Defines, types, and structures for resource descriptors
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index f9cda909f92c..c6af579f74f4 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
  *
  * Name: actbl.h - Basic ACPI Table Definitions
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 159070edd031..15c78678c5d3 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
  *
  * Name: actbl1.h - Additional ACPI table definitions
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
@@ -373,17 +373,21 @@ struct acpi_cedt_cfmws {
 	u32 interleave_targets[];
 };
 
+struct acpi_cedt_cfmws_target_element {
+	u32 interleave_target;
+};
+
 /* Values for Interleave Arithmetic field above */
 
-#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO	(0)
+#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO   (0)
 
 /* Values for Restrictions field above */
 
-#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2		(1)
-#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3		(1<<1)
-#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE	(1<<2)
-#define ACPI_CEDT_CFMWS_RESTRICT_PMEM		(1<<3)
-#define ACPI_CEDT_CFMWS_RESTRICT_FIXED		(1<<4)
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2      (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3      (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE   (1<<2)
+#define ACPI_CEDT_CFMWS_RESTRICT_PMEM       (1<<3)
+#define ACPI_CEDT_CFMWS_RESTRICT_FIXED      (1<<4)
 
 /*******************************************************************************
  *
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 16847c8d9d5f..655102bc6d14 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -3,7 +3,7 @@
  *
  * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
@@ -25,6 +25,7 @@
  * the wrong signature.
  */
 
 #define ACPI_SIG_AGDI           "AGDI"	/* Arm Generic Diagnostic Dump and Reset Device Interface */
+#define ACPI_SIG_APMT           "APMT"	/* Arm Performance Monitoring Unit table */
 #define ACPI_SIG_BDAT           "BDAT"	/* BIOS Data ACPI Table */
 #define ACPI_SIG_IORT           "IORT"	/* IO Remapping Table */
 #define ACPI_SIG_IVRS           "IVRS"	/* I/O Virtualization Reporting Structure */
@@ -260,6 +261,85 @@ struct acpi_table_agdi {
 
 /*******************************************************************************
  *
+ * APMT - ARM Performance Monitoring Unit Table
+ *
+ * Conforms to:
+ * ARM Performance Monitoring Unit Architecture 1.0 Platform Design Document
+ * ARM DEN0117 v1.0 November 25, 2021
+ *
+ ******************************************************************************/
+
+struct acpi_table_apmt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+};
+
+#define ACPI_APMT_NODE_ID_LENGTH                4
+
+/*
+ * APMT subtables
+ */
+struct acpi_apmt_node {
+	u16 length;
+	u8 flags;
+	u8 type;
+	u32 id;
+	u64 inst_primary;
+	u32 inst_secondary;
+	u64 base_address0;
+	u64 base_address1;
+	u32 ovflw_irq;
+	u32 reserved;
+	u32 ovflw_irq_flags;
+	u32 proc_affinity;
+	u32 impl_id;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE               (1<<0)
+#define ACPI_APMT_FLAGS_AFFINITY                (1<<1)
+#define ACPI_APMT_FLAGS_ATOMIC                  (1<<2)
+
+/* Values for Flags dual page field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE_NSUPP         (0<<0)
+#define ACPI_APMT_FLAGS_DUAL_PAGE_SUPP          (1<<0)
+
+/* Values for Flags processor affinity field above */
+#define ACPI_APMT_FLAGS_AFFINITY_PROC           (0<<1)
+#define ACPI_APMT_FLAGS_AFFINITY_PROC_CONTAINER (1<<1)
+
+/* Values for Flags 64-bit atomic field above */
+#define ACPI_APMT_FLAGS_ATOMIC_NSUPP            (0<<2)
+#define ACPI_APMT_FLAGS_ATOMIC_SUPP             (1<<2)
+
+/* Values for Type field above */
+
+enum acpi_apmt_node_type {
+	ACPI_APMT_NODE_TYPE_MC = 0x00,
+	ACPI_APMT_NODE_TYPE_SMMU = 0x01,
+	ACPI_APMT_NODE_TYPE_PCIE_ROOT = 0x02,
+	ACPI_APMT_NODE_TYPE_ACPI = 0x03,
+	ACPI_APMT_NODE_TYPE_CACHE = 0x04,
+	ACPI_APMT_NODE_TYPE_COUNT
+};
+
+/* Masks for ovflw_irq_flags field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE          (1<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE          (1<<1)
+
+/* Values for ovflw_irq_flags mode field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_LEVEL    (0<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_EDGE     (1<<0)
+
+/* Values for ovflw_irq_flags type field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE_WIRED    (0<<1)
+
+/*******************************************************************************
+ *
  * BDAT - BIOS Data ACPI Table
  *
  * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5
@@ -277,7 +357,7 @@ struct acpi_table_bdat {
  * IORT - IO Remapping Table
  *
  * Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049E.b, Feb 2021
+ * Document number: ARM DEN 0049E.d, Feb 2022
  *
 ******************************************************************************/
@@ -374,7 +454,8 @@ struct acpi_iort_root_complex {
 	u32 ats_attribute;
 	u32 pci_segment_number;
 	u8 memory_address_limit;	/* Memory address size limit */
-	u8 reserved[3];			/* Reserved, must be zero */
+	u16 pasid_capabilities;		/* PASID Capabilities */
+	u8 reserved[1];			/* Reserved, must be zero */
 };
 
 /* Masks for ats_attribute field above */
@@ -383,6 +464,9 @@ struct acpi_iort_root_complex {
 #define ACPI_IORT_PRI_SUPPORTED         (1<<1)	/* The root complex PRI support */
 #define ACPI_IORT_PASID_FWD_SUPPORTED   (1<<2)	/* The root complex PASID forward support */
 
+/* Masks for pasid_capabilities field above */
+#define ACPI_IORT_PASID_MAX_WIDTH       (0x1F)	/* Bits 0-4 */
+
 struct acpi_iort_smmu {
 	u64 base_address;	/* SMMU base address */
 	u64 span;		/* Length of memory range */
@@ -458,6 +542,25 @@ struct acpi_iort_rmr {
 	u32 rmr_offset;
 };
 
+/* Masks for Flags field above */
+#define ACPI_IORT_RMR_REMAP_PERMITTED   (1)
+#define ACPI_IORT_RMR_ACCESS_PRIVILEGE  (1<<1)
+
+/*
+ * Macro to access the Access Attributes in flags field above:
+ * Access Attributes is encoded in bits 9:2
+ */
+#define ACPI_IORT_RMR_ACCESS_ATTRIBUTES(flags)  (((flags) >> 2) & 0xFF)
+
+/* Values for above Access Attributes */
+
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRNE        0x00
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRE         0x01
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGRE          0x02
+#define ACPI_IORT_RMR_ATTR_DEVICE_GRE           0x03
+#define ACPI_IORT_RMR_ATTR_NORMAL_NC            0x04
+#define ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB       0x05
+
 struct acpi_iort_rmr_desc {
 	u64 base_address;
 	u64 length;
@@ -762,7 +865,8 @@ enum acpi_madt_type {
 	ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
 	ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
 	ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16,
-	ACPI_MADT_TYPE_RESERVED = 17	/* 17 and greater are reserved */
+	ACPI_MADT_TYPE_RESERVED = 17,	/* 17 to 0x7F are reserved */
+	ACPI_MADT_TYPE_OEM_RESERVED = 0x80	/* 0x80 to 0xFF are reserved for OEM use */
 };
 
 /*
@@ -978,8 +1082,8 @@ struct acpi_madt_multiproc_wakeup {
 	u64 base_address;
 };
 
-#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE	2032
-#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE	2048
+#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE        2032
+#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE  2048
 
 struct acpi_madt_multiproc_wakeup_mailbox {
 	u16 command;
@@ -992,6 +1096,12 @@ struct acpi_madt_multiproc_wakeup_mailbox {
 
 #define ACPI_MP_WAKE_COMMAND_WAKEUP    1
 
+/* 17: OEM data */
+
+struct acpi_madt_oem_data {
+	u8 oem_data[0];
+};
+
 /*
  * Common flags fields for MADT subtables
  */
@@ -1597,7 +1707,7 @@ struct acpi_nhlt_mic_device_specific_config {
 
 /* Values for array_type_ext above */
 
-#define ACPI_NHLT_ARRAY_TYPE_RESERVED               0x09	// 9 and below are reserved
+#define ACPI_NHLT_ARRAY_TYPE_RESERVED               0x09	/* 9 and below are reserved */
 #define ACPI_NHLT_SMALL_LINEAR_2ELEMENT             0x0A
 #define ACPI_NHLT_BIG_LINEAR_2ELEMENT               0x0B
 #define ACPI_NHLT_FIRST_GEOMETRY_LINEAR_4ELEMENT    0x0C
@@ -1617,17 +1727,17 @@ struct acpi_nhlt_vendor_mic_count {
 struct acpi_nhlt_vendor_mic_config {
 	u8 type;
 	u8 panel;
-	u16 speaker_position_distance;		// mm
-	u16 horizontal_offset;			// mm
-	u16 vertical_offset;			// mm
-	u8 frequency_low_band;			// 5*hz
-	u8 frequency_high_band;			// 500*hz
-	u16 direction_angle;			// -180 - + 180
-	u16 elevation_angle;			// -180 - + 180
-	u16 work_vertical_angle_begin;		// -180 - + 180 with 2 deg step
-	u16 work_vertical_angle_end;		// -180 - + 180 with 2 deg step
-	u16 work_horizontal_angle_begin;	// -180 - + 180 with 2 deg step
-	u16 work_horizontal_angle_end;		// -180 - + 180 with 2 deg step
+	u16 speaker_position_distance;		/* mm */
+	u16 horizontal_offset;			/* mm */
+	u16 vertical_offset;			/* mm */
+	u8 frequency_low_band;			/* 5*Hz */
+	u8 frequency_high_band;			/* 500*Hz */
+	u16 direction_angle;			/* -180 - + 180 */
+	u16 elevation_angle;			/* -180 - + 180 */
+	u16 work_vertical_angle_begin;		/* -180 - + 180 with 2 deg step */
+	u16 work_vertical_angle_end;		/* -180 - + 180 with 2 deg step */
+	u16 work_horizontal_angle_begin;	/* -180 - + 180 with 2 deg step */
+	u16 work_horizontal_angle_end;		/* -180 - + 180 with 2 deg step */
 };
 
 /* Values for Type field above */
@@ -1638,9 +1748,9 @@ struct acpi_nhlt_vendor_mic_config {
 #define ACPI_NHLT_MIC_SUPER_CARDIOID        3
 #define ACPI_NHLT_MIC_HYPER_CARDIOID        4
 #define ACPI_NHLT_MIC_8_SHAPED              5
-#define ACPI_NHLT_MIC_RESERVED6             6	// 6 is reserved
+#define ACPI_NHLT_MIC_RESERVED6             6	/* 6 is reserved */
 #define ACPI_NHLT_MIC_VENDOR_DEFINED        7
-#define ACPI_NHLT_MIC_RESERVED              8	// 8 and above are reserved
+#define ACPI_NHLT_MIC_RESERVED              8	/* 8 and above are reserved */
 
 /* Values for Panel field above */
 
@@ -1650,12 +1760,12 @@ struct acpi_nhlt_vendor_mic_config {
 #define ACPI_NHLT_MIC_POSITION_RIGHT        3
 #define ACPI_NHLT_MIC_POSITION_FRONT        4
 #define ACPI_NHLT_MIC_POSITION_BACK         5
-#define ACPI_NHLT_MIC_POSITION_RESERVED     6	// 6 and above are reserved
+#define ACPI_NHLT_MIC_POSITION_RESERVED     6	/* 6 and above are reserved */
 
 struct acpi_nhlt_vendor_mic_device_specific_config {
 	struct acpi_nhlt_mic_device_specific_config mic_array_device_config;
 	u8 number_of_microphones;
-	struct acpi_nhlt_vendor_mic_config mic_config[];	// indexed by number_of_microphones
+	struct acpi_nhlt_vendor_mic_config mic_config[];	/* Indexed by number_of_microphones */
 };
 
 /* Microphone SNR and Sensitivity extension */
 
 struct acpi_nhlt_mic_snr_sensitivity_extension {
@@ -1668,32 +1778,23 @@
 
 /* Render device with feedback */
 
 struct acpi_nhlt_render_feedback_device_specific_config {
-	u8 feedback_virtual_slot;	// render slot in case of capture
-	u16 feedback_channels;		// informative only
+	u8 feedback_virtual_slot;	/* Render slot in case of capture */
+	u16 feedback_channels;		/* Informative only */
 	u16 feedback_valid_bits_per_sample;
 };
 
-/* Linux-specific structures */
+/* Non documented structures */
 
-struct acpi_nhlt_linux_specific_count {
+struct acpi_nhlt_device_info_count {
 	u8 structure_count;
 };
 
-struct acpi_nhlt_linux_specific_data {
+struct acpi_nhlt_device_info {
 	u8 device_id[16];
 	u8 device_instance_id;
 	u8 device_port_id;
 };
 
-struct acpi_nhlt_linux_specific_data_b {
-	u8 specific_data[18];
-};
-
-struct acpi_nhlt_table_terminator {
-	u32 terminator_value;
-	u32 terminator_signature;
-};
-
 /*******************************************************************************
  *
  * PCCT - Platform Communications Channel Table (ACPI 5.0)
@@ -2319,7 +2420,7 @@ struct acpi_table_rgrt {
 	u16 version;
 	u8 image_type;
 	u8 reserved;
-	u8 image[0];
+	u8 image[];
 };
 
 /* image_type values */
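A decoding sketch for the new IORT RMR flag helpers above; rmr_is_device_memory() and its flags argument are hypothetical, while the macro and the ACPI_IORT_RMR_ATTR_* values come from the hunk:

static bool rmr_is_device_memory(u32 flags)
{
	/* Access Attributes live in bits 9:2 of the RMR flags word */
	u32 attr = ACPI_IORT_RMR_ACCESS_ATTRIBUTES(flags);

	/* 0x00-0x03 are the Device-nGnRnE..Device-GRE types; 0x04-0x05 are Normal */
	return attr <= ACPI_IORT_RMR_ATTR_DEVICE_GRE;
}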
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index edbf1ad8206d..7b9571e00cc4 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
  *
  * Name: actbl3.h - ACPI Table Definitions
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 69e89d572b9e..3491e454b2ab 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
  *
  * Name: actypes.h - Common data types for the entire ACPI subsystem
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
@@ -507,8 +507,12 @@ typedef u64 acpi_integer;
 
 /* Pointer/Integer type conversions */
 
 #define ACPI_TO_POINTER(i)              ACPI_CAST_PTR (void, (acpi_size) (i))
+#ifndef ACPI_TO_INTEGER
 #define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p, (void *) 0)
+#endif
+#ifndef ACPI_OFFSET
 #define ACPI_OFFSET(d, f)               ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
+#endif
 #define ACPI_PTR_TO_PHYSADDR(i)         ACPI_TO_INTEGER(i)
 
 /* Optimizations for 4-character (32-bit) acpi_name manipulation */
@@ -535,14 +539,14 @@ typedef u64 acpi_integer;
  * Can be used with access_width of struct acpi_generic_address and access_size of
  * struct acpi_resource_generic_register.
  */
-#define ACPI_ACCESS_BIT_SHIFT		2
-#define ACPI_ACCESS_BYTE_SHIFT		-1
-#define ACPI_ACCESS_BIT_MAX		(31 - ACPI_ACCESS_BIT_SHIFT)
-#define ACPI_ACCESS_BYTE_MAX		(31 - ACPI_ACCESS_BYTE_SHIFT)
-#define ACPI_ACCESS_BIT_DEFAULT		(8 - ACPI_ACCESS_BIT_SHIFT)
-#define ACPI_ACCESS_BYTE_DEFAULT	(8 - ACPI_ACCESS_BYTE_SHIFT)
-#define ACPI_ACCESS_BIT_WIDTH(size)	(1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
-#define ACPI_ACCESS_BYTE_WIDTH(size)	(1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))
+#define ACPI_ACCESS_BIT_SHIFT           2
+#define ACPI_ACCESS_BYTE_SHIFT          -1
+#define ACPI_ACCESS_BIT_MAX             (31 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_MAX            (31 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_DEFAULT         (8 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_DEFAULT        (8 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_WIDTH(size)     (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
+#define ACPI_ACCESS_BYTE_WIDTH(size)    (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))
 
 /*******************************************************************************
  *
@@ -1299,6 +1303,7 @@ typedef enum {
 #define ACPI_OSI_WIN_10_RS5             0x13
 #define ACPI_OSI_WIN_10_19H1            0x14
 #define ACPI_OSI_WIN_10_20H1            0x15
+#define ACPI_OSI_WIN_11                 0x16
 
 /* Definitions of getopt */
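Worked example for the realigned access-width helpers above: ACPI_ACCESS_BIT_WIDTH(size) expands to 1 << (size + 2) and ACPI_ACCESS_BYTE_WIDTH(size) to 1 << (size - 1), so a GAS access_width of 1, 2, 3, 4 maps to 8-, 16-, 32-, 64-bit accesses. Compile-time checks (these asserts are illustrative, not part of the patch):

_Static_assert(ACPI_ACCESS_BIT_WIDTH(1) == 8, "access_width 1 is an 8-bit access");
_Static_assert(ACPI_ACCESS_BIT_WIDTH(3) == 32, "access_width 3 is a 32-bit access");
_Static_assert(ACPI_ACCESS_BYTE_WIDTH(1) == 1, "access_width 1 is a 1-byte access");
_Static_assert(ACPI_ACCESS_BYTE_WIDTH(4) == 8, "access_width 4 is an 8-byte access");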
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index bc24388ce94e..8f1e7c489df5 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
  *
  * Name: acuuid.h - ACPI-related UUID/GUID definitions
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index ece0a8af2bae..dc60f7db5524 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -27,14 +27,16 @@ extern int hest_disable;
 extern int erst_disable;
 #ifdef CONFIG_ACPI_APEI_GHES
 extern bool ghes_disable;
+void __init acpi_ghes_init(void);
 #else
 #define ghes_disable 1
+static inline void acpi_ghes_init(void) { }
 #endif
 
 #ifdef CONFIG_ACPI_APEI
 void __init acpi_hest_init(void);
 #else
-static inline void acpi_hest_init(void) { return; }
+static inline void acpi_hest_init(void) { }
 #endif
 
 int erst_write(const struct cper_record_header *record);
@@ -44,6 +46,8 @@ int erst_get_record_id_next(int *pos, u64 *record_id);
 void erst_get_record_id_end(void);
 ssize_t erst_read(u64 record_id, struct cper_record_header *record,
 		  size_t buflen);
+ssize_t erst_read_record(u64 record_id, struct cper_record_header *record,
+		size_t buflen, size_t recordlen, const guid_t *creatorid);
 int erst_clear(u64 record_id);
 
 int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 92b7ea8d8f5e..c5614444031f 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -17,7 +17,7 @@
 #include <acpi/pcc.h>
 #include <acpi/processor.h>
 
-/* Support CPPCv2 and CPPCv3 */
+/* CPPCv2 and CPPCv3 support */
 #define CPPC_V2_REV	2
 #define CPPC_V3_REV	3
 #define CPPC_V2_NUM_ENT	21
@@ -140,10 +140,13 @@ extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
 extern int cppc_set_enable(int cpu, bool enable);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+extern bool cppc_perf_ctrs_in_pcc(void);
 extern bool acpi_cpc_valid(void);
+extern bool cppc_allow_fast_switch(void);
 extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
 extern unsigned int cppc_get_transition_latency(int cpu);
 extern bool cpc_ffh_supported(void);
+extern bool cpc_supported_by_cpu(void);
 extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
 extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
 #else /* !CONFIG_ACPI_CPPC_LIB */
@@ -171,10 +174,18 @@ static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
 {
 	return -ENOTSUPP;
 }
+static inline bool cppc_perf_ctrs_in_pcc(void)
+{
+	return false;
+}
 static inline bool acpi_cpc_valid(void)
 {
 	return false;
 }
+static inline bool cppc_allow_fast_switch(void)
+{
+	return false;
+}
 static inline unsigned int cppc_get_transition_latency(int cpu)
 {
 	return CPUFREQ_ETERNAL;
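Caller sketch for the new CPPC predicates above; the helper names and the policy they express are illustrative only (a cpufreq driver may weigh these checks differently):

static bool example_cppc_fast_switch_ok(void)
{
	/* _CPC data must be valid on all CPUs, and fast switching permitted */
	return acpi_cpc_valid() && cppc_allow_fast_switch();
}

static bool example_cppc_ctrs_are_slow(void)
{
	/* true if reading the feedback counters goes through PCC mailboxes */
	return cppc_perf_ctrs_in_pcc();
}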
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 34fb3431a8f3..292a5c40bd0c 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
 #endif
 
-int ghes_estatus_pool_init(int num_ghes);
+int ghes_estatus_pool_init(unsigned int num_ghes);
 
 /* From drivers/edac/ghes_edac.c */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index e8958e0d1646..03eb3d977075 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
  *
  * Name: acenv.h - Host and compiler configuration
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 277fe2fa4d9b..3a6b1db9a984 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
  *
  * Name: acenvex.h - Extra host and compiler configuration
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 33ad282bd338..ac80111f503c 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,7 +3,7 @@
  *
  * Name: acgcc.h - GCC specific defines, etc.
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 738d52865e0a..302ea1b724b9 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
  *
  * Name: acgccex.h - Extra GCC specific defines, etc.
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index 550fe9a8cd6c..85b1ae86ee63 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -3,7 +3,7 @@
  *
  * Name: acintel.h - VC specific defines, etc.
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index b3ffb9bbf664..a5550dd4d507 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
  *
  * Name: aclinux.h - OS specific defines, etc. for Linux
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
@@ -114,6 +114,11 @@
 
 #define acpi_raw_spinlock               raw_spinlock_t *
 #define acpi_cpu_flags                  unsigned long
 
+#define acpi_uintptr_t                  uintptr_t
+
+#define ACPI_TO_INTEGER(p)              ((uintptr_t)(p))
+#define ACPI_OFFSET(d, f)               offsetof(d, f)
+
 /* Use native linux version of acpi_os_allocate_zeroed */
 
 #define USE_NATIVE_ALLOCATE_ZEROED
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 5f642b07ad64..28c72744decf 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
  *
  * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
  *
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
  *
  *****************************************************************************/
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 194027371928..9fa49686957a 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -441,9 +441,12 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 #endif	/* CONFIG_ACPI_PROCESSOR_IDLE */
 
 /* in processor_thermal.c */
-int acpi_processor_get_limit_info(struct acpi_processor *pr);
+int acpi_processor_thermal_init(struct acpi_processor *pr,
+				struct acpi_device *device);
+void acpi_processor_thermal_exit(struct acpi_processor *pr,
+				 struct acpi_device *device);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
+#ifdef CONFIG_CPU_FREQ
 void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
 void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
 #else
@@ -455,6 +458,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	return;
 }
-#endif	/* CONFIG_ACPI_CPU_FREQ_PSS */
+#endif	/* CONFIG_CPU_FREQ */
 
 #endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index db8548ff03ce..a275c35e5249 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -48,15 +48,18 @@ enum acpi_backlight_type {
 	acpi_backlight_video,
 	acpi_backlight_vendor,
 	acpi_backlight_native,
+	acpi_backlight_nvidia_wmi_ec,
+	acpi_backlight_apple_gmux,
 };
 
 #if IS_ENABLED(CONFIG_ACPI_VIDEO)
 extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
+extern void acpi_video_register_backlight(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
 			       int device_id, void **edid);
 extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
+extern bool acpi_video_backlight_use_native(void);
 /*
  * Note: The value returned by acpi_video_handles_brightness_key_presses()
  * may change over time and should not be cached.
@@ -68,6 +71,7 @@ extern int acpi_video_get_levels(struct acpi_device *device,
 #else
 static inline int acpi_video_register(void) { return -ENODEV; }
 static inline void acpi_video_unregister(void) { return; }
+static inline void acpi_video_register_backlight(void) { return; }
 static inline int acpi_video_get_edid(struct acpi_device *device, int type,
 				      int device_id, void **edid)
 {
@@ -77,8 +81,9 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
 {
 	return acpi_backlight_vendor;
 }
-static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+static inline bool acpi_video_backlight_use_native(void)
 {
+	return true;
 }
 static inline bool acpi_video_handles_brightness_key_presses(void)
 {
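Sketch of how a native GPU backlight driver is expected to consult the new helper above; register_native_backlight() is a placeholder for the driver's own registration path, not a real API:

static int example_backlight_probe(struct device *dev)
{
	/* back off if acpi_video or a vendor interface should own the backlight */
	if (!acpi_video_backlight_use_native())
		return 0;

	return register_native_backlight(dev);	/* hypothetical */
}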
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 302506bbc2a4..941be574bbe0 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -5,6 +5,7 @@
 # asm headers from the host architecutre.)
 
 mandatory-y += atomic.h
+mandatory-y += archrandom.h
 mandatory-y += barrier.h
 mandatory-y += bitops.h
 mandatory-y += bug.h
diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h
new file mode 100644
index 000000000000..2866ae61b1cd
--- /dev/null
+++ b/include/asm-generic/access_ok.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ACCESS_OK_H__
+#define __ASM_GENERIC_ACCESS_OK_H__
+
+/*
+ * Checking whether a pointer is valid for user space access.
+ * These definitions work on most architectures, but overrides can
+ * be used where necessary.
+ */
+
+/*
+ * architectures with compat tasks have a variable TASK_SIZE and should
+ * override this to a constant.
+ */
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX			TASK_SIZE
+#endif
+
+#ifndef __access_ok
+/*
+ * 'size' is a compile-time constant for most callers, so optimize for
+ * this case to turn the check into a single comparison against a constant
+ * limit and catch all possible overflows.
+ *
+ * On architectures with separate user address space (m68k, s390, parisc,
+ * sparc64) or those without an MMU, this should always return true.
+ *
+ * This version was originally contributed by Jonas Bonn for the
+ * OpenRISC architecture, and was found to be the most efficient
+ * for constant 'size' and 'limit' values.
+ */
+static inline int __access_ok(const void __user *ptr, unsigned long size)
+{
+	unsigned long limit = TASK_SIZE_MAX;
+	unsigned long addr = (unsigned long)ptr;
+
+	if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+	    !IS_ENABLED(CONFIG_MMU))
+		return true;
+
+	return (size <= limit) && (addr <= (limit - size));
+}
+#endif
+
+#ifndef access_ok
+#define access_ok(addr, size) likely(__access_ok(addr, size))
+#endif
+
+#endif
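A note on why the generic __access_ok() above uses two separate comparisons: a naive "addr + size <= limit" check can wrap around for huge sizes and wrongly pass. Hypothetical caller using only the new macro:

static int example_param_ok(const void __user *ubuf, unsigned long len)
{
	/* false for len > TASK_SIZE_MAX or for ranges that would overflow */
	return access_ok(ubuf, len);
}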
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 000000000000..3cd7f980cfdc
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+	return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+	return 0;
+}
+
+#endif
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 3d503e74037f..961f4d88f9ef 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -38,6 +38,10 @@
 #define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
 #endif
 
+#ifdef __dma_mb
+#define dma_mb()	do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
 #ifdef __dma_rmb
 #define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
 #endif
@@ -65,6 +69,10 @@
 #define wmb()	mb()
 #endif
 
+#ifndef dma_mb
+#define dma_mb()	mb()
+#endif
+
 #ifndef dma_rmb
 #define dma_rmb()	rmb()
 #endif
@@ -285,7 +293,7 @@ do {									\
  * write-combining memory accesses before this macro with those after it.
  */
 #ifndef io_stop_wc
-#define io_stop_wc do { } while (0)
+#define io_stop_wc() do { } while (0)
 #endif
 
 #endif	/* !__ASSEMBLY__ */
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index df9b5bc3d282..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -20,7 +20,6 @@
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 3096f086b5a3..71ab4ba9c25d 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -39,9 +39,6 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	if (READ_ONCE(*p) & mask)
-		return 1;
-
 	old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
@@ -53,9 +50,6 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	if (!(READ_ONCE(*p) & mask))
-		return 0;
-
 	old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
deleted file mode 100644
index 0d132ee2a291..000000000000
--- a/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-extern unsigned long _find_next_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le);
-extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-			    unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = *addr & GENMASK(size - 1, offset);
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
-}
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_next_and_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long size,
-		unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = *addr1 & *addr2 & GENMASK(size - 1, offset);
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
-}
-#endif
-
-#ifndef find_next_zero_bit
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-static inline
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-				 unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = *addr | ~GENMASK(size - 1, offset);
-		return val == ~0UL ? size : ffz(val);
-	}
-
-	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
-}
-#endif
-
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr & GENMASK(size - 1, 0);
-
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_first_bit(addr, size);
-}
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-static inline
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr | ~GENMASK(size - 1, 0);
-
-		return val == ~0UL ? size : ffz(val);
-	}
-
-	return _find_first_zero_bit(addr, size);
-}
-#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_first_bit
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#endif
-#ifndef find_first_zero_bit
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-#endif
-
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-static inline
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr & GENMASK(size - 1, 0);
-
-		return val ? __fls(val) : size;
-	}
-
-	return _find_last_bit(addr, size);
-}
-#endif
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
-				      const unsigned long *addr,
-				      unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
-	find_next_clump8((clump), (bits), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
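The find_*_bit() helpers deleted above are not removed from the kernel; in this release they remain reachable through <linux/bitops.h> (the generic implementation now lives under linux/find.h), so existing callers keep compiling unchanged. Illustrative caller:

#include <linux/bitops.h>

static unsigned long example_first_set(const unsigned long *map, unsigned long bits)
{
	return find_first_bit(map, bits);	/* returns 'bits' when no bit is set */
}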
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000000..564a8c675d85
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	/*
+	 * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+	 * so `volatile` must always stay here with no cast-aways. See
+	 * `Documentation/atomic_bitops.txt` for the details.
+	 */
+	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit			generic___set_bit
+#define const___clear_bit		generic___clear_bit
+#define const___change_bit		generic___change_bit
+#define const___test_and_set_bit	generic___test_and_set_bit
+#define const___test_and_clear_bit	generic___test_and_clear_bit
+#define const___test_and_change_bit	generic___test_and_change_bit
+#define const_test_bit_acquire		generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long val = *p;
+
+	return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
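Semantics sketch for the generic non-atomic helpers above: they are plain read-modify-write sequences with no atomicity or ordering, so the caller must guarantee exclusion itself, for example under a spinlock it already holds. claim_slot() is hypothetical and uses the public __test_and_set_bit(), which these generic_ versions now back on architectures without their own implementation:

static bool claim_slot(unsigned long *bitmap, unsigned int slot, spinlock_t *lock)
{
	bool was_set;

	spin_lock(lock);
	was_set = __test_and_set_bit(slot, bitmap);	/* plain RMW, no barriers */
	spin_unlock(lock);

	return !was_set;	/* true if this caller claimed the slot */
}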
*/ -static inline void clear_bit(long nr, volatile unsigned long *addr) +static __always_inline void clear_bit(long nr, volatile unsigned long *addr) { instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit(nr, addr); @@ -52,7 +52,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void change_bit(long nr, volatile unsigned long *addr) +static __always_inline void change_bit(long nr, volatile unsigned long *addr) { instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_change_bit(nr, addr); @@ -65,7 +65,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) * * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) +static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { kcsan_mb(); instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); @@ -79,7 +79,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) * * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) +static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { kcsan_mb(); instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); @@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) * * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) +static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { kcsan_mb(); instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h index 37363d570b9b..2b238b161a62 100644 --- a/include/asm-generic/bitops/instrumented-non-atomic.h +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -14,7 +14,7 @@ #include <linux/instrumented.h> /** - * __set_bit - Set a bit in memory + * ___set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -22,14 +22,15 @@ * region of memory concurrently, the effect may be that only one operation * succeeds. */ -static inline void __set_bit(long nr, volatile unsigned long *addr) +static __always_inline void +___set_bit(unsigned long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___set_bit(nr, addr); } /** - * __clear_bit - Clears a bit in memory + * ___clear_bit - Clears a bit in memory * @nr: the bit to clear * @addr: the address to start counting from * @@ -37,14 +38,15 @@ static inline void __set_bit(long nr, volatile unsigned long *addr) * region of memory concurrently, the effect may be that only one operation * succeeds. 
*/ -static inline void __clear_bit(long nr, volatile unsigned long *addr) +static __always_inline void +___clear_bit(unsigned long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit(nr, addr); } /** - * __change_bit - Toggle a bit in memory + * ___change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * @@ -52,13 +54,14 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr) * region of memory concurrently, the effect may be that only one operation * succeeds. */ -static inline void __change_bit(long nr, volatile unsigned long *addr) +static __always_inline void +___change_bit(unsigned long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___change_bit(nr, addr); } -static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr) +static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr) { if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) { /* @@ -83,56 +86,72 @@ static inline void __instrument_read_write_bitop(long nr, volatile unsigned long } /** - * __test_and_set_bit - Set a bit and return its old value + * ___test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ -static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) +static __always_inline bool +___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_set_bit(nr, addr); } /** - * __test_and_clear_bit - Clear a bit and return its old value + * ___test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ -static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +static __always_inline bool +___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_clear_bit(nr, addr); } /** - * __test_and_change_bit - Change a bit and return its old value + * ___test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. 
*/ -static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) +static __always_inline bool +___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_change_bit(nr, addr); } /** - * test_bit - Determine whether a bit is set + * _test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ -static inline bool test_bit(long nr, const volatile unsigned long *addr) +static __always_inline bool +_test_bit(unsigned long nr, const volatile unsigned long *addr) { instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); return arch_test_bit(nr, addr); } +/** + * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __always_inline bool +_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) +{ + instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_bit_acquire(nr, addr); +} + #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h index 5a28629cbf4d..d51beff60375 100644 --- a/include/asm-generic/bitops/le.h +++ b/include/asm-generic/bitops/le.h @@ -2,83 +2,19 @@ #ifndef _ASM_GENERIC_BITOPS_LE_H_ #define _ASM_GENERIC_BITOPS_LE_H_ -#include <asm-generic/bitops/find.h> #include <asm/types.h> #include <asm/byteorder.h> -#include <linux/swab.h> #if defined(__LITTLE_ENDIAN) #define BITOP_LE_SWIZZLE 0 -static inline unsigned long find_next_zero_bit_le(const void *addr, - unsigned long size, unsigned long offset) -{ - return find_next_zero_bit(addr, size, offset); -} - -static inline unsigned long find_next_bit_le(const void *addr, - unsigned long size, unsigned long offset) -{ - return find_next_bit(addr, size, offset); -} - -static inline unsigned long find_first_zero_bit_le(const void *addr, - unsigned long size) -{ - return find_first_zero_bit(addr, size); -} - #elif defined(__BIG_ENDIAN) #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) -#ifndef find_next_zero_bit_le -static inline -unsigned long find_next_zero_bit_le(const void *addr, unsigned - long size, unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val = *(const unsigned long *)addr; - - if (unlikely(offset >= size)) - return size; - - val = swab(val) | ~GENMASK(size - 1, offset); - return val == ~0UL ? size : ffz(val); - } - - return _find_next_bit(addr, NULL, size, offset, ~0UL, 1); -} -#endif - -#ifndef find_next_bit_le -static inline -unsigned long find_next_bit_le(const void *addr, unsigned - long size, unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val = *(const unsigned long *)addr; - - if (unlikely(offset >= size)) - return size; - - val = swab(val) & GENMASK(size - 1, offset); - return val ? 
__ffs(val) : size; - } - - return _find_next_bit(addr, NULL, size, offset, 0UL, 1); -} #endif -#ifndef find_first_zero_bit_le -#define find_first_zero_bit_le(addr, size) \ - find_next_zero_bit_le((addr), (size), 0) -#endif - -#else -#error "Please fix <asm/byteorder.h>" -#endif static inline int test_bit_le(int nr, const void *addr) { diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h index 078cc68be2f1..71f8d54a5195 100644 --- a/include/asm-generic/bitops/non-atomic.h +++ b/include/asm-generic/bitops/non-atomic.h @@ -2,121 +2,19 @@ #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ -#include <asm/types.h> +#include <asm-generic/bitops/generic-non-atomic.h> -/** - * arch___set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static __always_inline void -arch___set_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); +#define arch___set_bit generic___set_bit +#define arch___clear_bit generic___clear_bit +#define arch___change_bit generic___change_bit - *p |= mask; -} -#define __set_bit arch___set_bit +#define arch___test_and_set_bit generic___test_and_set_bit +#define arch___test_and_clear_bit generic___test_and_clear_bit +#define arch___test_and_change_bit generic___test_and_change_bit -static __always_inline void -arch___clear_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire - *p &= ~mask; -} -#define __clear_bit arch___clear_bit - -/** - * arch___change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static __always_inline -void arch___change_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p ^= mask; -} -#define __change_bit arch___change_bit - -/** - * arch___test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static __always_inline int -arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} -#define __test_and_set_bit arch___test_and_set_bit - -/** - * arch___test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. 
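 *
 * Editor's note (a sketch, assuming generic-non-atomic.h carries these
 * bodies over unchanged, which is how this series reads): the
 * generic___*() helpers that non-atomic.h now maps to keep the shape
 * of the deleted arch variants below, e.g. for test-and-clear:
 *
 *	p = ((unsigned long *)addr) + BIT_WORD(nr);
 *	old = *p; *p = old & ~BIT_MASK(nr);
 *	return (old & BIT_MASK(nr)) != 0;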
- */ -static __always_inline int -arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} -#define __test_and_clear_bit arch___test_and_clear_bit - -/* WARNING: non atomic and it can be reordered! */ -static __always_inline int -arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} -#define __test_and_change_bit arch___test_and_change_bit - -/** - * arch_test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static __always_inline int -arch_test_bit(unsigned int nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} -#define test_bit arch_test_bit +#include <asm-generic/bitops/non-instrumented-non-atomic.h> #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h new file mode 100644 index 000000000000..0ddc78dfc358 --- /dev/null +++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H +#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H + +#define ___set_bit arch___set_bit +#define ___clear_bit arch___clear_bit +#define ___change_bit arch___change_bit + +#define ___test_and_set_bit arch___test_and_set_bit +#define ___test_and_clear_bit arch___test_and_clear_bit +#define ___test_and_change_bit arch___test_and_change_bit + +#define _test_bit arch_test_bit +#define _test_bit_acquire arch_test_bit_acquire + +#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index edb0e2a602a8..4050b191e1a9 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -21,6 +21,12 @@ #include <linux/panic.h> #include <linux/printk.h> +struct warn_args; +struct pt_regs; + +void __warn(const char *file, int line, void *caller, unsigned taint, + struct pt_regs *regs, struct warn_args *args); + #ifdef CONFIG_BUG #ifdef CONFIG_GENERIC_BUG @@ -110,11 +116,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...); #endif /* used internally by panic.c */ -struct warn_args; -struct pt_regs; - -void __warn(const char *file, int line, void *caller, unsigned taint, - struct pt_regs *regs, struct warn_args *args); #ifndef WARN_ON #define WARN_ON(condition) ({ \ @@ -219,22 +220,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint, # define WARN_ON_SMP(x) ({0;}) #endif -/* - * WARN_ON_FUNCTION_MISMATCH() warns if a value doesn't match a - * function address, and can be useful for catching issues with - * callback functions, for example. - * - * With CONFIG_CFI_CLANG, the warning is disabled because the - * compiler replaces function addresses taken in C code with - * local jump table addresses, which breaks cross-module function - * address equality. 
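 *
 * Editor's note (illustrative, names hypothetical): a typical use of
 * the macro being removed was
 *
 *	WARN_ON_FUNCTION_MISMATCH(desc->handler, default_handler);
 *
 * which under CONFIG_CFI_CLANG could compare a local jump-table entry
 * against the real symbol address, hence the stubbed-out variant.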
- */ -#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_MODULES) -# define WARN_ON_FUNCTION_MISMATCH(x, fn) ({ 0; }) -#else -# define WARN_ON_FUNCTION_MISMATCH(x, fn) WARN_ON_ONCE((x) != (fn)) -#endif - #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 4f07afacbc23..f46258d1a080 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -2,6 +2,8 @@ #ifndef _ASM_GENERIC_CACHEFLUSH_H #define _ASM_GENERIC_CACHEFLUSH_H +#include <linux/instrumented.h> + struct mm_struct; struct vm_area_struct; struct page; @@ -105,14 +107,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) #ifndef copy_to_user_page #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ + instrument_copy_to_user((void __user *)dst, src, len); \ memcpy(dst, src, len); \ flush_icache_user_page(vma, page, vaddr, len); \ } while (0) #endif + #ifndef copy_from_user_page -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + instrument_copy_from_user_before(dst, (void __user *)src, \ + len); \ + memcpy(dst, src, len); \ + instrument_copy_from_user_after(dst, (void __user *)src, len, \ + 0); \ + } while (0) #endif #endif /* _ASM_GENERIC_CACHEFLUSH_H */ diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h index d46c0201cc34..8392caea398f 100644 --- a/include/asm-generic/compat.h +++ b/include/asm-generic/compat.h @@ -2,6 +2,30 @@ #ifndef __ASM_GENERIC_COMPAT_H #define __ASM_GENERIC_COMPAT_H +#ifndef COMPAT_USER_HZ +#define COMPAT_USER_HZ 100 +#endif + +#ifndef COMPAT_RLIM_INFINITY +#define COMPAT_RLIM_INFINITY 0xffffffff +#endif + +#ifndef COMPAT_OFF_T_MAX +#define COMPAT_OFF_T_MAX 0x7fffffff +#endif + +#ifndef compat_arg_u64 +#ifndef CONFIG_CPU_BIG_ENDIAN +#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi +#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi +#else +#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo +#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo +#endif +#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \ + ((u64)name##_hi << 32)) +#endif /* compat_arg_u64 */ + /* These types are common across all compat ABIs */ typedef u32 compat_size_t; typedef s32 compat_ssize_t; @@ -24,6 +48,11 @@ typedef u32 compat_caddr_t; typedef u32 compat_aio_context_t; typedef u32 compat_old_sigset_t; +#ifndef __compat_uid_t +typedef u32 __compat_uid_t; +typedef u32 __compat_gid_t; +#endif + #ifndef __compat_uid32_t typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; @@ -47,4 +76,93 @@ typedef u32 compat_sigset_word; #define _COMPAT_NSIG_BPW 32 #endif +#ifndef compat_dev_t +typedef u32 compat_dev_t; +#endif + +#ifndef compat_ipc_pid_t +typedef s32 compat_ipc_pid_t; +#endif + +#ifndef compat_fsid_t +typedef __kernel_fsid_t compat_fsid_t; +#endif + +#ifndef compat_statfs +struct compat_statfs { + compat_int_t f_type; + compat_int_t f_bsize; + compat_int_t f_blocks; + compat_int_t f_bfree; + compat_int_t f_bavail; + compat_int_t f_files; + compat_int_t f_ffree; + compat_fsid_t f_fsid; + compat_int_t f_namelen; + compat_int_t f_frsize; + compat_int_t f_flags; + compat_int_t f_spare[4]; +}; +#endif + +#ifndef compat_ipc64_perm +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + compat_mode_t mode; + unsigned char 
__pad1[4 - sizeof(compat_mode_t)]; + compat_ushort_t seq; + compat_ushort_t __pad2; + compat_ulong_t unused1; + compat_ulong_t unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; + compat_ulong_t sem_nsems; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; +#endif + #endif diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 07a36a874dca..5e4b1f2369d2 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -2,6 +2,14 @@ #ifndef __ASM_GENERIC_EXPORT_H #define __ASM_GENERIC_EXPORT_H +/* + * This comment block is used by fixdep. Please do not remove. + * + * When CONFIG_MODVERSIONS is changed from n to y, all source files having + * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a + * side effect of the *.o build rule. + */ + #ifndef KSYM_FUNC #define KSYM_FUNC(x) x #endif @@ -12,9 +20,6 @@ #else #define KSYM_ALIGN 4 #endif -#ifndef KCRC_ALIGN -#define KCRC_ALIGN 4 -#endif .macro __put, val, name #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -43,17 +48,6 @@ __ksymtab_\name: __kstrtab_\name: .asciz "\name" .previous -#ifdef CONFIG_MODVERSIONS - .section ___kcrctab\sec+\name,"a" - .balign KCRC_ALIGN -#if defined(CONFIG_MODULE_REL_CRCS) - .long __crc_\name - . -#else - .long __crc_\name -#endif - .weak __crc_\name - .previous -#endif #endif .endm diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index f4c3470480c7..2a19215baae5 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -6,15 +6,22 @@ #include <linux/uaccess.h> #include <asm/errno.h> +#ifndef futex_atomic_cmpxchg_inatomic #ifndef CONFIG_SMP /* * The following implementation is only for uniprocessor machines. * It relies on preempt_disable() ensuring mutual exclusion. * */ +#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \ + futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval) +#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \ + futex_atomic_op_inuser_local(op, oparg, oval, uaddr) +#endif /* CONFIG_SMP */ +#endif /** - * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant + * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant * argument and comparison of the previous * futex value with another constant.
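 *
 * Editor's sketch (hypothetical values, not part of this patch): on a
 * UP kernel, op == FUTEX_OP_ADD with oparg == 1 reduces to roughly
 *
 *	preempt_disable();
 *	get_user(old, uaddr);
 *	put_user(old + 1, uaddr);
 *	preempt_enable();
 *	*oval = old;
 *
 * with disabled preemption alone providing the atomicity.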
* @@ -28,7 +35,7 @@ * -ENOSYS - Operation not supported */ static inline int -arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) +futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr) { int oldval, ret; u32 tmp; @@ -75,7 +82,7 @@ out_pagefault_enable: } /** - * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the + * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the * uaddr with newval if the current value is * oldval. * @uval: pointer to store content of @uaddr @@ -87,10 +94,9 @@ out_pagefault_enable: * 0 - On success * -EFAULT - User access resulted in a page fault * -EAGAIN - Atomic operation was unable to complete due to contention - * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) */ static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { u32 val; @@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return 0; } -#else -static inline int -arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) -{ - return -ENOSYS; -} - -static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) -{ - return -ENOSYS; -} - -#endif /* CONFIG_SMP */ #endif diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 8e1e6244a89d..a57d667addd2 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -2,6 +2,9 @@ #ifndef _ASM_GENERIC_HUGETLB_H #define _ASM_GENERIC_HUGETLB_H +#include <linux/swap.h> +#include <linux/swapops.h> + static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) { return mk_pte(page, pgprot); @@ -32,6 +35,21 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) return pte_modify(pte, newprot); } +static inline pte_t huge_pte_mkuffd_wp(pte_t pte) +{ + return pte_mkuffd_wp(pte); +} + +static inline pte_t huge_pte_clear_uffd_wp(pte_t pte) +{ + return pte_clear_uffd_wp(pte); +} + +static inline int huge_pte_uffd_wp(pte_t pte) +{ + return pte_uffd_wp(pte); +} + #ifndef __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) @@ -66,10 +84,10 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, #endif #ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - ptep_clear_flush(vma, addr, ptep); + return ptep_clear_flush(vma, addr, ptep); } #endif @@ -80,6 +98,12 @@ static inline int huge_pte_none(pte_t pte) } #endif +/* Please refer to comments above pte_none_mostly() for the usage */ +static inline int huge_pte_none_mostly(pte_t pte) +{ + return huge_pte_none(pte) || is_pte_marker(pte); +} + #ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT static inline pte_t huge_pte_wrprotect(pte_t pte) { diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index 8ed6733d5146..b17c6eeb9afa 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -102,6 +102,15 @@ struct ms_hyperv_tsc_page { volatile s64 tsc_offset; } __packed; +union hv_reference_tsc_msr { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:11; + u64 pfn:52; + } __packed; +}; + /* * The guest OS needs to register the guest ID with the 
hypervisor. * The guest ID is a 64 bit entity and the structure of this ID is @@ -183,11 +192,18 @@ enum HV_GENERIC_SET_FORMAT { #define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) #define HV_HYPERCALL_FAST_BIT BIT(16) #define HV_HYPERCALL_VARHEAD_OFFSET 17 +#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17) +#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27) #define HV_HYPERCALL_REP_COMP_OFFSET 32 #define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32) #define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44) #define HV_HYPERCALL_REP_START_OFFSET 48 #define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) +#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60) +#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \ + HV_HYPERCALL_RSVD1_MASK | \ + HV_HYPERCALL_RSVD2_MASK) /* hypercall status code */ #define HV_STATUS_SUCCESS 0 @@ -540,39 +556,6 @@ enum hv_interrupt_source { HV_INTERRUPT_SOURCE_IOAPIC, }; -union hv_msi_address_register { - u32 as_uint32; - struct { - u32 reserved1:2; - u32 destination_mode:1; - u32 redirection_hint:1; - u32 reserved2:8; - u32 destination_id:8; - u32 msi_base:12; - }; -} __packed; - -union hv_msi_data_register { - u32 as_uint32; - struct { - u32 vector:8; - u32 delivery_mode:3; - u32 reserved1:3; - u32 level_assert:1; - u32 trigger_mode:1; - u32 reserved2:16; - }; -} __packed; - -/* HvRetargetDeviceInterrupt hypercall */ -union hv_msi_entry { - u64 as_uint64; - struct { - union hv_msi_address_register address; - union hv_msi_data_register data; - } __packed; -}; - union hv_ioapic_rte { u64 as_uint64; diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 7ce93aaf69f8..a68f8fbf423b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -10,6 +10,7 @@ #include <asm/page.h> /* I/O is all done through memory accesses */ #include <linux/string.h> /* for memset() and memcpy() */ #include <linux/types.h> +#include <linux/instruction_pointer.h> #ifdef CONFIG_GENERIC_IOMAP #include <asm-generic/iomap.h> @@ -61,6 +62,44 @@ #define __io_par(v) __io_ar(v) #endif +/* + * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for + * specific kernel drivers in case of excessive/unwanted logging. + * + * Usage: Add a #define flag at the beginning of the driver file. + * Ex: #define __DISABLE_TRACE_MMIO__ + * #include <...> + * ... 
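+ *
+ * Editor's sketch (hypothetical driver, not part of this patch):
+ *
+ *	#define __DISABLE_TRACE_MMIO__
+ *	#include <linux/io.h>
+ *
+ *	static u32 chip_status(void __iomem *base)
+ *	{
+ *		return readl(base + 0x04);	(emits no rwmmio events)
+ *	}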
+ */ +#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__)) +#include <linux/tracepoint-defs.h> + +DECLARE_TRACEPOINT(rwmmio_write); +DECLARE_TRACEPOINT(rwmmio_post_write); +DECLARE_TRACEPOINT(rwmmio_read); +DECLARE_TRACEPOINT(rwmmio_post_read); + +void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr); +void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr); +void log_read_mmio(u8 width, const volatile void __iomem *addr, + unsigned long caller_addr); +void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, + unsigned long caller_addr); + +#else + +static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr) {} +static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr) {} +static inline void log_read_mmio(u8 width, const volatile void __iomem *addr, + unsigned long caller_addr) {} +static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, + unsigned long caller_addr) {} + +#endif /* CONFIG_TRACE_MMIO_ACCESS */ /* * __raw_{read,write}{b,w,l,q}() access memory in native endianness. @@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr) { u8 val; + log_read_mmio(8, addr, _THIS_IP_); __io_br(); val = __raw_readb(addr); __io_ar(val); + log_post_read_mmio(val, 8, addr, _THIS_IP_); return val; } #endif @@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr) { u16 val; + log_read_mmio(16, addr, _THIS_IP_); __io_br(); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); + log_post_read_mmio(val, 16, addr, _THIS_IP_); return val; } #endif @@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr) { u32 val; + log_read_mmio(32, addr, _THIS_IP_); __io_br(); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); + log_post_read_mmio(val, 32, addr, _THIS_IP_); return val; } #endif @@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr) { u64 val; + log_read_mmio(64, addr, _THIS_IP_); __io_br(); val = __le64_to_cpu(__raw_readq(addr)); __io_ar(val); + log_post_read_mmio(val, 64, addr, _THIS_IP_); return val; } #endif @@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { + log_write_mmio(value, 8, addr, _THIS_IP_); __io_bw(); __raw_writeb(value, addr); __io_aw(); + log_post_write_mmio(value, 8, addr, _THIS_IP_); } #endif @@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { + log_write_mmio(value, 16, addr, _THIS_IP_); __io_bw(); __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); + log_post_write_mmio(value, 16, addr, _THIS_IP_); } #endif @@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { + log_write_mmio(value, 32, addr, _THIS_IP_); __io_bw(); __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); + log_post_write_mmio(value, 32, addr, _THIS_IP_); } #endif @@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { 
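+	/*
+	 * Editor's note (illustrative; CTRL_REG is hypothetical): each
+	 * accessor in this family now brackets the raw access with
+	 * log_*_mmio()/log_post_*_mmio(), so with
+	 * CONFIG_TRACE_MMIO_ACCESS=y a call such as
+	 *
+	 *	writeq(ctrl, base + CTRL_REG);
+	 *
+	 * emits rwmmio_write and rwmmio_post_write events carrying the
+	 * value, width and return address.
+	 */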
+ log_write_mmio(value, 64, addr, _THIS_IP_); __io_bw(); __raw_writeq(__cpu_to_le64(value), addr); __io_aw(); + log_post_write_mmio(value, 64, addr, _THIS_IP_); } #endif #endif /* CONFIG_64BIT */ @@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr) #define readb_relaxed readb_relaxed static inline u8 readb_relaxed(const volatile void __iomem *addr) { - return __raw_readb(addr); + u8 val; + + log_read_mmio(8, addr, _THIS_IP_); + val = __raw_readb(addr); + log_post_read_mmio(val, 8, addr, _THIS_IP_); + return val; } #endif @@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr) #define readw_relaxed readw_relaxed static inline u16 readw_relaxed(const volatile void __iomem *addr) { - return __le16_to_cpu(__raw_readw(addr)); + u16 val; + + log_read_mmio(16, addr, _THIS_IP_); + val = __le16_to_cpu(__raw_readw(addr)); + log_post_read_mmio(val, 16, addr, _THIS_IP_); + return val; } #endif @@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr) #define readl_relaxed readl_relaxed static inline u32 readl_relaxed(const volatile void __iomem *addr) { - return __le32_to_cpu(__raw_readl(addr)); + u32 val; + + log_read_mmio(32, addr, _THIS_IP_); + val = __le32_to_cpu(__raw_readl(addr)); + log_post_read_mmio(val, 32, addr, _THIS_IP_); + return val; } #endif @@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr) #define readq_relaxed readq_relaxed static inline u64 readq_relaxed(const volatile void __iomem *addr) { - return __le64_to_cpu(__raw_readq(addr)); + u64 val; + + log_read_mmio(64, addr, _THIS_IP_); + val = __le64_to_cpu(__raw_readq(addr)); + log_post_read_mmio(val, 64, addr, _THIS_IP_); + return val; } #endif @@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { + log_write_mmio(value, 8, addr, _THIS_IP_); __raw_writeb(value, addr); + log_post_write_mmio(value, 8, addr, _THIS_IP_); } #endif @@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { + log_write_mmio(value, 16, addr, _THIS_IP_); __raw_writew(cpu_to_le16(value), addr); + log_post_write_mmio(value, 16, addr, _THIS_IP_); } #endif @@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr) #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { + log_write_mmio(value, 32, addr, _THIS_IP_); __raw_writel(__cpu_to_le32(value), addr); + log_post_write_mmio(value, 32, addr, _THIS_IP_); } #endif @@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { + log_write_mmio(value, 64, addr, _THIS_IP_); __raw_writeq(__cpu_to_le64(value), addr); + log_post_write_mmio(value, 64, addr, _THIS_IP_); } #endif @@ -964,7 +1047,34 @@ static inline void iounmap(volatile void __iomem *addr) #elif defined(CONFIG_GENERIC_IOREMAP) #include <linux/pgtable.h> -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); +/* + * Arch code can implement the following two hooks when using GENERIC_IOREMAP + * ioremap_allowed() return a bool, + * - true means continue to remap + * - false means 
skip remap and return directly + * iounmap_allowed() return a bool, + * - true means continue to vunmap + * - false means skip vunmap and return directly + */ +#ifndef ioremap_allowed +#define ioremap_allowed ioremap_allowed +static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size, + unsigned long prot) +{ + return true; +} +#endif + +#ifndef iounmap_allowed +#define iounmap_allowed iounmap_allowed +static inline bool iounmap_allowed(void *addr) +{ + return true; +} +#endif + +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + unsigned long prot); void iounmap(volatile void __iomem *addr); static inline void __iomem *ioremap(phys_addr_t addr, size_t size) @@ -1059,20 +1169,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) } #endif -#ifdef CONFIG_VIRT_TO_BUS -#ifndef virt_to_bus -static inline unsigned long virt_to_bus(void *address) -{ - return (unsigned long)address; -} - -static inline void *bus_to_virt(unsigned long address) -{ - return (void *)address; -} -#endif -#endif - #ifndef memset_io #define memset_io memset_io /** @@ -1125,9 +1221,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, } #endif -#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED extern int devmem_is_allowed(unsigned long pfn); -#endif #endif /* __KERNEL__ */ diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 3e2248ac328e..bfb9eb9d7215 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -49,8 +49,8 @@ struct ms_hyperv_info { }; extern struct ms_hyperv_info ms_hyperv; -extern void __percpu **hyperv_pcpu_input_arg; -extern void __percpu **hyperv_pcpu_output_arg; +extern void * __percpu *hyperv_pcpu_input_arg; +extern void * __percpu *hyperv_pcpu_output_arg; extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr); extern u64 hv_do_fast_hypercall8(u16 control, u64 input8); @@ -105,15 +105,12 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, } /* Generate the guest OS identifier as described in the Hyper-V TLFS */ -static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, - __u64 d_info2) +static inline u64 hv_generate_guest_id(u64 kernel_version) { - __u64 guest_id = 0; + u64 guest_id; - guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48); - guest_id |= (d_info1 << 48); + guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48); guest_id |= (kernel_version << 16); - guest_id |= d_info2; return guest_id; } @@ -269,6 +266,9 @@ bool hv_isolation_type_snp(void); u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size); void hyperv_cleanup(void); bool hv_query_ext_cap(u64 cap_query); +void hv_setup_dma_ops(struct device *dev, bool coherent); +void *hv_map_memory(void *addr, unsigned long size); +void hv_unmap_memory(void *addr); #else /* CONFIG_HYPERV */ static inline bool hv_is_hyperv_initialized(void) { return false; } static inline bool hv_is_hibernation_supported(void) { return false; } diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index 6bb3cd3d695a..6869f1061528 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h @@ -1,17 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/asm-generic/pci.h - * - * Copyright (C) 2003 Russell King - */ -#ifndef _ASM_GENERIC_PCI_H -#define _ASM_GENERIC_PCI_H +/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 
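/*
 * Editor's sketch (hypothetical arch header, not part of this patch):
 * since the new generic pci.h guards its defaults with #ifndef (see
 * just below), an architecture adopting it only overrides what
 * differs before the include, e.g.
 *
 *	#define PCIBIOS_MIN_IO	0x1000
 *	#include <asm-generic/pci.h>
 */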
+#ifndef __ASM_GENERIC_PCI_H +#define __ASM_GENERIC_PCI_H + +#ifndef PCIBIOS_MIN_IO +#define PCIBIOS_MIN_IO 0 +#endif + +#ifndef PCIBIOS_MIN_MEM +#define PCIBIOS_MIN_MEM 0 +#endif + +#ifndef pcibios_assign_all_busses +/* For bootloaders that do not initialize the PCI bus */ +#define pcibios_assign_all_busses() 1 +#endif + +/* Enable generic resource mapping code in drivers/pci/ */ +#define ARCH_GENERIC_PCI_MMAP_RESOURCE + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_proc_domain(struct pci_bus *bus) { - return channel ? 15 : 14; + /* always show the domain in /proc */ + return 1; } -#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ +#endif /* CONFIG_PCI_DOMAINS */ -#endif /* _ASM_GENERIC_PCI_H */ +#endif /* __ASM_GENERIC_PCI_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index 5a2f9bf53384..8fbb0a55545d 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -25,6 +25,8 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *); #ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, unsigned int nr); +#elif !defined(CONFIG_HAS_IOPORT_MAP) +#define __pci_ioport_map(dev, port, nr) NULL #else #define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) #endif diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 02932efad3ab..977bea16cf1b 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -147,6 +147,15 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #if CONFIG_PGTABLE_LEVELS > 3 +static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + gfp_t gfp = GFP_PGTABLE_USER; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + return (pud_t *)get_zeroed_page(gfp); +} + #ifndef __HAVE_ARCH_PUD_ALLOC_ONE /** * pud_alloc_one - allocate a page for PUD-level page table @@ -159,20 +168,23 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) */ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { - gfp_t gfp = GFP_PGTABLE_USER; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - return (pud_t *)get_zeroed_page(gfp); + return __pud_alloc_one(mm, addr); } #endif -static inline void pud_free(struct mm_struct *mm, pud_t *pud) +static inline void __pud_free(struct mm_struct *mm, pud_t *pud) { BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); free_page((unsigned long)pud); } +#ifndef __HAVE_ARCH_PUD_FREE +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + __pud_free(mm, pud); +} +#endif + #endif /* CONFIG_PGTABLE_LEVELS > 3 */ #ifndef __HAVE_ARCH_PGD_FREE diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 10789cf51d16..8ffd64e7a24c 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -30,6 +30,8 @@ typedef struct { pud_t pud; } pmd_t; static inline int pud_none(pud_t pud) { return 0; } static inline int pud_bad(pud_t pud) { return 0; } static inline int pud_present(pud_t pud) { return 1; } +static inline int pud_user(pud_t pud) { return 0; } +static inline int pud_leaf(pud_t pud) { return 0; } static inline void pud_clear(pud_t *pud) { } #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 7ae0ece07b4e..75b8f4601b28 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -2,6 +2,10 @@ /* * Queue read/write lock * + * These use generic atomic and 
locking routines, but depend on a fair spinlock + * implementation in order to be fair themselves. The implementation in + * asm-generic/spinlock.h meets these requirements. + * * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. * * Authors: Waiman Long <waiman.long@hp.com> @@ -33,8 +37,8 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock); extern void queued_write_lock_slowpath(struct qrwlock *lock); /** - * queued_read_trylock - try to acquire read lock of a queue rwlock - * @lock : Pointer to queue rwlock structure + * queued_read_trylock - try to acquire read lock of a queued rwlock + * @lock : Pointer to queued rwlock structure * Return: 1 if lock acquired, 0 if failed */ static inline int queued_read_trylock(struct qrwlock *lock) @@ -52,8 +56,8 @@ static inline int queued_read_trylock(struct qrwlock *lock) } /** - * queued_write_trylock - try to acquire write lock of a queue rwlock - * @lock : Pointer to queue rwlock structure + * queued_write_trylock - try to acquire write lock of a queued rwlock + * @lock : Pointer to queued rwlock structure * Return: 1 if lock acquired, 0 if failed */ static inline int queued_write_trylock(struct qrwlock *lock) @@ -68,8 +72,8 @@ static inline int queued_write_trylock(struct qrwlock *lock) _QW_LOCKED)); } /** - * queued_read_lock - acquire read lock of a queue rwlock - * @lock: Pointer to queue rwlock structure + * queued_read_lock - acquire read lock of a queued rwlock + * @lock: Pointer to queued rwlock structure */ static inline void queued_read_lock(struct qrwlock *lock) { @@ -84,8 +88,8 @@ static inline void queued_read_lock(struct qrwlock *lock) } /** - * queued_write_lock - acquire write lock of a queue rwlock - * @lock : Pointer to queue rwlock structure + * queued_write_lock - acquire write lock of a queued rwlock + * @lock : Pointer to queued rwlock structure */ static inline void queued_write_lock(struct qrwlock *lock) { @@ -98,8 +102,8 @@ static inline void queued_write_lock(struct qrwlock *lock) } /** - * queued_read_unlock - release read lock of a queue rwlock - * @lock : Pointer to queue rwlock structure + * queued_read_unlock - release read lock of a queued rwlock + * @lock : Pointer to queued rwlock structure */ static inline void queued_read_unlock(struct qrwlock *lock) { @@ -110,8 +114,8 @@ static inline void queued_read_unlock(struct qrwlock *lock) } /** - * queued_write_unlock - release write lock of a queue rwlock - * @lock : Pointer to queue rwlock structure + * queued_write_unlock - release write lock of a queued rwlock + * @lock : Pointer to queued rwlock structure */ static inline void queued_write_unlock(struct qrwlock *lock) { @@ -120,7 +124,7 @@ static inline void queued_write_unlock(struct qrwlock *lock) /** * queued_rwlock_is_contended - check if the lock is contended - * @lock : Pointer to queue rwlock structure + * @lock : Pointer to queued rwlock structure * Return: 1 if lock contended, 0 otherwise */ static inline int queued_rwlock_is_contended(struct qrwlock *lock) @@ -130,7 +134,7 @@ static inline int queued_rwlock_is_contended(struct qrwlock *lock) /* * Remapping rwlock architecture specific functions to the corresponding - * queue rwlock functions. + * queued rwlock functions. 
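 *
 * Editor's note (illustrative): with the remaps that follow, an
 * architecture selecting queued rwlocks provides no rwlock code of
 * its own, e.g.
 *
 *	arch_read_lock(&rw)   ->  queued_read_lock(&rw)
 *	arch_write_lock(&rw)  ->  queued_write_lock(&rw)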
*/ #define arch_read_lock(l) queued_read_lock(l) #define arch_write_lock(l) queued_write_lock(l) diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index c36f1d5a2572..12392c14c4d0 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h @@ -7,7 +7,7 @@ #include <asm/spinlock_types.h> /* - * The queue read/write lock data structure + * The queued read/write lock data structure */ typedef struct qrwlock { diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index d74b13825501..995513fa2690 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -2,6 +2,35 @@ /* * Queued spinlock * + * A 'generic' spinlock implementation that is based on MCS locks. For an + * architecture that's looking for a 'generic' spinlock, please first consider + * ticket-lock.h and only come looking here when you've considered all the + * constraints below and can show your hardware does actually perform better + * with qspinlock. + * + * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no + * weaker than RCtso if you're power), where regular code only expects atomic_t + * to be RCpc. + * + * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set + * of atomic operations to behave well together, please audit them carefully to + * ensure they all have forward progress. Many atomic operations may default to + * cmpxchg() loops which will not have good forward progress properties on + * LL/SC architectures. + * + * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply) + * do. Carefully read the patches that introduced + * queued_fetch_set_pending_acquire(). + * + * qspinlock also heavily relies on mixed size atomic operations, in specific + * it requires architectures to have xchg16; something which many LL/SC + * architectures need to implement as a 32bit and+or in order to satisfy the + * forward progress guarantees mentioned above. + * + * Further reading on mixed size atomics that might be relevant: + * + * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf + * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP * diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 690f741764e1..db13bb620f52 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -59,11 +59,24 @@ extern char __noinstr_text_start[], __noinstr_text_end[]; extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ -#ifndef dereference_function_descriptor +#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS +void *dereference_function_descriptor(void *ptr); +void *dereference_kernel_function_descriptor(void *ptr); +#else #define dereference_function_descriptor(p) ((void *)(p)) #define dereference_kernel_function_descriptor(p) ((void *)(p)) + +/* An address is simply the address of the function. 
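 *
 * Editor's sketch (not part of this patch): on descriptor ABIs such as
 * ppc64 ELFv1 or ia64, a C function pointer refers to a descriptor
 * rather than to text, so code wanting the real entry point does
 *
 *	addr = dereference_function_descriptor(fn);
 *
 * which, per the #else branch above, collapses to (void *)fn on
 * non-descriptor targets.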
*/ +typedef struct { + unsigned long addr; +} func_desc_t; #endif +static inline bool have_function_descriptors(void) +{ + return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS); +} + /** * memory_contains - checks if an object is contained within a memory region * @begin: virtual address of the beginning of the memory region @@ -84,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt, /** * memory_intersects - checks if the region occupied by an object intersects * with another memory region - * @begin: virtual address of the beginning of the memory regien + * @begin: virtual address of the beginning of the memory region * @end: virtual address of the end of the memory region * @virt: virtual address of the memory object * @size: size of the memory object @@ -97,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt, { void *vend = virt + size; - return (virt >= begin && virt < end) || (vend >= begin && vend < end); + if (virt < end && vend > begin) + return true; + + return false; } /** diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h index c53984fa9761..663dd6d0795d 100644 --- a/include/asm-generic/signal.h +++ b/include/asm-generic/signal.h @@ -5,8 +5,6 @@ #include <uapi/asm-generic/signal.h> #ifndef __ASSEMBLY__ -#ifdef SA_RESTORER -#endif #include <asm/sigcontext.h> #undef __HAVE_ARCH_SIG_BITOPS diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h index eceeecf6a5bd..2a67aed9ac52 100644 --- a/include/asm-generic/softirq_stack.h +++ b/include/asm-generic/softirq_stack.h @@ -2,7 +2,7 @@ #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H #define __ASM_GENERIC_SOFTIRQ_STACK_H -#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK void do_softirq_own_stack(void); #else static inline void do_softirq_own_stack(void) diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index adaf6acab172..fdfebcb050f4 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -1,12 +1,92 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_GENERIC_SPINLOCK_H -#define __ASM_GENERIC_SPINLOCK_H + /* - * You need to implement asm/spinlock.h for SMP support. The generic - * version does not handle SMP. + * 'Generic' ticket-lock implementation. + * + * It relies on atomic_fetch_add() having well defined forward progress + * guarantees under contention. If your architecture cannot provide this, stick + * to a test-and-set lock. + * + * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a + * sub-word of the value. This is generally true for anything LL/SC although + * you'd be hard pressed to find anything useful in architecture specifications + * about this. If your architecture cannot do this you might be better off with + * a test-and-set. + * + * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence + * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with + * a full fence after the spin to upgrade the otherwise-RCpc + * atomic_cond_read_acquire(). + * + * The implementation uses smp_cond_load_acquire() to spin, so if the + * architecture has WFE like instructions to sleep instead of poll for word + * modifications be sure to implement that (see ARM64 for example). 
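+ *
+ * Editor's note (illustrative): the lock word packs two 16-bit halves,
+ * the next ticket in bits 31..16 and the current owner in bits 15..0.
+ * If three CPUs take tickets on an unlocked lock, the word reads
+ * 0x00030000: the CPU holding ticket 0 owns the lock while tickets 1
+ * and 2 spin until each unlock stores owner + 1.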
+ * */ -#ifdef CONFIG_SMP -#error need an architecture specific asm/spinlock.h -#endif + +#ifndef __ASM_GENERIC_SPINLOCK_H +#define __ASM_GENERIC_SPINLOCK_H + +#include <linux/atomic.h> +#include <asm-generic/spinlock_types.h> + +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) +{ + u32 val = atomic_fetch_add(1<<16, lock); + u16 ticket = val >> 16; + + if (ticket == (u16)val) + return; + + /* + * atomic_cond_read_acquire() is RCpc, but rather than defining a + * custom cond_read_rcsc() here we just emit a full fence. We only + * need the prior reads before subsequent writes ordering from + * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we + * have no outstanding writes due to the atomic_fetch_add() the extra + * orderings are free. + */ + atomic_cond_read_acquire(lock, ticket == (u16)VAL); + smp_mb(); +} + +static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) +{ + u32 old = atomic_read(lock); + + if ((old >> 16) != (old & 0xffff)) + return false; + + return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ +} + +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + u32 val = atomic_read(lock); + + smp_store_release(ptr, (u16)val + 1); +} + +static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + u32 val = atomic_read(lock); + + return ((val >> 16) != (val & 0xffff)); +} + +static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) +{ + u32 val = atomic_read(lock); + + return (s16)((val >> 16) - (val & 0xffff)) > 1; +} + +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return !arch_spin_is_locked(&lock); +} + +#include <asm/qrwlock.h> #endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h new file mode 100644 index 000000000000..8962bb730945 --- /dev/null +++ b/include/asm-generic/spinlock_types.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H +#define __ASM_GENERIC_SPINLOCK_TYPES_H + +#include <linux/types.h> +typedef atomic_t arch_spinlock_t; + +/* + * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the + * include. + */ +#include <asm/qrwlock_types.h> + +#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) + +#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 81695eb02a12..5a80fe728dc8 100644 --- a/include/asm-generic/syscall.h +++ b/ |