From 8e97d4c8a4633f6d826e1872448435894df1794c Mon Sep 17 00:00:00 2001 From: Tomasz Duszynski Date: Wed, 10 Jul 2019 21:21:55 +0200 Subject: MAINTAINERS: add entry for plantower pms7003 driver Add myself as a plantower pms7003 driver maintainer. Signed-off-by: Tomasz Duszynski Signed-off-by: Jonathan Cameron --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 783569e3c4b4..90498954685b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12775,6 +12775,12 @@ F: drivers/i2c/busses/i2c-puv3.c F: drivers/video/fbdev/fb-puv3.c F: drivers/rtc/rtc-puv3.c +PLANTOWER PMS7003 AIR POLLUTION SENSOR DRIVER +M: Tomasz Duszynski +S: Maintained +F: drivers/iio/chemical/pms7003.c +F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml + PMBUS HARDWARE MONITORING DRIVERS M: Guenter Roeck L: linux-hwmon@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From db6ed4d23dd10a5bfbe922f23fc284bd4b4b0a1d Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 23 Jul 2019 10:36:39 +0300 Subject: iio: imu: Add support for the ADIS16460 IMU The ADIS16460 device is a complete inertial system that includes a triaxial gyroscope and a triaxial accelerometer. Its design is simpler than that of the ADIS16480, and it does not offer triaxial magnetometers or a pressure sensor. It does have a temperature sensor (like the ADIS16480). Since it is part of the ADIS16XXX family, it re-uses parts of the ADIS library. Naturally, the register map is different and much simpler than that of the ADIS16480 subfamily, so it cannot be integrated into that driver. A major difference is that the registers are not paged. One thing that is particularly special about it is that it requires a longer delay between CS changes (i.e. when CS goes up, the spec recommends that it be brought down again only after a minimum of 16 us). Other ADIS chips require (via spec) a minimum of 2 us between CS changes. The kernel's 10 us default should be fine for those other chips; they haven't been tested with lower CS change delays yet. Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/adis16460.pdf Signed-off-by: Dragos Bogdan Signed-off-by: Michael Hennerich Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- MAINTAINERS | 7 + drivers/iio/imu/Kconfig | 12 ++ drivers/iio/imu/Makefile | 1 + drivers/iio/imu/adis16460.c | 489 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 509 insertions(+) create mode 100644 drivers/iio/imu/adis16460.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 90498954685b..4213e3b00a75 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -945,6 +945,13 @@ L: linux-iio@vger.kernel.org F: include/linux/iio/imu/adis.h F: drivers/iio/imu/adis.c +ANALOG DEVICES INC ADIS16460 DRIVER +M: Dragos Bogdan +S: Supported +L: linux-iio@vger.kernel.org +W: http://ez.analog.com/community/linux-device-drivers +F: drivers/iio/imu/adis16460.c + ANALOG DEVICES INC ADP5061 DRIVER M: Stefan Popa L: linux-pm@vger.kernel.org diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig index 4957e6df447e..f3c7282321a8 100644 --- a/drivers/iio/imu/Kconfig +++ b/drivers/iio/imu/Kconfig @@ -17,6 +17,18 @@ config ADIS16400 adis16365, adis16400 and adis16405 triaxial inertial sensors (adis16400 series also have magnetometers).
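
As background for the CS timing note in the commit message above: the series routes its delays through struct adis_data (the read_delay, write_delay and cs_change_delay values set further down in adis16460.c), and the SPI core applies them per transfer. The fragment below is a hedged sketch, not code from this patch: it shows one way a driver of this era could request a >= 16 us chip-select idle time between the two 16-bit halves of a register read, using the cs_change/cs_change_delay transfer fields; the example_read16 name and the tx/rx buffers are made up for illustration.

  /*
   * Illustrative only: enforce a minimum CS-high time between two 16-bit
   * SPI words, as the ADIS16460 datasheet asks for (>= 16 us).
   */
  #include <linux/spi/spi.h>

  static int example_read16(struct spi_device *spi, u8 *tx, u8 *rx)
  {
  	struct spi_transfer xfers[] = {
  		{
  			.tx_buf = tx,		/* 16-bit read request */
  			.len = 2,
  			.cs_change = 1,		/* deassert CS after this word */
  			.cs_change_delay = 16,	/* keep CS high >= 16 us */
  			.cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
  		}, {
  			.rx_buf = rx,		/* 16-bit response */
  			.len = 2,
  		},
  	};

  	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
  }
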
+config ADIS16460 + tristate "Analog Devices ADIS16460 and similar IMU driver" + depends on SPI + select IIO_ADIS_LIB + select IIO_ADIS_LIB_BUFFER if IIO_BUFFER + help + Say yes here to build support for Analog Devices ADIS16460 inertial + sensor. + + To compile this driver as a module, choose M here: the module will be + called adis16460. + config ADIS16480 tristate "Analog Devices ADIS16480 and similar IMU driver" depends on SPI diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile index 9e452fce1aaf..4a6958865504 100644 --- a/drivers/iio/imu/Makefile +++ b/drivers/iio/imu/Makefile @@ -5,6 +5,7 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_ADIS16400) += adis16400.o +obj-$(CONFIG_ADIS16460) += adis16460.o obj-$(CONFIG_ADIS16480) += adis16480.o adis_lib-y += adis.o diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c new file mode 100644 index 000000000000..1ef11640ee20 --- /dev/null +++ b/drivers/iio/imu/adis16460.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ADIS16460 IMU driver + * + * Copyright 2019 Analog Devices Inc. + */ + +#include +#include +#include + +#include +#include + +#include + +#define ADIS16460_REG_FLASH_CNT 0x00 +#define ADIS16460_REG_DIAG_STAT 0x02 +#define ADIS16460_REG_X_GYRO_LOW 0x04 +#define ADIS16460_REG_X_GYRO_OUT 0x06 +#define ADIS16460_REG_Y_GYRO_LOW 0x08 +#define ADIS16460_REG_Y_GYRO_OUT 0x0A +#define ADIS16460_REG_Z_GYRO_LOW 0x0C +#define ADIS16460_REG_Z_GYRO_OUT 0x0E +#define ADIS16460_REG_X_ACCL_LOW 0x10 +#define ADIS16460_REG_X_ACCL_OUT 0x12 +#define ADIS16460_REG_Y_ACCL_LOW 0x14 +#define ADIS16460_REG_Y_ACCL_OUT 0x16 +#define ADIS16460_REG_Z_ACCL_LOW 0x18 +#define ADIS16460_REG_Z_ACCL_OUT 0x1A +#define ADIS16460_REG_SMPL_CNTR 0x1C +#define ADIS16460_REG_TEMP_OUT 0x1E +#define ADIS16460_REG_X_DELT_ANG 0x24 +#define ADIS16460_REG_Y_DELT_ANG 0x26 +#define ADIS16460_REG_Z_DELT_ANG 0x28 +#define ADIS16460_REG_X_DELT_VEL 0x2A +#define ADIS16460_REG_Y_DELT_VEL 0x2C +#define ADIS16460_REG_Z_DELT_VEL 0x2E +#define ADIS16460_REG_MSC_CTRL 0x32 +#define ADIS16460_REG_SYNC_SCAL 0x34 +#define ADIS16460_REG_DEC_RATE 0x36 +#define ADIS16460_REG_FLTR_CTRL 0x38 +#define ADIS16460_REG_GLOB_CMD 0x3E +#define ADIS16460_REG_X_GYRO_OFF 0x40 +#define ADIS16460_REG_Y_GYRO_OFF 0x42 +#define ADIS16460_REG_Z_GYRO_OFF 0x44 +#define ADIS16460_REG_X_ACCL_OFF 0x46 +#define ADIS16460_REG_Y_ACCL_OFF 0x48 +#define ADIS16460_REG_Z_ACCL_OFF 0x4A +#define ADIS16460_REG_LOT_ID1 0x52 +#define ADIS16460_REG_LOT_ID2 0x54 +#define ADIS16460_REG_PROD_ID 0x56 +#define ADIS16460_REG_SERIAL_NUM 0x58 +#define ADIS16460_REG_CAL_SGNTR 0x60 +#define ADIS16460_REG_CAL_CRC 0x62 +#define ADIS16460_REG_CODE_SGNTR 0x64 +#define ADIS16460_REG_CODE_CRC 0x66 + +struct adis16460_chip_info { + unsigned int num_channels; + const struct iio_chan_spec *channels; + unsigned int gyro_max_val; + unsigned int gyro_max_scale; + unsigned int accel_max_val; + unsigned int accel_max_scale; +}; + +struct adis16460 { + const struct adis16460_chip_info *chip_info; + struct adis adis; +}; + +#ifdef CONFIG_DEBUG_FS + +static int adis16460_show_serial_number(void *arg, u64 *val) +{ + struct adis16460 *adis16460 = arg; + u16 serial; + int ret; + + ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_SERIAL_NUM, + &serial); + if (ret < 0) + return ret; + + *val = serial; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(adis16460_serial_number_fops, + adis16460_show_serial_number, NULL, "0x%.4llx\n"); + +static int adis16460_show_product_id(void *arg, u64 
*val) +{ + struct adis16460 *adis16460 = arg; + u16 prod_id; + int ret; + + ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_PROD_ID, + &prod_id); + if (ret < 0) + return ret; + + *val = prod_id; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(adis16460_product_id_fops, + adis16460_show_product_id, NULL, "%llu\n"); + +static int adis16460_show_flash_count(void *arg, u64 *val) +{ + struct adis16460 *adis16460 = arg; + u32 flash_count; + int ret; + + ret = adis_read_reg_32(&adis16460->adis, ADIS16460_REG_FLASH_CNT, + &flash_count); + if (ret < 0) + return ret; + + *val = flash_count; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(adis16460_flash_count_fops, + adis16460_show_flash_count, NULL, "%lld\n"); + +static int adis16460_debugfs_init(struct iio_dev *indio_dev) +{ + struct adis16460 *adis16460 = iio_priv(indio_dev); + + debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry, + adis16460, &adis16460_serial_number_fops); + debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry, + adis16460, &adis16460_product_id_fops); + debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry, + adis16460, &adis16460_flash_count_fops); + + return 0; +} + +#else + +static int adis16460_debugfs_init(struct iio_dev *indio_dev) +{ + return 0; +} + +#endif + +static int adis16460_set_freq(struct iio_dev *indio_dev, int val, int val2) +{ + struct adis16460 *st = iio_priv(indio_dev); + unsigned int t; + + t = val * 1000 + val2 / 1000; + if (t <= 0) + return -EINVAL; + + t = 2048000 / t; + if (t > 2048) + t = 2048; + + if (t != 0) + t--; + + return adis_write_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, t); +} + +static int adis16460_get_freq(struct iio_dev *indio_dev, int *val, int *val2) +{ + struct adis16460 *st = iio_priv(indio_dev); + uint16_t t; + int ret; + unsigned int freq; + + ret = adis_read_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, &t); + if (ret < 0) + return ret; + + freq = 2048000 / (t + 1); + *val = freq / 1000; + *val2 = (freq % 1000) * 1000; + + return IIO_VAL_INT_PLUS_MICRO; +} + +static int adis16460_read_raw(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, int *val, int *val2, long info) +{ + struct adis16460 *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + return adis_single_conversion(indio_dev, chan, 0, val); + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_ANGL_VEL: + *val = st->chip_info->gyro_max_scale; + *val2 = st->chip_info->gyro_max_val; + return IIO_VAL_FRACTIONAL; + case IIO_ACCEL: + *val = st->chip_info->accel_max_scale; + *val2 = st->chip_info->accel_max_val; + return IIO_VAL_FRACTIONAL; + case IIO_TEMP: + *val = 50; /* 50 milli degrees Celsius/LSB */ + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + *val = 500; /* 25 degrees Celsius = 0x0000 */ + return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + return adis16460_get_freq(indio_dev, val, val2); + default: + return -EINVAL; + } +} + +static int adis16460_write_raw(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, int val, int val2, long info) +{ + switch (info) { + case IIO_CHAN_INFO_SAMP_FREQ: + return adis16460_set_freq(indio_dev, val, val2); + default: + return -EINVAL; + } +} + +enum { + ADIS16460_SCAN_GYRO_X, + ADIS16460_SCAN_GYRO_Y, + ADIS16460_SCAN_GYRO_Z, + ADIS16460_SCAN_ACCEL_X, + ADIS16460_SCAN_ACCEL_Y, + ADIS16460_SCAN_ACCEL_Z, + ADIS16460_SCAN_TEMP, +}; + +#define ADIS16460_MOD_CHANNEL(_type, _mod, _address, _si, _bits) \ + { \ + .type = (_type), \ + .modified = 1, \ + .channel2 = 
(_mod), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .address = (_address), \ + .scan_index = (_si), \ + .scan_type = { \ + .sign = 's', \ + .realbits = (_bits), \ + .storagebits = (_bits), \ + .endianness = IIO_BE, \ + }, \ + } + +#define ADIS16460_GYRO_CHANNEL(_mod) \ + ADIS16460_MOD_CHANNEL(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \ + ADIS16460_REG_ ## _mod ## _GYRO_LOW, ADIS16460_SCAN_GYRO_ ## _mod, \ + 32) + +#define ADIS16460_ACCEL_CHANNEL(_mod) \ + ADIS16460_MOD_CHANNEL(IIO_ACCEL, IIO_MOD_ ## _mod, \ + ADIS16460_REG_ ## _mod ## _ACCL_LOW, ADIS16460_SCAN_ACCEL_ ## _mod, \ + 32) + +#define ADIS16460_TEMP_CHANNEL() { \ + .type = IIO_TEMP, \ + .indexed = 1, \ + .channel = 0, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .address = ADIS16460_REG_TEMP_OUT, \ + .scan_index = ADIS16460_SCAN_TEMP, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ + } + +static const struct iio_chan_spec adis16460_channels[] = { + ADIS16460_GYRO_CHANNEL(X), + ADIS16460_GYRO_CHANNEL(Y), + ADIS16460_GYRO_CHANNEL(Z), + ADIS16460_ACCEL_CHANNEL(X), + ADIS16460_ACCEL_CHANNEL(Y), + ADIS16460_ACCEL_CHANNEL(Z), + ADIS16460_TEMP_CHANNEL(), + IIO_CHAN_SOFT_TIMESTAMP(7) +}; + +static const struct adis16460_chip_info adis16460_chip_info = { + .channels = adis16460_channels, + .num_channels = ARRAY_SIZE(adis16460_channels), + /* + * storing the value in rad/degree and the scale in degree + * gives us the result in rad and better precision than + * storing the scale directly in rad.
+ */ + .gyro_max_val = IIO_RAD_TO_DEGREE(200 << 16), + .gyro_max_scale = 1, + .accel_max_val = IIO_M_S_2_TO_G(20000 << 16), + .accel_max_scale = 5, +}; + +static const struct iio_info adis16460_info = { + .read_raw = &adis16460_read_raw, + .write_raw = &adis16460_write_raw, + .update_scan_mode = adis_update_scan_mode, + .debugfs_reg_access = adis_debugfs_reg_access, +}; + +static int adis16460_enable_irq(struct adis *adis, bool enable) +{ + /* + * There is no way to gate the data-ready signal internally inside the + * ADIS16460 :( + */ + if (enable) + enable_irq(adis->spi->irq); + else + disable_irq(adis->spi->irq); + + return 0; +} + +static int adis16460_initial_setup(struct iio_dev *indio_dev) +{ + struct adis16460 *st = iio_priv(indio_dev); + uint16_t prod_id; + unsigned int device_id; + int ret; + + adis_reset(&st->adis); + msleep(222); + + ret = adis_write_reg_16(&st->adis, ADIS16460_REG_GLOB_CMD, BIT(1)); + if (ret) + return ret; + msleep(75); + + ret = adis_check_status(&st->adis); + if (ret) + return ret; + + ret = adis_read_reg_16(&st->adis, ADIS16460_REG_PROD_ID, &prod_id); + if (ret) + return ret; + + ret = sscanf(indio_dev->name, "adis%u\n", &device_id); + if (ret != 1) + return -EINVAL; + + if (prod_id != device_id) + dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.", + device_id, prod_id); + + return 0; +} + +#define ADIS16460_DIAG_STAT_IN_CLK_OOS 7 +#define ADIS16460_DIAG_STAT_FLASH_MEM 6 +#define ADIS16460_DIAG_STAT_SELF_TEST 5 +#define ADIS16460_DIAG_STAT_OVERRANGE 4 +#define ADIS16460_DIAG_STAT_SPI_COMM 3 +#define ADIS16460_DIAG_STAT_FLASH_UPT 2 + +static const char * const adis16460_status_error_msgs[] = { + [ADIS16460_DIAG_STAT_IN_CLK_OOS] = "Input clock out of sync", + [ADIS16460_DIAG_STAT_FLASH_MEM] = "Flash memory failure", + [ADIS16460_DIAG_STAT_SELF_TEST] = "Self test diagnostic failure", + [ADIS16460_DIAG_STAT_OVERRANGE] = "Sensor overrange", + [ADIS16460_DIAG_STAT_SPI_COMM] = "SPI communication failure", + [ADIS16460_DIAG_STAT_FLASH_UPT] = "Flash update failure", +}; + +static const struct adis_data adis16460_data = { + .diag_stat_reg = ADIS16460_REG_DIAG_STAT, + .glob_cmd_reg = ADIS16460_REG_GLOB_CMD, + .has_paging = false, + .read_delay = 5, + .write_delay = 5, + .cs_change_delay = 16, + .status_error_msgs = adis16460_status_error_msgs, + .status_error_mask = BIT(ADIS16460_DIAG_STAT_IN_CLK_OOS) | + BIT(ADIS16460_DIAG_STAT_FLASH_MEM) | + BIT(ADIS16460_DIAG_STAT_SELF_TEST) | + BIT(ADIS16460_DIAG_STAT_OVERRANGE) | + BIT(ADIS16460_DIAG_STAT_SPI_COMM) | + BIT(ADIS16460_DIAG_STAT_FLASH_UPT), + .enable_irq = adis16460_enable_irq, +}; + +static int adis16460_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct adis16460 *st; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (indio_dev == NULL) + return -ENOMEM; + + spi_set_drvdata(spi, indio_dev); + + st = iio_priv(indio_dev); + + st->chip_info = &adis16460_chip_info; + indio_dev->dev.parent = &spi->dev; + indio_dev->name = spi_get_device_id(spi)->name; + indio_dev->channels = st->chip_info->channels; + indio_dev->num_channels = st->chip_info->num_channels; + indio_dev->info = &adis16460_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = adis_init(&st->adis, indio_dev, spi, &adis16460_data); + if (ret) + return ret; + + ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL); + if (ret) + return ret; + + adis16460_enable_irq(&st->adis, 0); + + ret = adis16460_initial_setup(indio_dev); + if (ret) + goto error_cleanup_buffer; + + ret = 
iio_device_register(indio_dev); + if (ret) + goto error_cleanup_buffer; + + adis16460_debugfs_init(indio_dev); + + return 0; + +error_cleanup_buffer: + adis_cleanup_buffer_and_trigger(&st->adis, indio_dev); + return ret; +} + +static int adis16460_remove(struct spi_device *spi) +{ + struct iio_dev *indio_dev = spi_get_drvdata(spi); + struct adis16460 *st = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + + adis_cleanup_buffer_and_trigger(&st->adis, indio_dev); + + return 0; +} + +static const struct spi_device_id adis16460_ids[] = { + { "adis16460", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, adis16460_ids); + +static const struct of_device_id adis16460_of_match[] = { + { .compatible = "adi,adis16460" }, + {} +}; +MODULE_DEVICE_TABLE(of, adis16460_of_match); + +static struct spi_driver adis16460_driver = { + .driver = { + .name = "adis16460", + .of_match_table = adis16460_of_match, + }, + .id_table = adis16460_ids, + .probe = adis16460_probe, + .remove = adis16460_remove, +}; +module_spi_driver(adis16460_driver); + +MODULE_AUTHOR("Dragos Bogdan "); +MODULE_DESCRIPTION("Analog Devices ADIS16460 IMU driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3-59-g8ed1b From 1c667c4d99d7a42bc6bdcafc2896045e8c17b910 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 23 Jul 2019 10:36:40 +0300 Subject: dt-bindings: iio: imu: add bindings for ADIS16460 This change adds device-tree bindings for the ADIS16460. Signed-off-by: Alexandru Ardelean Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../devicetree/bindings/iio/imu/adi,adis16460.yaml | 53 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 54 insertions(+) create mode 100644 Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml new file mode 100644 index 000000000000..0c53009ba7d6 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/imu/adi,adis16460.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices ADIS16460 and similar IMUs + +maintainers: + - Dragos Bogdan + +description: | + Analog Devices ADIS16460 and similar IMUs + https://www.analog.com/media/en/technical-documentation/data-sheets/ADIS16460.pdf + +properties: + compatible: + enum: + - adi,adis16460 + + reg: + maxItems: 1 + + spi-cpha: true + + spi-cpol: true + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + spi0 { + #address-cells = <1>; + #size-cells = <0>; + + imu@0 { + compatible = "adi,adis16460"; + reg = <0>; + spi-max-frequency = <5000000>; + spi-cpol; + spi-cpha; + interrupt-parent = <&gpio0>; + interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 4213e3b00a75..ff1cac18ef38 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -951,6 +951,7 @@ S: Supported L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers F: drivers/iio/imu/adis16460.c +F: Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml ANALOG DEVICES INC ADP5061 DRIVER M: Stefan Popa -- cgit v1.2.3-59-g8ed1b From 62ed7a81ef81b501a0ef3ecda532a34c6b4deb9b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 13 Aug 2019 09:02:34 +0300 Subject: MAINTAINERS: ftm-quaddec: Fix typo in a filepath Fix typo
(s/quadddec/quaddec/) in the path to the documentation. Cc: linux-iio@vger.kernel.org Fixes: 517b2d045aeb ("MAINTAINERS: add counter/ftm-quaddec driver entry") Signed-off-by: Denis Efremov Acked-by: Patrick Havelange Signed-off-by: Jonathan Cameron --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 429d61119980..ac3a5cd6a682 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6329,7 +6329,7 @@ FLEXTIMER FTM-QUADDEC DRIVER M: Patrick Havelange L: linux-iio@vger.kernel.org S: Maintained -F: Documentation/ABI/testing/sysfs-bus-counter-ftm-quadddec +F: Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt F: drivers/counter/ftm-quaddec.c -- cgit v1.2.3-59-g8ed1b From 47e4937a4a7ca4184fd282791dfee76c6799966a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 23 Aug 2019 05:36:59 +0800 Subject: erofs: move erofs out of staging The EROFS filesystem has been in linux-staging for a year. EROFS is designed to be a better solution for saving extra storage space with guaranteed end-to-end performance for read-only files, with the help of reduced metadata, fixed-sized output compression and in-place decompression technologies. In the past year, EROFS was greatly improved by many people as a staging driver, self-tested, beta-tested by a large number of our internal users, successfully applied to almost all in-service HUAWEI smartphones as part of EMUI 9.1, and proven to be stable enough to be moved out of staging. EROFS is a self-contained filesystem driver. Although there are still some TODOs to make it more generic, we have a dedicated team actively working on EROFS in order to make it better with the evolution of the Linux kernel, as with the other in-kernel filesystems. As Pavel suggested, it's better to do this as one commit since git can track moves and all history will be preserved this way. Let's promote it from staging and enhance it more actively as a "real" part of the kernel for wider scenarios! Cc: Greg Kroah-Hartman Cc: Alexander Viro Cc: Andrew Morton Cc: Stephen Rothwell Cc: Theodore Ts'o Cc: Pavel Machek Cc: David Sterba Cc: Amir Goldstein Cc: Christoph Hellwig Cc: Darrick J.
Wong Cc: Dave Chinner Cc: Jaegeuk Kim Cc: Jan Kara Cc: Richard Weinberger Cc: Linus Torvalds Cc: Chao Yu Cc: Miao Xie Cc: Li Guifu Cc: Fang Wei Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20190822213659.5501-1-hsiangkao@aol.com Signed-off-by: Greg Kroah-Hartman --- Documentation/filesystems/erofs.txt | 219 +++ MAINTAINERS | 14 +- drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - .../erofs/Documentation/filesystems/erofs.txt | 223 --- drivers/staging/erofs/Kconfig | 98 -- drivers/staging/erofs/Makefile | 13 - drivers/staging/erofs/TODO | 46 - drivers/staging/erofs/compress.h | 62 - drivers/staging/erofs/data.c | 425 ------ drivers/staging/erofs/decompressor.c | 360 ----- drivers/staging/erofs/dir.c | 141 -- drivers/staging/erofs/erofs_fs.h | 310 ----- drivers/staging/erofs/include/trace/events/erofs.h | 256 ---- drivers/staging/erofs/inode.c | 334 ----- drivers/staging/erofs/internal.h | 554 -------- drivers/staging/erofs/namei.c | 253 ---- drivers/staging/erofs/super.c | 671 --------- drivers/staging/erofs/tagptr.h | 110 -- drivers/staging/erofs/utils.c | 335 ----- drivers/staging/erofs/xattr.c | 705 ---------- drivers/staging/erofs/xattr.h | 94 -- drivers/staging/erofs/zdata.c | 1434 -------------------- drivers/staging/erofs/zdata.h | 195 --- drivers/staging/erofs/zmap.c | 468 ------- drivers/staging/erofs/zpvec.h | 159 --- fs/Kconfig | 1 + fs/Makefile | 1 + fs/erofs/Kconfig | 98 ++ fs/erofs/Makefile | 11 + fs/erofs/compress.h | 60 + fs/erofs/data.c | 423 ++++++ fs/erofs/decompressor.c | 358 +++++ fs/erofs/dir.c | 139 ++ fs/erofs/erofs_fs.h | 307 +++++ fs/erofs/inode.c | 332 +++++ fs/erofs/internal.h | 553 ++++++++ fs/erofs/namei.c | 251 ++++ fs/erofs/super.c | 669 +++++++++ fs/erofs/tagptr.h | 110 ++ fs/erofs/utils.c | 333 +++++ fs/erofs/xattr.c | 703 ++++++++++ fs/erofs/xattr.h | 92 ++ fs/erofs/zdata.c | 1432 +++++++++++++++++++ fs/erofs/zdata.h | 193 +++ fs/erofs/zmap.c | 466 +++++++ fs/erofs/zpvec.h | 157 +++ include/trace/events/erofs.h | 256 ++++ include/uapi/linux/magic.h | 1 + 49 files changed, 7172 insertions(+), 7256 deletions(-) create mode 100644 Documentation/filesystems/erofs.txt delete mode 100644 drivers/staging/erofs/Documentation/filesystems/erofs.txt delete mode 100644 drivers/staging/erofs/Kconfig delete mode 100644 drivers/staging/erofs/Makefile delete mode 100644 drivers/staging/erofs/TODO delete mode 100644 drivers/staging/erofs/compress.h delete mode 100644 drivers/staging/erofs/data.c delete mode 100644 drivers/staging/erofs/decompressor.c delete mode 100644 drivers/staging/erofs/dir.c delete mode 100644 drivers/staging/erofs/erofs_fs.h delete mode 100644 drivers/staging/erofs/include/trace/events/erofs.h delete mode 100644 drivers/staging/erofs/inode.c delete mode 100644 drivers/staging/erofs/internal.h delete mode 100644 drivers/staging/erofs/namei.c delete mode 100644 drivers/staging/erofs/super.c delete mode 100644 drivers/staging/erofs/tagptr.h delete mode 100644 drivers/staging/erofs/utils.c delete mode 100644 drivers/staging/erofs/xattr.c delete mode 100644 drivers/staging/erofs/xattr.h delete mode 100644 drivers/staging/erofs/zdata.c delete mode 100644 drivers/staging/erofs/zdata.h delete mode 100644 drivers/staging/erofs/zmap.c delete mode 100644 drivers/staging/erofs/zpvec.h create mode 100644 fs/erofs/Kconfig create mode 100644 fs/erofs/Makefile create mode 100644 fs/erofs/compress.h create mode 100644 fs/erofs/data.c create mode 100644 fs/erofs/decompressor.c create mode 100644 fs/erofs/dir.c create mode 100644 
fs/erofs/erofs_fs.h create mode 100644 fs/erofs/inode.c create mode 100644 fs/erofs/internal.h create mode 100644 fs/erofs/namei.c create mode 100644 fs/erofs/super.c create mode 100644 fs/erofs/tagptr.h create mode 100644 fs/erofs/utils.c create mode 100644 fs/erofs/xattr.c create mode 100644 fs/erofs/xattr.h create mode 100644 fs/erofs/zdata.c create mode 100644 fs/erofs/zdata.h create mode 100644 fs/erofs/zmap.c create mode 100644 fs/erofs/zpvec.h create mode 100644 include/trace/events/erofs.h (limited to 'MAINTAINERS') diff --git a/Documentation/filesystems/erofs.txt b/Documentation/filesystems/erofs.txt new file mode 100644 index 000000000000..38aa9126ec98 --- /dev/null +++ b/Documentation/filesystems/erofs.txt @@ -0,0 +1,219 @@ +Overview +======== + +EROFS file-system stands for Enhanced Read-Only File System. Different +from other read-only file systems, it aims to be designed for flexibility, +scalability, but be kept simple and high performance. + +It is designed as a better filesystem solution for the following scenarios: + - read-only storage media or + + - part of a fully trusted read-only solution, which means it needs to be + immutable and bit-for-bit identical to the official golden image for + their releases due to security and other considerations and + + - hope to save some extra storage space with guaranteed end-to-end performance + by using reduced metadata and transparent file compression, especially + for those embedded devices with limited memory (ex, smartphone); + +Here is the main features of EROFS: + - Little endian on-disk design; + + - Currently 4KB block size (nobh) and therefore maximum 16TB address space; + + - Metadata & data could be mixed by design; + + - 2 inode versions for different requirements: + v1 v2 + Inode metadata size: 32 bytes 64 bytes + Max file size: 4 GB 16 EB (also limited by max. vol size) + Max uids/gids: 65536 4294967296 + File creation time: no yes (64 + 32-bit timestamp) + Max hardlinks: 65536 4294967296 + Metadata reserved: 4 bytes 14 bytes + + - Support extended attributes (xattrs) as an option; + + - Support xattr inline and tail-end data inline for all files; + + - Support POSIX.1e ACLs by using xattrs; + + - Support transparent file compression as an option: + LZ4 algorithm with 4 KB fixed-output compression for high performance; + +The following git tree provides the file system user-space tools under +development (ex, formatting tool mkfs.erofs): +>> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git + +Bugs and patches are welcome, please kindly help us and send to the following +linux-erofs mailing list: +>> linux-erofs mailing list + +Mount options +============= + +fault_injection=%d Enable fault injection in all supported types with + specified injection rate. Supported injection type: + Type_Name Type_Value + FAULT_KMALLOC 0x000000001 + FAULT_READ_IO 0x000000002 +(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled + by default if CONFIG_EROFS_FS_XATTR is selected. +(no)acl Setup POSIX Access Control List. Note: acl is enabled + by default if CONFIG_EROFS_FS_POSIX_ACL is selected. +cache_strategy=%s Select a strategy for cached decompression from now on: + disabled: In-place I/O decompression only; + readahead: Cache the last incomplete compressed physical + cluster for further reading. It still does + in-place I/O decompression for the rest + compressed physical clusters; + readaround: Cache the both ends of incomplete compressed + physical clusters for further reading. 
+ It still does in-place I/O decompression + for the rest compressed physical clusters. + +Module parameters +================= +use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0). + +On-disk details +=============== + +Summary +------- +Different from other read-only file systems, an EROFS volume is designed +to be as simple as possible: + + |-> aligned with the block size + ____________________________________________________________ + | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | + |_|__|_|_____|__________|_____|______|__________|_____|______| + 0 +1K + +All data areas should be aligned with the block size, but metadata areas +may not. All metadatas can be now observed in two different spaces (views): + 1. Inode metadata space + Each valid inode should be aligned with an inode slot, which is a fixed + value (32 bytes) and designed to be kept in line with v1 inode size. + + Each inode can be directly found with the following formula: + inode offset = meta_blkaddr * block_size + 32 * nid + + |-> aligned with 8B + |-> followed closely + + meta_blkaddr blocks |-> another slot + _____________________________________________________________________ + | ... | inode | xattrs | extents | data inline | ... | inode ... + |________|_______|(optional)|(optional)|__(optional)_|_____|__________ + |-> aligned with the inode slot size + . . + . . + . . + . . + . . + . . + .____________________________________________________|-> aligned with 4B + | xattr_ibody_header | shared xattrs | inline xattrs | + |____________________|_______________|_______________| + |-> 12 bytes <-|->x * 4 bytes<-| . + . . . + . . . + . . . + ._______________________________.______________________. + | id | id | id | id | ... | id | ent | ... | ent| ... | + |____|____|____|____|______|____|_____|_____|____|_____| + |-> aligned with 4B + |-> aligned with 4B + + Inode could be 32 or 64 bytes, which can be distinguished from a common + field which all inode versions have -- i_advise: + + __________________ __________________ + | i_advise | | i_advise | + |__________________| |__________________| + | ... | | ... | + | | | | + |__________________| 32 bytes | | + | | + |__________________| 64 bytes + + Xattrs, extents, data inline are followed by the corresponding inode with + proper alignes, and they could be optional for different data mappings, + _currently_ there are totally 3 valid data mappings supported: + + 1) flat file data without data inline (no extent); + 2) fixed-output size data compression (must have extents); + 3) flat file data with tail-end data inline (no extent); + + The size of the optional xattrs is indicated by i_xattr_count in inode + header. Large xattrs or xattrs shared by many different files can be + stored in shared xattrs metadata rather than inlined right after inode. + + 2. Shared xattrs metadata space + Shared xattrs space is similar to the above inode space, started with + a specific block indicated by xattr_blkaddr, organized one by one with + proper align. + + Each share xattr can also be directly found by the following formula: + xattr offset = xattr_blkaddr * block_size + 4 * xattr_id + + |-> aligned by 4 bytes + + xattr_blkaddr blocks |-> aligned with 4 bytes + _________________________________________________________________________ + | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... 
+ |________|_____________|_____________|_____|______________|_______________ + +Directories +----------- +All directories are now organized in a compact on-disk format. Note that +each directory block is divided into index and name areas in order to support +random file lookup, and all directory entries are _strictly_ recorded in +alphabetical order in order to support improved prefix binary search +algorithm (could refer to the related source code). + + ___________________________ + / | + / ______________|________________ + / / | nameoff1 | nameoffN-1 + ____________.______________._______________v________________v__________ +| dirent | dirent | ... | dirent | filename | filename | ... | filename | +|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| + \ ^ + \ | * could have + \ | trailing '\0' + \________________________| nameoff0 + + Directory block + +Note that apart from the offset of the first filename, nameoff0 also indicates +the total number of directory entries in this block since it is no need to +introduce another on-disk field at all. + +Compression +----------- +Currently, EROFS supports 4KB fixed-output clustersize transparent file +compression, as illustrated below: + + |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- + clusterofs clusterofs clusterofs + | | | logical data +_________v_______________________________v_____________________v_______________ +... | . | | . | | . | ... +____|____.________|_____________|________.____|_____________|__.__________|____ + |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| + size size size size size + . . . . + . . . . + . . . . + _______._____________._____________._____________._____________________ + ... | | | | ... physical data + _______|_____________|_____________|_____________|_____________________ + |-> cluster <-|-> cluster <-|-> cluster <-| + size size size + +Currently each on-disk physical cluster can contain 4KB (un)compressed data +at most. For each logical cluster, there is a corresponding on-disk index to +describe its cluster type, physical cluster address, etc. + +See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. 
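
The two lookup formulas above (inode offset = meta_blkaddr * block_size + 32 * nid, and xattr offset = xattr_blkaddr * block_size + 4 * xattr_id) are easy to check by hand. The short standalone C program below is illustrative only: the 4KB block size and 32-byte inode slot come from this document, while the sample block addresses and nid/xattr_id values are made up.

  /* Standalone sketch of the two on-disk lookup formulas quoted above. */
  #include <stdio.h>
  #include <stdint.h>

  #define EROFS_BLKSIZ	4096	/* current fixed 4KB block size */
  #define EROFS_SLOTSIZE	32	/* inode slot, in line with the v1 inode size */

  /* inode offset = meta_blkaddr * block_size + 32 * nid */
  static uint64_t inode_offset(uint32_t meta_blkaddr, uint64_t nid)
  {
  	return (uint64_t)meta_blkaddr * EROFS_BLKSIZ + EROFS_SLOTSIZE * nid;
  }

  /* xattr offset = xattr_blkaddr * block_size + 4 * xattr_id */
  static uint64_t xattr_offset(uint32_t xattr_blkaddr, uint32_t xattr_id)
  {
  	return (uint64_t)xattr_blkaddr * EROFS_BLKSIZ + 4ULL * xattr_id;
  }

  int main(void)
  {
  	/* made-up example values */
  	printf("inode nid 36 @ byte %llu\n",
  	       (unsigned long long)inode_offset(1, 36));	/* 4096 + 1152 = 5248 */
  	printf("shared xattr id 5 @ byte %llu\n",
  	       (unsigned long long)xattr_offset(2, 5));	/* 8192 + 20 = 8212 */
  	return 0;
  }
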
+ diff --git a/MAINTAINERS b/MAINTAINERS index 6847372cfab8..0f38cba2c581 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6046,6 +6046,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: drivers/video/fbdev/s1d13xxxfb.c F: include/video/s1d13xxxfb.h +EROFS FILE SYSTEM +M: Gao Xiang +M: Chao Yu +L: linux-erofs@lists.ozlabs.org +S: Maintained +F: fs/erofs/ + ERRSEQ ERROR TRACKING INFRASTRUCTURE M: Jeff Layton S: Maintained @@ -15229,13 +15236,6 @@ M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ -STAGING - EROFS FILE SYSTEM -M: Gao Xiang -M: Chao Yu -L: linux-erofs@lists.ozlabs.org -S: Maintained -F: drivers/staging/erofs/ - STAGING - FIELDBUS SUBSYSTEM M: Sven Van Asbroeck S: Maintained diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 7c96a01eef6c..d972ec8e71fb 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -112,8 +112,6 @@ source "drivers/staging/gasket/Kconfig" source "drivers/staging/axis-fifo/Kconfig" -source "drivers/staging/erofs/Kconfig" - source "drivers/staging/fieldbus/Kconfig" source "drivers/staging/kpc2000/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index fcaac9693b83..6018b9a4a077 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -46,7 +46,6 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ -obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_KPC2000) += kpc2000/ obj-$(CONFIG_ISDN_CAPI) += isdn/ diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt deleted file mode 100644 index 0eab600ca7ca..000000000000 --- a/drivers/staging/erofs/Documentation/filesystems/erofs.txt +++ /dev/null @@ -1,223 +0,0 @@ -Overview -======== - -EROFS file-system stands for Enhanced Read-Only File System. Different -from other read-only file systems, it aims to be designed for flexibility, -scalability, but be kept simple and high performance. - -It is designed as a better filesystem solution for the following scenarios: - - read-only storage media or - - - part of a fully trusted read-only solution, which means it needs to be - immutable and bit-for-bit identical to the official golden image for - their releases due to security and other considerations and - - - hope to save some extra storage space with guaranteed end-to-end performance - by using reduced metadata and transparent file compression, especially - for those embedded devices with limited memory (ex, smartphone); - -Here is the main features of EROFS: - - Little endian on-disk design; - - - Currently 4KB block size (nobh) and therefore maximum 16TB address space; - - - Metadata & data could be mixed by design; - - - 2 inode versions for different requirements: - v1 v2 - Inode metadata size: 32 bytes 64 bytes - Max file size: 4 GB 16 EB (also limited by max. 
vol size) - Max uids/gids: 65536 4294967296 - File creation time: no yes (64 + 32-bit timestamp) - Max hardlinks: 65536 4294967296 - Metadata reserved: 4 bytes 14 bytes - - - Support extended attributes (xattrs) as an option; - - - Support xattr inline and tail-end data inline for all files; - - - Support POSIX.1e ACLs by using xattrs; - - - Support transparent file compression as an option: - LZ4 algorithm with 4 KB fixed-output compression for high performance; - -The following git tree provides the file system user-space tools under -development (ex, formatting tool mkfs.erofs): ->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git - -Bugs and patches are welcome, please kindly help us and send to the following -linux-erofs mailing list: ->> linux-erofs mailing list - -Note that EROFS is still working in progress as a Linux staging driver, -Cc the staging mailing list as well is highly recommended: ->> Linux Driver Project Developer List - -Mount options -============= - -fault_injection=%d Enable fault injection in all supported types with - specified injection rate. Supported injection type: - Type_Name Type_Value - FAULT_KMALLOC 0x000000001 - FAULT_READ_IO 0x000000002 -(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled - by default if CONFIG_EROFS_FS_XATTR is selected. -(no)acl Setup POSIX Access Control List. Note: acl is enabled - by default if CONFIG_EROFS_FS_POSIX_ACL is selected. -cache_strategy=%s Select a strategy for cached decompression from now on: - disabled: In-place I/O decompression only; - readahead: Cache the last incomplete compressed physical - cluster for further reading. It still does - in-place I/O decompression for the rest - compressed physical clusters; - readaround: Cache the both ends of incomplete compressed - physical clusters for further reading. - It still does in-place I/O decompression - for the rest compressed physical clusters. - -Module parameters -================= -use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0). - -On-disk details -=============== - -Summary -------- -Different from other read-only file systems, an EROFS volume is designed -to be as simple as possible: - - |-> aligned with the block size - ____________________________________________________________ - | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | - |_|__|_|_____|__________|_____|______|__________|_____|______| - 0 +1K - -All data areas should be aligned with the block size, but metadata areas -may not. All metadatas can be now observed in two different spaces (views): - 1. Inode metadata space - Each valid inode should be aligned with an inode slot, which is a fixed - value (32 bytes) and designed to be kept in line with v1 inode size. - - Each inode can be directly found with the following formula: - inode offset = meta_blkaddr * block_size + 32 * nid - - |-> aligned with 8B - |-> followed closely - + meta_blkaddr blocks |-> another slot - _____________________________________________________________________ - | ... | inode | xattrs | extents | data inline | ... | inode ... - |________|_______|(optional)|(optional)|__(optional)_|_____|__________ - |-> aligned with the inode slot size - . . - . . - . . - . . - . . - . . - .____________________________________________________|-> aligned with 4B - | xattr_ibody_header | shared xattrs | inline xattrs | - |____________________|_______________|_______________| - |-> 12 bytes <-|->x * 4 bytes<-| . - . . . - . . . - . . . 
- ._______________________________.______________________. - | id | id | id | id | ... | id | ent | ... | ent| ... | - |____|____|____|____|______|____|_____|_____|____|_____| - |-> aligned with 4B - |-> aligned with 4B - - Inode could be 32 or 64 bytes, which can be distinguished from a common - field which all inode versions have -- i_advise: - - __________________ __________________ - | i_advise | | i_advise | - |__________________| |__________________| - | ... | | ... | - | | | | - |__________________| 32 bytes | | - | | - |__________________| 64 bytes - - Xattrs, extents, data inline are followed by the corresponding inode with - proper alignes, and they could be optional for different data mappings, - _currently_ there are totally 3 valid data mappings supported: - - 1) flat file data without data inline (no extent); - 2) fixed-output size data compression (must have extents); - 3) flat file data with tail-end data inline (no extent); - - The size of the optional xattrs is indicated by i_xattr_count in inode - header. Large xattrs or xattrs shared by many different files can be - stored in shared xattrs metadata rather than inlined right after inode. - - 2. Shared xattrs metadata space - Shared xattrs space is similar to the above inode space, started with - a specific block indicated by xattr_blkaddr, organized one by one with - proper align. - - Each share xattr can also be directly found by the following formula: - xattr offset = xattr_blkaddr * block_size + 4 * xattr_id - - |-> aligned by 4 bytes - + xattr_blkaddr blocks |-> aligned with 4 bytes - _________________________________________________________________________ - | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... - |________|_____________|_____________|_____|______________|_______________ - -Directories ------------ -All directories are now organized in a compact on-disk format. Note that -each directory block is divided into index and name areas in order to support -random file lookup, and all directory entries are _strictly_ recorded in -alphabetical order in order to support improved prefix binary search -algorithm (could refer to the related source code). - - ___________________________ - / | - / ______________|________________ - / / | nameoff1 | nameoffN-1 - ____________.______________._______________v________________v__________ -| dirent | dirent | ... | dirent | filename | filename | ... | filename | -|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| - \ ^ - \ | * could have - \ | trailing '\0' - \________________________| nameoff0 - - Directory block - -Note that apart from the offset of the first filename, nameoff0 also indicates -the total number of directory entries in this block since it is no need to -introduce another on-disk field at all. - -Compression ------------ -Currently, EROFS supports 4KB fixed-output clustersize transparent file -compression, as illustrated below: - - |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- - clusterofs clusterofs clusterofs - | | | logical data -_________v_______________________________v_____________________v_______________ -... | . | | . | | . | ... -____|____.________|_____________|________.____|_____________|__.__________|____ - |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| - size size size size size - . . . . - . . . . - . . . . - _______._____________._____________._____________._____________________ - ... | | | | ... 
physical data - _______|_____________|_____________|_____________|_____________________ - |-> cluster <-|-> cluster <-|-> cluster <-| - size size size - -Currently each on-disk physical cluster can contain 4KB (un)compressed data -at most. For each logical cluster, there is a corresponding on-disk index to -describe its cluster type, physical cluster address, etc. - -See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. - diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig deleted file mode 100644 index 16316d1adca3..000000000000 --- a/drivers/staging/erofs/Kconfig +++ /dev/null @@ -1,98 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -config EROFS_FS - tristate "EROFS filesystem support" - depends on BLOCK - help - EROFS (Enhanced Read-Only File System) is a lightweight - read-only file system with modern designs (eg. page-sized - blocks, inline xattrs/data, etc.) for scenarios which need - high-performance read-only requirements, e.g. Android OS - for mobile phones and LIVECDs. - - It also provides fixed-sized output compression support, - which improves storage density, keeps relatively higher - compression ratios, which is more useful to achieve high - performance for embedded devices with limited memory. - - If unsure, say N. - -config EROFS_FS_DEBUG - bool "EROFS debugging feature" - depends on EROFS_FS - help - Print debugging messages and enable more BUG_ONs which check - filesystem consistency and find potential issues aggressively, - which can be used for Android eng build, for example. - - For daily use, say N. - -config EROFS_FAULT_INJECTION - bool "EROFS fault injection facility" - depends on EROFS_FS - help - Test EROFS to inject faults such as ENOMEM, EIO, and so on. - If unsure, say N. - -config EROFS_FS_XATTR - bool "EROFS extended attributes" - depends on EROFS_FS - default y - help - Extended attributes are name:value pairs associated with inodes by - the kernel or by users (see the attr(5) manual page, or visit - for details). - - If unsure, say N. - -config EROFS_FS_POSIX_ACL - bool "EROFS Access Control Lists" - depends on EROFS_FS_XATTR - select FS_POSIX_ACL - default y - help - Posix Access Control Lists (ACLs) support permissions for users and - groups beyond the owner/group/world scheme. - - To learn more about Access Control Lists, visit the POSIX ACLs for - Linux website . - - If you don't know what Access Control Lists are, say N. - -config EROFS_FS_SECURITY - bool "EROFS Security Labels" - depends on EROFS_FS_XATTR - default y - help - Security labels provide an access control facility to support Linux - Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO - Linux. This option enables an extended attribute handler for file - security labels in the erofs filesystem, so that it requires enabling - the extended attribute support in advance. - - If you are not using a security module, say N. - -config EROFS_FS_ZIP - bool "EROFS Data Compression Support" - depends on EROFS_FS - select LZ4_DECOMPRESS - default y - help - Enable fixed-sized output compression for EROFS. - - If you don't want to enable compression feature, say N. - -config EROFS_FS_CLUSTER_PAGE_LIMIT - int "EROFS Cluster Pages Hard Limit" - depends on EROFS_FS_ZIP - range 1 256 - default "1" - help - Indicates maximum # of pages of a compressed - physical cluster. - - For example, if files in a image were compressed - into 8k-unit, hard limit should not be configured - less than 2. 
Otherwise, the image will be refused - to mount on this kernel. - diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile deleted file mode 100644 index 5cdae21cb5af..000000000000 --- a/drivers/staging/erofs/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -EROFS_VERSION = "1.0pre1" - -ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\" - -obj-$(CONFIG_EROFS_FS) += erofs.o -# staging requirement: to be self-contained in its own directory -ccflags-y += -I $(srctree)/$(src)/include -erofs-objs := super.o inode.o data.o namei.o dir.o utils.o -erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o -erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o - diff --git a/drivers/staging/erofs/TODO b/drivers/staging/erofs/TODO deleted file mode 100644 index a8608b2f72bd..000000000000 --- a/drivers/staging/erofs/TODO +++ /dev/null @@ -1,46 +0,0 @@ - -EROFS is still working in progress, thus it is not suitable -for all productive uses. play at your own risk :) - -TODO List: - - add the missing error handling code - (mainly existed in xattr and decompression submodules); - - - finalize erofs ondisk format design (which means that - minor on-disk revisions could happen later); - - - documentation and detailed technical analysis; - - - general code review and clean up - (including confusing variable names and code snippets); - - - support larger compressed clustersizes for selection - (currently erofs only works as expected with the page-sized - compressed cluster configuration, usually 4KB); - - - support more lossless data compression algorithms - in addition to LZ4 algorithms in VLE approach; - - - data deduplication and other useful features. - -The following git tree provides the file system user-space -tools under development (ex, formatting tool mkfs.erofs): ->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git - -The open-source development of erofs-utils is at the early stage. -Contact the original author Li Guifu and -the co-maintainer Fang Wei for the latest news -and more details. - -Code, suggestions, etc, are welcome. Please feel free to -ask and send patches, - -To: - linux-erofs mailing list - Gao Xiang - Chao Yu - -Cc: (for linux-kernel upstream patches) - Greg Kroah-Hartman - linux-staging mailing list - diff --git a/drivers/staging/erofs/compress.h b/drivers/staging/erofs/compress.h deleted file mode 100644 index 043013f9ef1b..000000000000 --- a/drivers/staging/erofs/compress.h +++ /dev/null @@ -1,62 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/compress.h - * - * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_COMPRESS_H -#define __EROFS_FS_COMPRESS_H - -#include "internal.h" - -enum { - Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX, - Z_EROFS_COMPRESSION_RUNTIME_MAX -}; - -struct z_erofs_decompress_req { - struct super_block *sb; - struct page **in, **out; - - unsigned short pageofs_out; - unsigned int inputsize, outputsize; - - /* indicate the algorithm will be used for decompression */ - unsigned int alg; - bool inplace_io, partial_decoding; -}; - -/* - * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) - - * used to mark temporary allocated pages from other - * file/cached pages and NULL mapping pages. 
- */ -#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D) - -/* check if a page is marked as staging */ -static inline bool z_erofs_page_is_staging(struct page *page) -{ - return page->mapping == Z_EROFS_MAPPING_STAGING; -} - -static inline bool z_erofs_put_stagingpage(struct list_head *pagepool, - struct page *page) -{ - if (!z_erofs_page_is_staging(page)) - return false; - - /* staging pages should not be used by others at the same time */ - if (page_ref_count(page) > 1) - put_page(page); - else - list_add(&page->lru, pagepool); - return true; -} - -int z_erofs_decompress(struct z_erofs_decompress_req *rq, - struct list_head *pagepool); - -#endif - diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c deleted file mode 100644 index 72c4b4c5296b..000000000000 --- a/drivers/staging/erofs/data.c +++ /dev/null @@ -1,425 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/data.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include - -#include - -static inline void read_endio(struct bio *bio) -{ - struct super_block *const sb = bio->bi_private; - struct bio_vec *bvec; - blk_status_t err = bio->bi_status; - struct bvec_iter_all iter_all; - - if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) { - erofs_show_injection_info(FAULT_READ_IO); - err = BLK_STS_IOERR; - } - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - - /* page is already locked */ - DBG_BUGON(PageUptodate(page)); - - if (unlikely(err)) - SetPageError(page); - else - SetPageUptodate(page); - - unlock_page(page); - /* page could be reclaimed now */ - } - bio_put(bio); -} - -/* prio -- true is used for dir */ -struct page *__erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio, bool nofail) -{ - struct inode *const bd_inode = sb->s_bdev->bd_inode; - struct address_space *const mapping = bd_inode->i_mapping; - /* prefer retrying in the allocator to blindly looping below */ - const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) | - (nofail ? __GFP_NOFAIL : 0); - unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0; - struct page *page; - int err; - -repeat: - page = find_or_create_page(mapping, blkaddr, gfp); - if (unlikely(!page)) { - DBG_BUGON(nofail); - return ERR_PTR(-ENOMEM); - } - DBG_BUGON(!PageLocked(page)); - - if (!PageUptodate(page)) { - struct bio *bio; - - bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail); - if (IS_ERR(bio)) { - DBG_BUGON(nofail); - err = PTR_ERR(bio); - goto err_out; - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - if (unlikely(err != PAGE_SIZE)) { - err = -EFAULT; - goto err_out; - } - - __submit_bio(bio, REQ_OP_READ, - REQ_META | (prio ? 
REQ_PRIO : 0)); - - lock_page(page); - - /* this page has been truncated by others */ - if (unlikely(page->mapping != mapping)) { -unlock_repeat: - unlock_page(page); - put_page(page); - goto repeat; - } - - /* more likely a read error */ - if (unlikely(!PageUptodate(page))) { - if (io_retries) { - --io_retries; - goto unlock_repeat; - } - err = -EIO; - goto err_out; - } - } - return page; - -err_out: - unlock_page(page); - put_page(page); - return ERR_PTR(err); -} - -static int erofs_map_blocks_flatmode(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - int err = 0; - erofs_blk_t nblocks, lastblk; - u64 offset = map->m_la; - struct erofs_vnode *vi = EROFS_V(inode); - - trace_erofs_map_blocks_flatmode_enter(inode, map, flags); - - nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); - lastblk = nblocks - is_inode_flat_inline(inode); - - if (unlikely(offset >= inode->i_size)) { - /* leave out-of-bound access unmapped */ - map->m_flags = 0; - map->m_plen = 0; - goto out; - } - - /* there is no hole in flatmode */ - map->m_flags = EROFS_MAP_MAPPED; - - if (offset < blknr_to_addr(lastblk)) { - map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; - map->m_plen = blknr_to_addr(lastblk) - offset; - } else if (is_inode_flat_inline(inode)) { - /* 2 - inode inline B: inode, [xattrs], inline last blk... */ - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - - map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + - vi->xattr_isize + erofs_blkoff(map->m_la); - map->m_plen = inode->i_size - offset; - - /* inline data should be located in one meta block */ - if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { - errln("inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - err = -EFSCORRUPTED; - goto err_out; - } - - map->m_flags |= EROFS_MAP_META; - } else { - errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", - vi->nid, inode->i_size, map->m_la); - DBG_BUGON(1); - err = -EIO; - goto err_out; - } - -out: - map->m_llen = map->m_plen; - -err_out: - trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); - return err; -} - -int erofs_map_blocks(struct inode *inode, - struct erofs_map_blocks *map, int flags) -{ - if (unlikely(is_inode_layout_compression(inode))) { - int err = z_erofs_map_blocks_iter(inode, map, flags); - - if (map->mpage) { - put_page(map->mpage); - map->mpage = NULL; - } - return err; - } - return erofs_map_blocks_flatmode(inode, map, flags); -} - -static inline struct bio *erofs_read_raw_page(struct bio *bio, - struct address_space *mapping, - struct page *page, - erofs_off_t *last_block, - unsigned int nblocks, - bool ra) -{ - struct inode *const inode = mapping->host; - struct super_block *const sb = inode->i_sb; - erofs_off_t current_block = (erofs_off_t)page->index; - int err; - - DBG_BUGON(!nblocks); - - if (PageUptodate(page)) { - err = 0; - goto has_updated; - } - - /* note that for readpage case, bio also equals to NULL */ - if (bio && - /* not continuous */ - *last_block + 1 != current_block) { -submit_bio_retry: - __submit_bio(bio, REQ_OP_READ, 0); - bio = NULL; - } - - if (!bio) { - struct erofs_map_blocks map = { - .m_la = blknr_to_addr(current_block), - }; - erofs_blk_t blknr; - unsigned int blkoff; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (unlikely(err)) - goto err_out; - - /* zero out the holed page */ - if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) { - zero_user_segment(page, 0, PAGE_SIZE); - SetPageUptodate(page); - - /* imply err = 0, see erofs_map_blocks */ - goto has_updated; - } - 
- /* for RAW access mode, m_plen must be equal to m_llen */ - DBG_BUGON(map.m_plen != map.m_llen); - - blknr = erofs_blknr(map.m_pa); - blkoff = erofs_blkoff(map.m_pa); - - /* deal with inline page */ - if (map.m_flags & EROFS_MAP_META) { - void *vsrc, *vto; - struct page *ipage; - - DBG_BUGON(map.m_plen > PAGE_SIZE); - - ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); - - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); - goto err_out; - } - - vsrc = kmap_atomic(ipage); - vto = kmap_atomic(page); - memcpy(vto, vsrc + blkoff, map.m_plen); - memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); - kunmap_atomic(vto); - kunmap_atomic(vsrc); - flush_dcache_page(page); - - SetPageUptodate(page); - /* TODO: could we unlock the page earlier? */ - unlock_page(ipage); - put_page(ipage); - - /* imply err = 0, see erofs_map_blocks */ - goto has_updated; - } - - /* pa must be block-aligned for raw reading */ - DBG_BUGON(erofs_blkoff(map.m_pa)); - - /* max # of continuous pages */ - if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) - nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); - if (nblocks > BIO_MAX_PAGES) - nblocks = BIO_MAX_PAGES; - - bio = erofs_grab_bio(sb, blknr, nblocks, sb, - read_endio, false); - if (IS_ERR(bio)) { - err = PTR_ERR(bio); - bio = NULL; - goto err_out; - } - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - /* out of the extent or bio is full */ - if (err < PAGE_SIZE) - goto submit_bio_retry; - - *last_block = current_block; - - /* submit in advance in case it is followed by too many gaps */ - if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { - /* err should be reassigned to 0 after submitting */ - err = 0; - goto submit_bio_out; - } - - return bio; - -err_out: - /* for sync reading, set page error immediately */ - if (!ra) { - SetPageError(page); - ClearPageUptodate(page); - } -has_updated: - unlock_page(page); - - /* if updated manually, continuous pages have a gap */ - if (bio) -submit_bio_out: - __submit_bio(bio, REQ_OP_READ, 0); - - return unlikely(err) ? ERR_PTR(err) : NULL; -} - 
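/*
 * The raw read path above batches contiguous blocks into a single bio and
 * flushes it whenever the next page is discontiguous or the bio is full.
 * A minimal standalone sketch of that pattern follows; the sk_* names and
 * types are invented for illustration and are not part of the driver:
 */
#include <stdbool.h>
#include <stdint.h>

struct sk_bio {
	uint64_t first_block;	/* first block covered by this batch */
	unsigned int nr;	/* pages queued so far */
	unsigned int cap;	/* capacity before a forced flush */
};

/* stand-in for submit_bio(); a real implementation would issue the I/O */
static void sk_submit(struct sk_bio *bio)
{
	bio->nr = 0;
}

/* queue one page-sized block, flushing first on a gap or a full batch */
static void sk_add_block(struct sk_bio *bio, uint64_t block)
{
	bool contiguous = bio->nr && block == bio->first_block + bio->nr;

	if (bio->nr && (!contiguous || bio->nr == bio->cap))
		sk_submit(bio);

	if (!bio->nr)
		bio->first_block = block;
	++bio->nr;
}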
-/* - * since we don't have write or truncate flows, no inode - * locking needs to be held at the moment. - */ -static int erofs_raw_access_readpage(struct file *file, struct page *page) -{ - erofs_off_t last_block; - struct bio *bio; - - trace_erofs_readpage(page, true); - - bio = erofs_read_raw_page(NULL, page->mapping, - page, &last_block, 1, false); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ - return 0; -} - -static int erofs_raw_access_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) -{ - erofs_off_t last_block; - struct bio *bio = NULL; - gfp_t gfp = readahead_gfp_mask(mapping); - struct page *page = list_last_entry(pages, struct page, lru); - - trace_erofs_readpages(mapping->host, page, nr_pages, true); - - for (; nr_pages; --nr_pages) { - page = list_entry(pages->prev, struct page, lru); - - prefetchw(&page->flags); - list_del(&page->lru); - - if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { - bio = erofs_read_raw_page(bio, mapping, page, - &last_block, nr_pages, true); - - /* all page errors are ignored during readahead */ - if (IS_ERR(bio)) { - pr_err("%s, readahead error at page %lu of nid %llu\n", - __func__, page->index, - EROFS_V(mapping->host)->nid); - - bio = NULL; - } - } - - /* pages could still be locked */ - put_page(page); - } - DBG_BUGON(!list_empty(pages)); - - /* the rare case (end in gaps) */ - if (unlikely(bio)) - __submit_bio(bio, REQ_OP_READ, 0); - return 0; -} - -static int erofs_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - struct erofs_map_blocks map = { - .m_la = iblock << 9, - }; - int err; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (err) - return err; - - if (map.m_flags & EROFS_MAP_MAPPED) - bh->b_blocknr = erofs_blknr(map.m_pa); - - return err; -} - -static sector_t erofs_bmap(struct address_space *mapping, sector_t block) -{ - struct inode *inode = mapping->host; - - if (is_inode_flat_inline(inode)) { - erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; - - if (block >> LOG_SECTORS_PER_BLOCK >= blks) - return 0; - } - - return generic_block_bmap(mapping, block, erofs_get_block); -} - -/* for uncompressed (aligned) files and raw access for other files */ -const struct address_space_operations erofs_raw_access_aops = { - .readpage = erofs_raw_access_readpage, - .readpages = erofs_raw_access_readpages, - .bmap = erofs_bmap, -}; - diff --git a/drivers/staging/erofs/decompressor.c b/drivers/staging/erofs/decompressor.c deleted file mode 100644 index 32a811ac704a..000000000000 --- a/drivers/staging/erofs/decompressor.c +++ /dev/null @@ -1,360 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/decompressor.c - * - * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "compress.h" -#include -#include - -#ifndef LZ4_DISTANCE_MAX /* history window size */ -#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - -#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1) -#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN -#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32) -#endif -
-struct z_erofs_decompressor { - /* - * if destpages have sparse pages, fill them with bounce pages. - * it also checks whether destpages indicate continuous physical memory. 
- */ - int (*prepare_destpages)(struct z_erofs_decompress_req *rq, - struct list_head *pagepool); - int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out); - char *name; -}; - -static bool use_vmap; -module_param(use_vmap, bool, 0444); -MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)"); - -static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nr = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL }; - unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES, - BITS_PER_LONG)] = { 0 }; - void *kaddr = NULL; - unsigned int i, j, top; - - top = 0; - for (i = j = 0; i < nr; ++i, ++j) { - struct page *const page = rq->out[i]; - struct page *victim; - - if (j >= LZ4_MAX_DISTANCE_PAGES) - j = 0; - - /* 'valid' bounced can only be tested after a complete round */ - if (test_bit(j, bounced)) { - DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES); - DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES); - availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; - } - - if (page) { - __clear_bit(j, bounced); - if (kaddr) { - if (kaddr + PAGE_SIZE == page_address(page)) - kaddr += PAGE_SIZE; - else - kaddr = NULL; - } else if (!i) { - kaddr = page_address(page); - } - continue; - } - kaddr = NULL; - __set_bit(j, bounced); - - if (top) { - victim = availables[--top]; - get_page(victim); - } else { - victim = erofs_allocpage(pagepool, GFP_KERNEL, false); - if (unlikely(!victim)) - return -ENOMEM; - victim->mapping = Z_EROFS_MAPPING_STAGING; - } - rq->out[i] = victim; - } - return kaddr ? 1 : 0; -} - -static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, - u8 *src, unsigned int pageofs_in) -{ - /* - * if in-place decompression is ongoing, those decompressed - * pages should be copied in order to avoid being overlapped. 
- */ - struct page **in = rq->in; - u8 *const tmp = erofs_get_pcpubuf(0); - u8 *tmpp = tmp; - unsigned int inlen = rq->inputsize - pageofs_in; - unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); - - while (tmpp < tmp + inlen) { - if (!src) - src = kmap_atomic(*in); - memcpy(tmpp, src + pageofs_in, count); - kunmap_atomic(src); - src = NULL; - tmpp += count; - pageofs_in = 0; - count = PAGE_SIZE; - ++in; - } - return tmp; -} - -static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) -{ - unsigned int inputmargin, inlen; - u8 *src; - bool copied, support_0padding; - int ret; - - if (rq->inputsize > PAGE_SIZE) - return -EOPNOTSUPP; - - src = kmap_atomic(*rq->in); - inputmargin = 0; - support_0padding = false; - - /* decompression inplace is only safe when 0padding is enabled */ - if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) { - support_0padding = true; - - while (!src[inputmargin & ~PAGE_MASK]) - if (!(++inputmargin & ~PAGE_MASK)) - break; - - if (inputmargin >= rq->inputsize) { - kunmap_atomic(src); - return -EIO; - } - } - - copied = false; - inlen = rq->inputsize - inputmargin; - if (rq->inplace_io) { - const uint oend = (rq->pageofs_out + - rq->outputsize) & ~PAGE_MASK; - const uint nr = PAGE_ALIGN(rq->pageofs_out + - rq->outputsize) >> PAGE_SHIFT; - - if (rq->partial_decoding || !support_0padding || - rq->out[nr - 1] != rq->in[0] || - rq->inputsize - oend < - LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { - src = generic_copy_inplace_data(rq, src, inputmargin); - inputmargin = 0; - copied = true; - } - } - - ret = LZ4_decompress_safe_partial(src + inputmargin, out, - inlen, rq->outputsize, - rq->outputsize); - if (ret < 0) { - errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]", - __func__, src + inputmargin, inlen, inputmargin, - out, rq->outputsize); - WARN_ON(1); - print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, - 16, 1, src + inputmargin, inlen, true); - print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, - 16, 1, out, rq->outputsize, true); - ret = -EIO; - } - - if (copied) - erofs_put_pcpubuf(src); - else - kunmap_atomic(src); - return ret; -} - -static struct z_erofs_decompressor decompressors[] = { - [Z_EROFS_COMPRESSION_SHIFTED] = { - .name = "shifted" - }, - [Z_EROFS_COMPRESSION_LZ4] = { - .prepare_destpages = lz4_prepare_destpages, - .decompress = lz4_decompress, - .name = "lz4" - }, -}; - -static void copy_from_pcpubuf(struct page **out, const char *dst, - unsigned short pageofs_out, - unsigned int outputsize) -{ - const char *end = dst + outputsize; - const unsigned int righthalf = PAGE_SIZE - pageofs_out; - const char *cur = dst - pageofs_out; - - while (cur < end) { - struct page *const page = *out++; - - if (page) { - char *buf = kmap_atomic(page); - - if (cur >= dst) { - memcpy(buf, cur, min_t(uint, PAGE_SIZE, - end - cur)); - } else { - memcpy(buf + pageofs_out, cur + pageofs_out, - min_t(uint, righthalf, end - cur)); - } - kunmap_atomic(buf); - } - cur += PAGE_SIZE; - } -} - -static void *erofs_vmap(struct page **pages, unsigned int count) -{ - int i = 0; - - if (use_vmap) - return vmap(pages, count, VM_MAP, PAGE_KERNEL); - - while (1) { - void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL); - - /* retry two more times (totally 3 times) */ - if (addr || ++i >= 3) - return addr; - vm_unmap_aliases(); - } - return NULL; -} - -static void erofs_vunmap(const void *mem, unsigned int count) -{ - if (!use_vmap) - vm_unmap_ram(mem, count); - else - vunmap(mem); -} - -static int decompress_generic(struct 
z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const struct z_erofs_decompressor *alg = decompressors + rq->alg; - unsigned int dst_maptype; - void *dst; - int ret; - - if (nrpages_out == 1 && !rq->inplace_io) { - DBG_BUGON(!*rq->out); - dst = kmap_atomic(*rq->out); - dst_maptype = 0; - goto dstmap_out; - } - - /* - * For the case of small output size (especially much less - * than PAGE_SIZE), memcpy the decompressed data rather than - * compressed data is preferred. - */ - if (rq->outputsize <= PAGE_SIZE * 7 / 8) { - dst = erofs_get_pcpubuf(0); - if (IS_ERR(dst)) - return PTR_ERR(dst); - - rq->inplace_io = false; - ret = alg->decompress(rq, dst); - if (!ret) - copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, - rq->outputsize); - - erofs_put_pcpubuf(dst); - return ret; - } - - ret = alg->prepare_destpages(rq, pagepool); - if (ret < 0) { - return ret; - } else if (ret) { - dst = page_address(*rq->out); - dst_maptype = 1; - goto dstmap_out; - } - - dst = erofs_vmap(rq->out, nrpages_out); - if (!dst) - return -ENOMEM; - dst_maptype = 2; - -dstmap_out: - ret = alg->decompress(rq, dst + rq->pageofs_out); - - if (!dst_maptype) - kunmap_atomic(dst); - else if (dst_maptype == 2) - erofs_vunmap(dst, nrpages_out); - return ret; -} - -static int shifted_decompress(const struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out; - unsigned char *src, *dst; - - if (nrpages_out > 2) { - DBG_BUGON(1); - return -EIO; - } - - if (rq->out[0] == *rq->in) { - DBG_BUGON(nrpages_out != 1); - return 0; - } - - src = kmap_atomic(*rq->in); - if (!rq->out[0]) { - dst = NULL; - } else { - dst = kmap_atomic(rq->out[0]); - memcpy(dst + rq->pageofs_out, src, righthalf); - } - - if (rq->out[1] == *rq->in) { - memmove(src, src + righthalf, rq->pageofs_out); - } else if (nrpages_out == 2) { - if (dst) - kunmap_atomic(dst); - DBG_BUGON(!rq->out[1]); - dst = kmap_atomic(rq->out[1]); - memcpy(dst, src + righthalf, rq->pageofs_out); - } - if (dst) - kunmap_atomic(dst); - kunmap_atomic(src); - return 0; -} - -int z_erofs_decompress(struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) - return shifted_decompress(rq, pagepool); - return decompress_generic(rq, pagepool); -} - diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c deleted file mode 100644 index 77ef856df9f3..000000000000 --- a/drivers/staging/erofs/dir.c +++ /dev/null @@ -1,141 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/dir.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" - -static void debug_one_dentry(unsigned char d_type, const char *de_name, - unsigned int de_namelen) -{ -#ifdef CONFIG_EROFS_FS_DEBUG - /* since the on-disk name could not have the trailing '\0' */ - unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; - - memcpy(dbg_namebuf, de_name, de_namelen); - dbg_namebuf[de_namelen] = '\0'; - - debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, - de_namelen, d_type); -#endif -} - -static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, - void *dentry_blk, unsigned int *ofs, - unsigned int nameoff, unsigned int maxsize) -{ - struct erofs_dirent *de = dentry_blk + *ofs; - const struct erofs_dirent *end = dentry_blk + nameoff; - - while (de < end) { - const char *de_name; - unsigned int de_namelen; - unsigned char d_type; - - d_type = fs_ftype_to_dtype(de->file_type); - - nameoff = le16_to_cpu(de->nameoff); - de_name = (char *)dentry_blk + nameoff; - - /* the last dirent in the block? */ - if (de + 1 >= end) - de_namelen = strnlen(de_name, maxsize - nameoff); - else - de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; - - /* a corrupted entry is found */ - if (unlikely(nameoff + de_namelen > maxsize || - de_namelen > EROFS_NAME_LEN)) { - errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - debug_one_dentry(d_type, de_name, de_namelen); - if (!dir_emit(ctx, de_name, de_namelen, - le64_to_cpu(de->nid), d_type)) - /* stopped by some reason */ - return 1; - ++de; - *ofs += sizeof(struct erofs_dirent); - } - *ofs = maxsize; - return 0; -} - -static int erofs_readdir(struct file *f, struct dir_context *ctx) -{ - struct inode *dir = file_inode(f); - struct address_space *mapping = dir->i_mapping; - const size_t dirsize = i_size_read(dir); - unsigned int i = ctx->pos / EROFS_BLKSIZ; - unsigned int ofs = ctx->pos % EROFS_BLKSIZ; - int err = 0; - bool initial = true; - - while (ctx->pos < dirsize) { - struct page *dentry_page; - struct erofs_dirent *de; - unsigned int nameoff, maxsize; - - dentry_page = read_mapping_page(mapping, i, NULL); - if (dentry_page == ERR_PTR(-ENOMEM)) { - err = -ENOMEM; - break; - } else if (IS_ERR(dentry_page)) { - errln("fail to readdir of logical block %u of nid %llu", - i, EROFS_V(dir)->nid); - err = -EFSCORRUPTED; - break; - } - - de = (struct erofs_dirent *)kmap(dentry_page); - - nameoff = le16_to_cpu(de->nameoff); - - if (unlikely(nameoff < sizeof(struct erofs_dirent) || - nameoff >= PAGE_SIZE)) { - errln("%s, invalid de[0].nameoff %u @ nid %llu", - __func__, nameoff, EROFS_V(dir)->nid); - err = -EFSCORRUPTED; - goto skip_this; - } - - maxsize = min_t(unsigned int, - dirsize - ctx->pos + ofs, PAGE_SIZE); - - /* search dirents at the arbitrary position */ - if (unlikely(initial)) { - initial = false; - - ofs = roundup(ofs, sizeof(struct erofs_dirent)); - if (unlikely(ofs >= nameoff)) - goto skip_this; - } - - err = erofs_fill_dentries(dir, ctx, de, &ofs, - nameoff, maxsize); -skip_this: - kunmap(dentry_page); - - put_page(dentry_page); - - ctx->pos = blknr_to_addr(i) + ofs; - - if (unlikely(err)) - break; - ++i; - ofs = 0; - } - return err < 0 ? 
err : 0; -} - -const struct file_operations erofs_dir_fops = { - .llseek = generic_file_llseek, - .read = generic_read_dir, - .iterate_shared = erofs_readdir, -}; - diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h deleted file mode 100644 index 6db70f395937..000000000000 --- a/drivers/staging/erofs/erofs_fs.h +++ /dev/null @@ -1,310 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ -/* - * linux/drivers/staging/erofs/erofs_fs.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_H -#define __EROFS_FS_H - -/* Enhanced(Extended) ROM File System */ -#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 -#define EROFS_SUPER_OFFSET 1024 - -/* - * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be - * incompatible with this kernel version. - */ -#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001 -#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING - -struct erofs_super_block { -/* 0 */__le32 magic; /* in the little endian */ -/* 4 */__le32 checksum; /* crc32c(super_block) */ -/* 8 */__le32 features; /* (aka. feature_compat) */ -/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ -/* 13 */__u8 reserved; - -/* 14 */__le16 root_nid; -/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */ - -/* 24 */__le64 build_time; /* inode v1 time derivation */ -/* 32 */__le32 build_time_nsec; -/* 36 */__le32 blocks; /* used for statfs */ -/* 40 */__le32 meta_blkaddr; -/* 44 */__le32 xattr_blkaddr; -/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ -/* 64 */__u8 volume_name[16]; /* volume name */ -/* 80 */__le32 requirements; /* (aka. feature_incompat) */ - -/* 84 */__u8 reserved2[44]; -} __packed; /* 128 bytes */ - -/* - * erofs inode data mapping: - * 0 - inode plain without inline data A: - * inode, [xattrs], ... | ... | no-holed data - * 1 - inode VLE compression B (legacy): - * inode, [xattrs], extents ... | ... - * 2 - inode plain with inline data C: - * inode, [xattrs], last_inline_data, ... | ... | no-holed data - * 3 - inode compression D: - * inode, [xattrs], map_header, extents ... | ... 
- * 4~7 - reserved - */ -enum { - EROFS_INODE_FLAT_PLAIN, - EROFS_INODE_FLAT_COMPRESSION_LEGACY, - EROFS_INODE_FLAT_INLINE, - EROFS_INODE_FLAT_COMPRESSION, - EROFS_INODE_LAYOUT_MAX -}; - -static inline bool erofs_inode_is_data_compressed(unsigned int datamode) -{ - if (datamode == EROFS_INODE_FLAT_COMPRESSION) - return true; - return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; -} - -/* bit definitions of inode i_advise */ -#define EROFS_I_VERSION_BITS 1 -#define EROFS_I_DATA_MAPPING_BITS 3 - -#define EROFS_I_VERSION_BIT 0 -#define EROFS_I_DATA_MAPPING_BIT 1 - -struct erofs_inode_v1 { -/* 0 */__le16 i_advise; - -/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ -/* 2 */__le16 i_xattr_icount; -/* 4 */__le16 i_mode; -/* 6 */__le16 i_nlink; -/* 8 */__le32 i_size; -/* 12 */__le32 i_reserved; -/* 16 */union { - /* file total compressed blocks for data mapping 1 */ - __le32 compressed_blocks; - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - } i_u __packed; -/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */ -/* 24 */__le16 i_uid; -/* 26 */__le16 i_gid; -/* 28 */__le32 i_reserved2; -} __packed; - -/* 32 bytes on-disk inode */ -#define EROFS_INODE_LAYOUT_V1 0 -/* 64 bytes on-disk inode */ -#define EROFS_INODE_LAYOUT_V2 1 - -struct erofs_inode_v2 { -/* 0 */__le16 i_advise; - -/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ -/* 2 */__le16 i_xattr_icount; -/* 4 */__le16 i_mode; -/* 6 */__le16 i_reserved; -/* 8 */__le64 i_size; -/* 16 */union { - /* file total compressed blocks for data mapping 1 */ - __le32 compressed_blocks; - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - } i_u __packed; - - /* only used for 32-bit stat compatibility */ -/* 20 */__le32 i_ino; - -/* 24 */__le32 i_uid; -/* 28 */__le32 i_gid; -/* 32 */__le64 i_ctime; -/* 40 */__le32 i_ctime_nsec; -/* 44 */__le32 i_nlink; -/* 48 */__u8 i_reserved2[16]; -} __packed; /* 64 bytes */ - -#define EROFS_MAX_SHARED_XATTRS (128) -/* h_shared_count between 129 ... 255 are special # */ -#define EROFS_SHARED_XATTR_EXTENT (255) - -/* - * inline xattrs (n == i_xattr_icount): - * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes - * 12 bytes / \ - * / \ - * /-----------------------\ - * | erofs_xattr_entries+ | - * +-----------------------+ - * inline xattrs must starts in erofs_xattr_ibody_header, - * for read-only fs, no need to introduce h_refcount - */ -struct erofs_xattr_ibody_header { - __le32 h_reserved; - __u8 h_shared_count; - __u8 h_reserved2[7]; - __le32 h_shared_xattrs[0]; /* shared xattr id array */ -} __packed; - -/* Name indexes */ -#define EROFS_XATTR_INDEX_USER 1 -#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 -#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 -#define EROFS_XATTR_INDEX_TRUSTED 4 -#define EROFS_XATTR_INDEX_LUSTRE 5 -#define EROFS_XATTR_INDEX_SECURITY 6 - -/* xattr entry (for both inline & shared xattrs) */ -struct erofs_xattr_entry { - __u8 e_name_len; /* length of name */ - __u8 e_name_index; /* attribute name index */ - __le16 e_value_size; /* size of attribute value */ - /* followed by e_name and e_value */ - char e_name[0]; /* attribute name */ -} __packed; - -#define ondisk_xattr_ibody_size(count) ({\ - u32 __count = le16_to_cpu(count); \ - ((__count) == 0) ? 
0 : \ - sizeof(struct erofs_xattr_ibody_header) + \ - sizeof(__u32) * ((__count) - 1); }) - -#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) -#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \ - sizeof(struct erofs_xattr_entry) + \ - (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)) - -/* available compression algorithm types */ -enum { - Z_EROFS_COMPRESSION_LZ4, - Z_EROFS_COMPRESSION_MAX -}; - -/* - * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) - * e.g. for 4k logical cluster size, 4B if compacted 2B is off; - * (4B) + 2B + (4B) if compacted 2B is on. - */ -#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 - -#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) - -struct z_erofs_map_header { - __le32 h_reserved1; - __le16 h_advise; - /* - * bit 0-3 : algorithm type of head 1 (logical cluster type 01); - * bit 4-7 : algorithm type of head 2 (logical cluster type 11). - */ - __u8 h_algorithmtype; - /* - * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; - * bit 3-4 : (physical - logical) cluster bits of head 1: - * For example, if logical clustersize = 4096, 1 for 8192. - * bit 5-7 : (physical - logical) cluster bits of head 2. - */ - __u8 h_clusterbits; -}; - -#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 - -/* - * Z_EROFS Variable-sized Logical Extent cluster type: - * 0 - literal (uncompressed) cluster - * 1 - compressed cluster (for the head logical cluster) - * 2 - compressed cluster (for the other logical clusters) - * - * In detail, - * 0 - literal (uncompressed) cluster, - * di_advise = 0 - * di_clusterofs = the literal data offset of the cluster - * di_blkaddr = the blkaddr of the literal cluster - * - * 1 - compressed cluster (for the head logical cluster) - * di_advise = 1 - * di_clusterofs = the decompressed data offset of the cluster - * di_blkaddr = the blkaddr of the compressed cluster - * - * 2 - compressed cluster (for the other logical clusters) - * di_advise = 2 - * di_clusterofs = - * the decompressed data offset in its own head cluster - * di_u.delta[0] = distance to its corresponding head cluster - * di_u.delta[1] = distance to its corresponding tail cluster - * (di_advise could be 0, 1 or 2) - */ -enum { - Z_EROFS_VLE_CLUSTER_TYPE_PLAIN, - Z_EROFS_VLE_CLUSTER_TYPE_HEAD, - Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, - Z_EROFS_VLE_CLUSTER_TYPE_RESERVED, - Z_EROFS_VLE_CLUSTER_TYPE_MAX -}; - -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 - -struct z_erofs_vle_decompressed_index { - __le16 di_advise; - /* where to decompress in the head cluster */ - __le16 di_clusterofs; - - union { - /* for the head cluster */ - __le32 blkaddr; - /* - * for the rest clusters - * eg. 
for 4k page-sized cluster, maximum 4K*64k = 256M) - * [0] - pointing to the head cluster - * [1] - pointing to the tail cluster - */ - __le16 delta[2]; - } di_u __packed; /* 8 bytes */ -} __packed; - -#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \ - (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \ - sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) - -/* dirent sorts in alphabet order, thus we can do binary search */ -struct erofs_dirent { - __le64 nid; /* 0, node number */ - __le16 nameoff; /* 8, start offset of file name */ - __u8 file_type; /* 10, file type */ - __u8 reserved; /* 11, reserved */ -} __packed; - -/* - * EROFS file types should match generic FT_* types and - * it seems no need to add BUILD_BUG_ONs since potential - * unmatchness will break other fses as well... - */ - -#define EROFS_NAME_LEN 255 - -/* check the EROFS on-disk layout strictly at compile time */ -static inline void erofs_check_ondisk_layout_definitions(void) -{ - BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); - BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32); - BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64); - BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); - BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4); - BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); - BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); - BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); - - BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < - Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); -} - -#endif - diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h deleted file mode 100644 index bfb2da9c4eee..000000000000 --- a/drivers/staging/erofs/include/trace/events/erofs.h +++ /dev/null @@ -1,256 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM erofs - -#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_EROFS_H - -#include - -#define show_dev(dev) MAJOR(dev), MINOR(dev) -#define show_dev_nid(entry) show_dev(entry->dev), entry->nid - -#define show_file_type(type) \ - __print_symbolic(type, \ - { 0, "FILE" }, \ - { 1, "DIR" }) - -#define show_map_flags(flags) __print_flags(flags, "|", \ - { EROFS_GET_BLOCKS_RAW, "RAW" }) - -#define show_mflags(flags) __print_flags(flags, "", \ - { EROFS_MAP_MAPPED, "M" }, \ - { EROFS_MAP_META, "I" }, \ - { EROFS_MAP_ZIPPED, "Z" }) - -TRACE_EVENT(erofs_lookup, - - TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), - - TP_ARGS(dir, dentry, flags), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(const char *, name ) - __field(unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = dir->i_sb->s_dev; - __entry->nid = EROFS_V(dir)->nid; - __entry->name = dentry->d_name.name; - __entry->flags = flags; - ), - - TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", - show_dev_nid(__entry), - __entry->name, - __entry->flags) -); - -TRACE_EVENT(erofs_fill_inode, - TP_PROTO(struct inode *inode, int isdir), - TP_ARGS(inode, isdir), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(erofs_blk_t, blkaddr ) - __field(unsigned int, ofs ) - __field(int, isdir ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); - __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); - 
__entry->isdir = isdir; - ), - - TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", - show_dev_nid(__entry), - __entry->blkaddr, __entry->ofs, - __entry->isdir) -); - -TRACE_EVENT(erofs_readpage, - - TP_PROTO(struct page *page, bool raw), - - TP_ARGS(page, raw), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(int, dir ) - __field(pgoff_t, index ) - __field(int, uptodate) - __field(bool, raw ) - ), - - TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->nid = EROFS_V(page->mapping->host)->nid; - __entry->dir = S_ISDIR(page->mapping->host->i_mode); - __entry->index = page->index; - __entry->uptodate = PageUptodate(page); - __entry->raw = raw; - ), - - TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " - "raw = %d", - show_dev_nid(__entry), - show_file_type(__entry->dir), - (unsigned long)__entry->index, - __entry->uptodate, - __entry->raw) -); - -TRACE_EVENT(erofs_readpages, - - TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, - bool raw), - - TP_ARGS(inode, page, nrpage, raw), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(pgoff_t, start ) - __field(unsigned int, nrpage ) - __field(bool, raw ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->start = page->index; - __entry->nrpage = nrpage; - __entry->raw = raw; - ), - - TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", - show_dev_nid(__entry), - (unsigned long)__entry->start, - __entry->nrpage, - __entry->raw) -); - -DECLARE_EVENT_CLASS(erofs__map_blocks_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - __field( erofs_off_t, la ) - __field( u64, llen ) - __field( unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->la = map->m_la; - __entry->llen = map->m_llen; - __entry->flags = flags; - ), - - TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", - show_dev_nid(__entry), - __entry->la, __entry->llen, - __entry->flags ? 
show_map_flags(__entry->flags) : "NULL") -); - -DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags), - - TP_ARGS(inode, map, flags) -); - -DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags) -); - -DECLARE_EVENT_CLASS(erofs__map_blocks_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - __field( unsigned int, flags ) - __field( erofs_off_t, la ) - __field( erofs_off_t, pa ) - __field( u64, llen ) - __field( u64, plen ) - __field( unsigned int, mflags ) - __field( int, ret ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->flags = flags; - __entry->la = map->m_la; - __entry->pa = map->m_pa; - __entry->llen = map->m_llen; - __entry->plen = map->m_plen; - __entry->mflags = map->m_flags; - __entry->ret = ret; - ), - - TP_printk("dev = (%d,%d), nid = %llu, flags %s " - "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", - show_dev_nid(__entry), - __entry->flags ? show_map_flags(__entry->flags) : "NULL", - __entry->la, __entry->pa, __entry->llen, __entry->plen, - show_mflags(__entry->mflags), __entry->ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -TRACE_EVENT(erofs_destroy_inode, - TP_PROTO(struct inode *inode), - - TP_ARGS(inode), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - ), - - TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) -); - -#endif /* _TRACE_EROFS_H */ - - /* This part must be outside protection */ -#include diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c deleted file mode 100644 index cbc2c342a37f..000000000000 --- a/drivers/staging/erofs/inode.c +++ /dev/null @@ -1,334 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/inode.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "xattr.h" - -#include - -/* no locking */ -static int read_inode(struct inode *inode, void *data) -{ - struct erofs_vnode *vi = EROFS_V(inode); - struct erofs_inode_v1 *v1 = data; - const unsigned int advise = le16_to_cpu(v1->i_advise); - erofs_blk_t nblks = 0; - - vi->datamode = __inode_data_mapping(advise); - - if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) { - errln("unsupported data mapping %u of nid %llu", - vi->datamode, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - - if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) { - struct erofs_inode_v2 *v2 = data; - - vi->inode_isize = sizeof(struct erofs_inode_v2); - vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount); - - inode->i_mode = le16_to_cpu(v2->i_mode); - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr); - else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) - inode->i_rdev = - new_decode_dev(le32_to_cpu(v2->i_u.rdev)); - else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) - inode->i_rdev = 0; - else - goto bogusimode; - - i_uid_write(inode, le32_to_cpu(v2->i_uid)); - i_gid_write(inode, le32_to_cpu(v2->i_gid)); - set_nlink(inode, le32_to_cpu(v2->i_nlink)); - - /* ns timestamp */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - le64_to_cpu(v2->i_ctime); - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - le32_to_cpu(v2->i_ctime_nsec); - - inode->i_size = le64_to_cpu(v2->i_size); - - /* total blocks for compressed files */ - if (is_inode_layout_compression(inode)) - nblks = le32_to_cpu(v2->i_u.compressed_blocks); - } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) { - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - - vi->inode_isize = sizeof(struct erofs_inode_v1); - vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount); - - inode->i_mode = le16_to_cpu(v1->i_mode); - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr); - else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) - inode->i_rdev = - new_decode_dev(le32_to_cpu(v1->i_u.rdev)); - else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) - inode->i_rdev = 0; - else - goto bogusimode; - - i_uid_write(inode, le16_to_cpu(v1->i_uid)); - i_gid_write(inode, le16_to_cpu(v1->i_gid)); - set_nlink(inode, le16_to_cpu(v1->i_nlink)); - - /* use build time to derive all file time */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - sbi->build_time; - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - sbi->build_time_nsec; - - inode->i_size = le32_to_cpu(v1->i_size); - if (is_inode_layout_compression(inode)) - nblks = le32_to_cpu(v1->i_u.compressed_blocks); - } else { - errln("unsupported on-disk inode version %u of nid %llu", - __inode_version(advise), vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - - if (!nblks) - /* measure inode.i_blocks as generic filesystems */ - inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; - else - inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; - return 0; - -bogusimode: - errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; -} - -/* - * try_lock can be required since locking order is: - * file data(fs_inode) - * meta(bd_inode) - * but the majority of the callers is "iget", - * in that case we are pretty sure no deadlock since - * no data operations exist. 
However, I tend to - * try_lock since it takes little overhead and - * will succeed immediately. - */ -static int fill_inline_data(struct inode *inode, void *data, - unsigned int m_pofs) -{ - struct erofs_vnode *vi = EROFS_V(inode); - struct erofs_sb_info *sbi = EROFS_I_SB(inode); - - /* should be inode inline C */ - if (!is_inode_flat_inline(inode)) - return 0; - - /* fast symlink (following ext4) */ - if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) { - char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL); - - if (unlikely(!lnk)) - return -ENOMEM; - - m_pofs += vi->inode_isize + vi->xattr_isize; - - /* inline symlink data shouldn't cross a page boundary either */ - if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { - kfree(lnk); - errln("inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - /* get in-page inline data */ - memcpy(lnk, data + m_pofs, inode->i_size); - lnk[inode->i_size] = '\0'; - - inode->i_link = lnk; - set_inode_fast_symlink(inode); - } - return 0; -} - -static int fill_inode(struct inode *inode, int isdir) -{ - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - struct erofs_vnode *vi = EROFS_V(inode); - struct page *page; - void *data; - int err; - erofs_blk_t blkaddr; - unsigned int ofs; - erofs_off_t inode_loc; - - trace_erofs_fill_inode(inode, isdir); - inode_loc = iloc(sbi, vi->nid); - blkaddr = erofs_blknr(inode_loc); - ofs = erofs_blkoff(inode_loc); - - debugln("%s, reading inode nid %llu at %u of blkaddr %u", - __func__, vi->nid, ofs, blkaddr); - - page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir); - - if (IS_ERR(page)) { - errln("failed to get inode (nid: %llu) page, err %ld", - vi->nid, PTR_ERR(page)); - return PTR_ERR(page); - } - - DBG_BUGON(!PageUptodate(page)); - data = page_address(page); - - err = read_inode(inode, data + ofs); - if (!err) { - /* setup the new inode */ - if (S_ISREG(inode->i_mode)) { - inode->i_op = &erofs_generic_iops; - inode->i_fop = &generic_ro_fops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &erofs_dir_iops; - inode->i_fop = &erofs_dir_fops; - } else if (S_ISLNK(inode->i_mode)) { - /* by default, page_get_link is used for symlink */ - inode->i_op = &erofs_symlink_iops; - inode_nohighmem(inode); - } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || - S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { - inode->i_op = &erofs_generic_iops; - init_special_inode(inode, inode->i_mode, inode->i_rdev); - goto out_unlock; - } else { - err = -EFSCORRUPTED; - goto out_unlock; - } - - if (is_inode_layout_compression(inode)) { - err = z_erofs_fill_inode(inode); - goto out_unlock; - } - - inode->i_mapping->a_ops = &erofs_raw_access_aops; - - /* fill last page if inline data is available */ - err = fill_inline_data(inode, data, ofs); - } - -out_unlock: - unlock_page(page); - put_page(page); - return err; -} - -/* - * erofs nid is 64 bits, but i_ino is 'unsigned long', therefore - * we need to do more on 32-bit platforms to find the right inode.
- */ -#if BITS_PER_LONG == 32 -static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) -{ - const erofs_nid_t nid = *(erofs_nid_t *)opaque; - - return EROFS_V(inode)->nid == nid; -} - -static int erofs_iget_set_actor(struct inode *inode, void *opaque) -{ - const erofs_nid_t nid = *(erofs_nid_t *)opaque; - - inode->i_ino = erofs_inode_hash(nid); - return 0; -} -#endif - -static inline struct inode *erofs_iget_locked(struct super_block *sb, - erofs_nid_t nid) -{ - const unsigned long hashval = erofs_inode_hash(nid); - -#if BITS_PER_LONG >= 64 - /* it is safe to use iget_locked for >= 64-bit platform */ - return iget_locked(sb, hashval); -#else - return iget5_locked(sb, hashval, erofs_ilookup_test_actor, - erofs_iget_set_actor, &nid); -#endif -} - -struct inode *erofs_iget(struct super_block *sb, - erofs_nid_t nid, - bool isdir) -{ - struct inode *inode = erofs_iget_locked(sb, nid); - - if (unlikely(!inode)) - return ERR_PTR(-ENOMEM); - - if (inode->i_state & I_NEW) { - int err; - struct erofs_vnode *vi = EROFS_V(inode); - - vi->nid = nid; - - err = fill_inode(inode, isdir); - if (likely(!err)) - unlock_new_inode(inode); - else { - iget_failed(inode); - inode = ERR_PTR(err); - } - } - return inode; -} - -int erofs_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int query_flags) -{ - struct inode *const inode = d_inode(path->dentry); - - if (is_inode_layout_compression(inode)) - stat->attributes |= STATX_ATTR_COMPRESSED; - - stat->attributes |= STATX_ATTR_IMMUTABLE; - stat->attributes_mask |= (STATX_ATTR_COMPRESSED | - STATX_ATTR_IMMUTABLE); - - generic_fillattr(inode, stat); - return 0; -} - -const struct inode_operations erofs_generic_iops = { - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - -const struct inode_operations erofs_symlink_iops = { - .get_link = page_get_link, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - -const struct inode_operations erofs_fast_symlink_iops = { - .get_link = simple_get_link, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h deleted file mode 100644 index 0e8d58546c52..000000000000 --- a/drivers/staging/erofs/internal.h +++ /dev/null @@ -1,554 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/internal.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_INTERNAL_H -#define __EROFS_INTERNAL_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include "erofs_fs.h" - -/* redefine pr_fmt "erofs: " */ -#undef pr_fmt -#define pr_fmt(fmt) "erofs: " fmt - -#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__) -#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__) -#ifdef CONFIG_EROFS_FS_DEBUG -#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__) -#define DBG_BUGON BUG_ON -#else -#define debugln(x, ...) 
((void)0) -#define DBG_BUGON(x) ((void)(x)) -#endif /* !CONFIG_EROFS_FS_DEBUG */ - -enum { - FAULT_KMALLOC, - FAULT_READ_IO, - FAULT_MAX, -}; - -#ifdef CONFIG_EROFS_FAULT_INJECTION -extern const char *erofs_fault_name[FAULT_MAX]; -#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) - -struct erofs_fault_info { - atomic_t inject_ops; - unsigned int inject_rate; - unsigned int inject_type; -}; -#endif /* CONFIG_EROFS_FAULT_INJECTION */ - -/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ -#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 - -typedef u64 erofs_nid_t; -typedef u64 erofs_off_t; -/* data type for filesystem-wide blocks number */ -typedef u32 erofs_blk_t; - -struct erofs_sb_info { -#ifdef CONFIG_EROFS_FS_ZIP - /* list for all registered superblocks, mainly for shrinker */ - struct list_head list; - struct mutex umount_mutex; - - /* the dedicated workstation for compression */ - struct radix_tree_root workstn_tree; - - /* threshold for decompression synchronously */ - unsigned int max_sync_decompress_pages; - - unsigned int shrinker_run_no; - - /* current strategy of how to use managed cache */ - unsigned char cache_strategy; - - /* pseudo inode to manage cached pages */ - struct inode *managed_cache; -#endif /* CONFIG_EROFS_FS_ZIP */ - u32 blocks; - u32 meta_blkaddr; -#ifdef CONFIG_EROFS_FS_XATTR - u32 xattr_blkaddr; -#endif - - /* inode slot unit size in bit shift */ - unsigned char islotbits; - - u32 build_time_nsec; - u64 build_time; - - /* what we really care is nid, rather than ino.. */ - erofs_nid_t root_nid; - /* used for statfs, f_files - f_favail */ - u64 inos; - - u8 uuid[16]; /* 128-bit uuid for volume */ - u8 volume_name[16]; /* volume name */ - u32 requirements; - - unsigned int mount_opt; - -#ifdef CONFIG_EROFS_FAULT_INJECTION - struct erofs_fault_info fault_info; /* For fault injection */ -#endif -}; - -#ifdef CONFIG_EROFS_FAULT_INJECTION -#define erofs_show_injection_info(type) \ - infoln("inject %s in %s of %pS", erofs_fault_name[type], \ - __func__, __builtin_return_address(0)) - -static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) -{ - struct erofs_fault_info *ffi = &sbi->fault_info; - - if (!ffi->inject_rate) - return false; - - if (!IS_FAULT_SET(ffi, type)) - return false; - - atomic_inc(&ffi->inject_ops); - if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { - atomic_set(&ffi->inject_ops, 0); - return true; - } - return false; -} -#else -static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) -{ - return false; -} - -static inline void erofs_show_injection_info(int type) -{ -} -#endif /* !CONFIG_EROFS_FAULT_INJECTION */ - -static inline void *erofs_kmalloc(struct erofs_sb_info *sbi, - size_t size, gfp_t flags) -{ - if (time_to_inject(sbi, FAULT_KMALLOC)) { - erofs_show_injection_info(FAULT_KMALLOC); - return NULL; - } - return kmalloc(size, flags); -} - -#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) -#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) - -/* Mount flags set via mount options or defaults */ -#define EROFS_MOUNT_XATTR_USER 0x00000010 -#define EROFS_MOUNT_POSIX_ACL 0x00000020 -#define EROFS_MOUNT_FAULT_INJECTION 0x00000040 - -#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) -#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) -#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) - -#ifdef CONFIG_EROFS_FS_ZIP -enum { - EROFS_ZIP_CACHE_DISABLED, - EROFS_ZIP_CACHE_READAHEAD, - 
EROFS_ZIP_CACHE_READAROUND -}; - -#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL) - -/* basic unit of the workstation of a super_block */ -struct erofs_workgroup { - /* the workgroup index in the workstation */ - pgoff_t index; - - /* overall workgroup reference count */ - atomic_t refcount; -}; - -#if defined(CONFIG_SMP) -static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) -{ - preempt_disable(); - if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) { - preempt_enable(); - return false; - } - return true; -} - -static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, - int orig_val) -{ - /* - * other observers should notice all modifications - * in the freezing period. - */ - smp_mb(); - atomic_set(&grp->refcount, orig_val); - preempt_enable(); -} - -static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) -{ - return atomic_cond_read_relaxed(&grp->refcount, - VAL != EROFS_LOCKED_MAGIC); -} -#else -static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) -{ - preempt_disable(); - /* no need to spin on UP platforms, let's just disable preemption. */ - if (val != atomic_read(&grp->refcount)) { - preempt_enable(); - return false; - } - return true; -} - -static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, - int orig_val) -{ - preempt_enable(); -} - -static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) -{ - int v = atomic_read(&grp->refcount); - - /* workgroup is never freezed on uniprocessor systems */ - DBG_BUGON(v == EROFS_LOCKED_MAGIC); - return v; -} -#endif /* !CONFIG_SMP */ - -/* hard limit of pages per compressed cluster */ -#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT) -#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES -#else -#define EROFS_PCPUBUF_NR_PAGES 0 -#endif /* !CONFIG_EROFS_FS_ZIP */ - -/* we strictly follow PAGE_SIZE and no buffer head yet */ -#define LOG_BLOCK_SIZE PAGE_SHIFT - -#undef LOG_SECTORS_PER_BLOCK -#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) - -#undef SECTORS_PER_BLOCK -#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK) - -#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE) - -#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ) -#error erofs cannot be used in this platform -#endif - -#define EROFS_IO_MAX_RETRIES_NOFAIL 5 - -#define ROOT_NID(sb) ((sb)->root_nid) - -#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ) -#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ) -#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ) - -static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) -{ - return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits); -} - -/* atomic flag definitions */ -#define EROFS_V_EA_INITED_BIT 0 -#define EROFS_V_Z_INITED_BIT 1 - -/* bitlock definitions (arranged in reverse order) */ -#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1) -#define EROFS_V_BL_Z_BIT (BITS_PER_LONG - 2) - -struct erofs_vnode { - erofs_nid_t nid; - - /* atomic flags (including bitlocks) */ - unsigned long flags; - - unsigned char datamode; - unsigned char inode_isize; - unsigned short xattr_isize; - - unsigned int xattr_shared_count; - unsigned int *xattr_shared_xattrs; - - union { - erofs_blk_t raw_blkaddr; -#ifdef CONFIG_EROFS_FS_ZIP - struct { - unsigned short z_advise; - unsigned char z_algorithmtype[2]; - unsigned char z_logical_clusterbits; - unsigned char z_physical_clusterbits[2]; - }; -#endif /* CONFIG_EROFS_FS_ZIP */ - }; - /* the corresponding vfs 
inode */ - struct inode vfs_inode; -}; - -#define EROFS_V(ptr) \ - container_of(ptr, struct erofs_vnode, vfs_inode) - -#define __inode_advise(x, bit, bits) \ - (((x) >> (bit)) & ((1 << (bits)) - 1)) - -#define __inode_version(advise) \ - __inode_advise(advise, EROFS_I_VERSION_BIT, \ - EROFS_I_VERSION_BITS) - -#define __inode_data_mapping(advise) \ - __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\ - EROFS_I_DATA_MAPPING_BITS) - -static inline unsigned long inode_datablocks(struct inode *inode) -{ - /* since i_size cannot be changed */ - return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); -} - -static inline bool is_inode_layout_compression(struct inode *inode) -{ - return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode); -} - -static inline bool is_inode_flat_inline(struct inode *inode) -{ - return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE; -} - -extern const struct super_operations erofs_sops; - -extern const struct address_space_operations erofs_raw_access_aops; -#ifdef CONFIG_EROFS_FS_ZIP -extern const struct address_space_operations z_erofs_vle_normalaccess_aops; -#endif - -/* - * Logical to physical block mapping, used by erofs_map_blocks() - * - * Different with other file systems, it is used for 2 access modes: - * - * 1) RAW access mode: - * - * Users pass a valid (m_lblk, m_lofs -- usually 0) pair, - * and get the valid m_pblk, m_pofs and the longest m_len(in bytes). - * - * Note that m_lblk in the RAW access mode refers to the number of - * the compressed ondisk block rather than the uncompressed - * in-memory block for the compressed file. - * - * m_pofs equals to m_lofs except for the inline data page. - * - * 2) Normal access mode: - * - * If the inode is not compressed, it has no difference with - * the RAW access mode. However, if the inode is compressed, - * users should pass a valid (m_lblk, m_lofs) pair, and get - * the needed m_pblk, m_pofs, m_len to get the compressed data - * and the updated m_lblk, m_lofs which indicates the start - * of the corresponding uncompressed data in the file. - */ -enum { - BH_Zipped = BH_PrivateStart, - BH_FullMapped, -}; - -/* Has a disk mapping */ -#define EROFS_MAP_MAPPED (1 << BH_Mapped) -/* Located in metadata (could be copied from bd_inode) */ -#define EROFS_MAP_META (1 << BH_Meta) -/* The extent has been compressed */ -#define EROFS_MAP_ZIPPED (1 << BH_Zipped) -/* The length of extent is full */ -#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped) - -struct erofs_map_blocks { - erofs_off_t m_pa, m_la; - u64 m_plen, m_llen; - - unsigned int m_flags; - - struct page *mpage; -}; - -/* Flags used by erofs_map_blocks() */ -#define EROFS_GET_BLOCKS_RAW 0x0001 - -/* zmap.c */ -#ifdef CONFIG_EROFS_FS_ZIP -int z_erofs_fill_inode(struct inode *inode); -int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags); -#else -static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; } -static inline int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - return -EOPNOTSUPP; -} -#endif /* !CONFIG_EROFS_FS_ZIP */ - -/* data.c */ -static inline struct bio *erofs_grab_bio(struct super_block *sb, - erofs_blk_t blkaddr, - unsigned int nr_pages, - void *bi_private, bio_end_io_t endio, - bool nofail) -{ - const gfp_t gfp = GFP_NOIO; - struct bio *bio; - - do { - if (nr_pages == 1) { - bio = bio_alloc(gfp | (nofail ? 
__GFP_NOFAIL : 0), 1); - if (unlikely(!bio)) { - DBG_BUGON(nofail); - return ERR_PTR(-ENOMEM); - } - break; - } - bio = bio_alloc(gfp, nr_pages); - nr_pages /= 2; - } while (unlikely(!bio)); - - bio->bi_end_io = endio; - bio_set_dev(bio, sb->s_bdev); - bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK; - bio->bi_private = bi_private; - return bio; -} - -static inline void __submit_bio(struct bio *bio, unsigned int op, - unsigned int op_flags) -{ - bio_set_op_attrs(bio, op, op_flags); - submit_bio(bio); -} - -struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, - bool prio, bool nofail); - -static inline struct page *erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio) -{ - return __erofs_get_meta_page(sb, blkaddr, prio, false); -} - -int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); - -static inline struct page *erofs_get_inline_page(struct inode *inode, - erofs_blk_t blkaddr) -{ - return erofs_get_meta_page(inode->i_sb, blkaddr, - S_ISDIR(inode->i_mode)); -} - -/* inode.c */ -static inline unsigned long erofs_inode_hash(erofs_nid_t nid) -{ -#if BITS_PER_LONG == 32 - return (nid >> 32) ^ (nid & 0xffffffff); -#else - return nid; -#endif -} - -extern const struct inode_operations erofs_generic_iops; -extern const struct inode_operations erofs_symlink_iops; -extern const struct inode_operations erofs_fast_symlink_iops; - -static inline void set_inode_fast_symlink(struct inode *inode) -{ - inode->i_op = &erofs_fast_symlink_iops; -} - -static inline bool is_inode_fast_symlink(struct inode *inode) -{ - return inode->i_op == &erofs_fast_symlink_iops; -} - -struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); -int erofs_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int query_flags); - -/* namei.c */ -extern const struct inode_operations erofs_dir_iops; - -int erofs_namei(struct inode *dir, struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type); - -/* dir.c */ -extern const struct file_operations erofs_dir_fops; - -/* utils.c / zdata.c */ -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); - -#if (EROFS_PCPUBUF_NR_PAGES > 0) -void *erofs_get_pcpubuf(unsigned int pagenr); -#define erofs_put_pcpubuf(buf) do { \ - (void)&(buf); \ - preempt_enable(); \ -} while (0) -#else -static inline void *erofs_get_pcpubuf(unsigned int pagenr) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -#define erofs_put_pcpubuf(buf) do {} while (0) -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -int erofs_workgroup_put(struct erofs_workgroup *grp); -struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, - pgoff_t index, bool *tag); -int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, bool tag); -void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); -void erofs_shrinker_register(struct super_block *sb); -void erofs_shrinker_unregister(struct super_block *sb); -int __init erofs_init_shrinker(void); -void erofs_exit_shrinker(void); -int __init z_erofs_init_zip_subsystem(void); -void z_erofs_exit_zip_subsystem(void); -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *egrp); -int erofs_try_to_free_cached_page(struct address_space *mapping, - struct page *page); -#else -static inline void erofs_shrinker_register(struct super_block *sb) {} -static inline void erofs_shrinker_unregister(struct super_block *sb) {} -static inline int erofs_init_shrinker(void) { return 0; } -static 
inline void erofs_exit_shrinker(void) {} -static inline int z_erofs_init_zip_subsystem(void) { return 0; } -static inline void z_erofs_exit_zip_subsystem(void) {} -#endif /* !CONFIG_EROFS_FS_ZIP */ - -#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ - -#endif /* __EROFS_INTERNAL_H */ - diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c deleted file mode 100644 index 8334a910acef..000000000000 --- a/drivers/staging/erofs/namei.c +++ /dev/null @@ -1,253 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/namei.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "xattr.h" - -#include - -struct erofs_qstr { - const unsigned char *name; - const unsigned char *end; -}; - -/* based on the end of qn is accurate and it must have the trailing '\0' */ -static inline int dirnamecmp(const struct erofs_qstr *qn, - const struct erofs_qstr *qd, - unsigned int *matched) -{ - unsigned int i = *matched; - - /* - * on-disk error, let's only BUG_ON in the debugging mode. - * otherwise, it will return 1 to just skip the invalid name - * and go on (in consideration of the lookup performance). - */ - DBG_BUGON(qd->name > qd->end); - - /* qd could not have trailing '\0' */ - /* However it is absolutely safe if < qd->end */ - while (qd->name + i < qd->end && qd->name[i] != '\0') { - if (qn->name[i] != qd->name[i]) { - *matched = i; - return qn->name[i] > qd->name[i] ? 1 : -1; - } - ++i; - } - *matched = i; - /* See comments in __d_alloc on the terminating NUL character */ - return qn->name[i] == '\0' ? 0 : 1; -} - -#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) - -static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, - u8 *data, - unsigned int dirblksize, - const int ndirents) -{ - int head, back; - unsigned int startprfx, endprfx; - struct erofs_dirent *const de = (struct erofs_dirent *)data; - - /* since the 1st dirent has been evaluated previously */ - head = 1; - back = ndirents - 1; - startprfx = endprfx = 0; - - while (head <= back) { - const int mid = head + (back - head) / 2; - const int nameoff = nameoff_from_disk(de[mid].nameoff, - dirblksize); - unsigned int matched = min(startprfx, endprfx); - struct erofs_qstr dname = { - .name = data + nameoff, - .end = unlikely(mid >= ndirents - 1) ? 
- data + dirblksize : - data + nameoff_from_disk(de[mid + 1].nameoff, - dirblksize) - }; - - /* string comparison without already matched prefix */ - int ret = dirnamecmp(name, &dname, &matched); - - if (unlikely(!ret)) { - return de + mid; - } else if (ret > 0) { - head = mid + 1; - startprfx = matched; - } else { - back = mid - 1; - endprfx = matched; - } - } - - return ERR_PTR(-ENOENT); -} - -static struct page *find_target_block_classic(struct inode *dir, - struct erofs_qstr *name, - int *_ndirents) -{ - unsigned int startprfx, endprfx; - int head, back; - struct address_space *const mapping = dir->i_mapping; - struct page *candidate = ERR_PTR(-ENOENT); - - startprfx = endprfx = 0; - head = 0; - back = inode_datablocks(dir) - 1; - - while (head <= back) { - const int mid = head + (back - head) / 2; - struct page *page = read_mapping_page(mapping, mid, NULL); - - if (!IS_ERR(page)) { - struct erofs_dirent *de = kmap_atomic(page); - const int nameoff = nameoff_from_disk(de->nameoff, - EROFS_BLKSIZ); - const int ndirents = nameoff / sizeof(*de); - int diff; - unsigned int matched; - struct erofs_qstr dname; - - if (unlikely(!ndirents)) { - kunmap_atomic(de); - put_page(page); - errln("corrupted dir block %d @ nid %llu", - mid, EROFS_V(dir)->nid); - DBG_BUGON(1); - page = ERR_PTR(-EFSCORRUPTED); - goto out; - } - - matched = min(startprfx, endprfx); - - dname.name = (u8 *)de + nameoff; - if (ndirents == 1) - dname.end = (u8 *)de + EROFS_BLKSIZ; - else - dname.end = (u8 *)de + - nameoff_from_disk(de[1].nameoff, - EROFS_BLKSIZ); - - /* string comparison without already matched prefix */ - diff = dirnamecmp(name, &dname, &matched); - kunmap_atomic(de); - - if (unlikely(!diff)) { - *_ndirents = 0; - goto out; - } else if (diff > 0) { - head = mid + 1; - startprfx = matched; - - if (!IS_ERR(candidate)) - put_page(candidate); - candidate = page; - *_ndirents = ndirents; - } else { - put_page(page); - - back = mid - 1; - endprfx = matched; - } - continue; - } -out: /* free if the candidate is valid */ - if (!IS_ERR(candidate)) - put_page(candidate); - return page; - } - return candidate; -} - -int erofs_namei(struct inode *dir, - struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type) -{ - int ndirents; - struct page *page; - void *data; - struct erofs_dirent *de; - struct erofs_qstr qn; - - if (unlikely(!dir->i_size)) - return -ENOENT; - - qn.name = name->name; - qn.end = name->name + name->len; - - ndirents = 0; - page = find_target_block_classic(dir, &qn, &ndirents); - - if (IS_ERR(page)) - return PTR_ERR(page); - - data = kmap_atomic(page); - /* the target page has been mapped */ - if (ndirents) - de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); - else - de = (struct erofs_dirent *)data; - - if (!IS_ERR(de)) { - *nid = le64_to_cpu(de->nid); - *d_type = de->file_type; - } - - kunmap_atomic(data); - put_page(page); - - return PTR_ERR_OR_ZERO(de); -} - -/* NOTE: i_mutex is already held by vfs */ -static struct dentry *erofs_lookup(struct inode *dir, - struct dentry *dentry, - unsigned int flags) -{ - int err; - erofs_nid_t nid; - unsigned int d_type; - struct inode *inode; - - DBG_BUGON(!d_really_is_negative(dentry)); - /* dentry must be unhashed in lookup, no need to worry about */ - DBG_BUGON(!d_unhashed(dentry)); - - trace_erofs_lookup(dir, dentry, flags); - - /* file name exceeds fs limit */ - if (unlikely(dentry->d_name.len > EROFS_NAME_LEN)) - return ERR_PTR(-ENAMETOOLONG); - - /* false uninitialized warnings on gcc 4.8.x */ - err = erofs_namei(dir, &dentry->d_name, &nid, 
&d_type); - - if (err == -ENOENT) { - /* negative dentry */ - inode = NULL; - } else if (unlikely(err)) { - inode = ERR_PTR(err); - } else { - debugln("%s, %s (nid %llu) found, d_type %u", __func__, - dentry->d_name.name, nid, d_type); - inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR); - } - return d_splice_alias(inode, dentry); -} - -const struct inode_operations erofs_dir_iops = { - .lookup = erofs_lookup, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c deleted file mode 100644 index 2da471010a86..000000000000 --- a/drivers/staging/erofs/super.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/super.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include -#include -#include -#include -#include -#include "xattr.h" - -#define CREATE_TRACE_POINTS -#include - -static struct kmem_cache *erofs_inode_cachep __read_mostly; - -static void init_once(void *ptr) -{ - struct erofs_vnode *vi = ptr; - - inode_init_once(&vi->vfs_inode); -} - -static int __init erofs_init_inode_cache(void) -{ - erofs_inode_cachep = kmem_cache_create("erofs_inode", - sizeof(struct erofs_vnode), 0, - SLAB_RECLAIM_ACCOUNT, - init_once); - - return erofs_inode_cachep ? 0 : -ENOMEM; -} - -static void erofs_exit_inode_cache(void) -{ - kmem_cache_destroy(erofs_inode_cachep); -} - -static struct inode *alloc_inode(struct super_block *sb) -{ - struct erofs_vnode *vi = - kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); - - if (!vi) - return NULL; - - /* zero out everything except vfs_inode */ - memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode)); - return &vi->vfs_inode; -} - -static void free_inode(struct inode *inode) -{ - struct erofs_vnode *vi = EROFS_V(inode); - - /* be careful RCU symlink path (see ext4_inode_info->i_data)! 
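The directory lookup above leans on a classic trick for binary searching sorted strings: any name between the current bounds shares at least min(startprfx, endprfx) leading bytes with the target, so dirnamecmp() can resume comparing from that offset instead of byte 0. A minimal user-space sketch of the same idea over a plain sorted string array (names and helpers here are illustrative, not the on-disk dirent layout):

        #include <stdio.h>
        #include <string.h>

        /*
         * Compare starting at the prefix already known to match.
         * *matched is advanced to the common-prefix length found,
         * mirroring the contract of dirnamecmp().
         */
        static int prefix_cmp(const char *q, const char *d, unsigned int *matched)
        {
                unsigned int i = *matched;

                while (d[i] != '\0') {
                        if (q[i] != d[i]) {
                                *matched = i;
                                return q[i] > d[i] ? 1 : -1;
                        }
                        ++i;
                }
                *matched = i;
                return q[i] == '\0' ? 0 : 1;
        }

        static int find_name(const char *q, const char *const names[], int n)
        {
                int head = 0, back = n - 1;
                unsigned int startprfx = 0, endprfx = 0;

                while (head <= back) {
                        const int mid = head + (back - head) / 2;
                        unsigned int matched = startprfx < endprfx ? startprfx : endprfx;
                        int ret = prefix_cmp(q, names[mid], &matched);

                        if (!ret)
                                return mid;
                        if (ret > 0) {
                                head = mid + 1;
                                startprfx = matched;    /* left bound shares this much */
                        } else {
                                back = mid - 1;
                                endprfx = matched;      /* right bound shares this much */
                        }
                }
                return -1;
        }

        int main(void)
        {
                static const char *const names[] = {
                        "aardvark", "abacus", "abbey", "abide", "zebra"
                };

                printf("%d\n", find_name("abbey", names, 5));   /* 2 */
                printf("%d\n", find_name("abyss", names, 5));   /* -1 */
                return 0;
        }

Both find_target_dirent() and find_target_block_classic() maintain exactly this pair of prefix lengths, one per search bound, which keeps the comparison cost near O(log n + |name|) rather than O(|name| * log n).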
*/ - if (is_inode_fast_symlink(inode)) - kfree(inode->i_link); - - kfree(vi->xattr_shared_xattrs); - - kmem_cache_free(erofs_inode_cachep, vi); -} - -static bool check_layout_compatibility(struct super_block *sb, - struct erofs_super_block *layout) -{ - const unsigned int requirements = le32_to_cpu(layout->requirements); - - EROFS_SB(sb)->requirements = requirements; - - /* check if current kernel meets all mandatory requirements */ - if (requirements & (~EROFS_ALL_REQUIREMENTS)) { - errln("unidentified requirements %x, please upgrade kernel version", - requirements & ~EROFS_ALL_REQUIREMENTS); - return false; - } - return true; -} - -static int superblock_read(struct super_block *sb) -{ - struct erofs_sb_info *sbi; - struct buffer_head *bh; - struct erofs_super_block *layout; - unsigned int blkszbits; - int ret; - - bh = sb_bread(sb, 0); - - if (!bh) { - errln("cannot read erofs superblock"); - return -EIO; - } - - sbi = EROFS_SB(sb); - layout = (struct erofs_super_block *)((u8 *)bh->b_data - + EROFS_SUPER_OFFSET); - - ret = -EINVAL; - if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) { - errln("cannot find valid erofs superblock"); - goto out; - } - - blkszbits = layout->blkszbits; - /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ - if (unlikely(blkszbits != LOG_BLOCK_SIZE)) { - errln("blksize %u isn't supported on this platform", - 1 << blkszbits); - goto out; - } - - if (!check_layout_compatibility(sb, layout)) - goto out; - - sbi->blocks = le32_to_cpu(layout->blocks); - sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); -#ifdef CONFIG_EROFS_FS_XATTR - sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr); -#endif - sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1; - sbi->root_nid = le16_to_cpu(layout->root_nid); - sbi->inos = le64_to_cpu(layout->inos); - - sbi->build_time = le64_to_cpu(layout->build_time); - sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec); - - memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid)); - - ret = strscpy(sbi->volume_name, layout->volume_name, - sizeof(layout->volume_name)); - if (ret < 0) { /* -E2BIG */ - errln("bad volume name without NIL terminator"); - ret = -EFSCORRUPTED; - goto out; - } - ret = 0; -out: - brelse(bh); - return ret; -} - -#ifdef CONFIG_EROFS_FAULT_INJECTION -const char *erofs_fault_name[FAULT_MAX] = { - [FAULT_KMALLOC] = "kmalloc", - [FAULT_READ_IO] = "read IO error", -}; - -static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, - unsigned int rate) -{ - struct erofs_fault_info *ffi = &sbi->fault_info; - - if (rate) { - atomic_set(&ffi->inject_ops, 0); - ffi->inject_rate = rate; - ffi->inject_type = (1 << FAULT_MAX) - 1; - } else { - memset(ffi, 0, sizeof(struct erofs_fault_info)); - } - - set_opt(sbi, FAULT_INJECTION); -} - -static int erofs_build_fault_attr(struct erofs_sb_info *sbi, - substring_t *args) -{ - int rate = 0; - - if (args->from && match_int(args, &rate)) - return -EINVAL; - - __erofs_build_fault_attr(sbi, rate); - return 0; -} - -static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) -{ - return sbi->fault_info.inject_rate; -} -#else -static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, - unsigned int rate) -{ -} - -static int erofs_build_fault_attr(struct erofs_sb_info *sbi, - substring_t *args) -{ - infoln("fault_injection options not supported"); - return 0; -} - -static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) -{ - return 0; -} -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -static int erofs_build_cache_strategy(struct 
erofs_sb_info *sbi, - substring_t *args) -{ - const char *cs = match_strdup(args); - int err = 0; - - if (!cs) { - errln("Not enough memory to store cache strategy"); - return -ENOMEM; - } - - if (!strcmp(cs, "disabled")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; - } else if (!strcmp(cs, "readahead")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; - } else if (!strcmp(cs, "readaround")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - } else { - errln("Unrecognized cache strategy \"%s\"", cs); - err = -EINVAL; - } - kfree(cs); - return err; -} -#else -static int erofs_build_cache_strategy(struct erofs_sb_info *sbi, - substring_t *args) -{ - infoln("EROFS compression is disabled, so cache strategy is ignored"); - return 0; -} -#endif - -/* set up default EROFS parameters */ -static void default_options(struct erofs_sb_info *sbi) -{ -#ifdef CONFIG_EROFS_FS_ZIP - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - sbi->max_sync_decompress_pages = 3; -#endif -#ifdef CONFIG_EROFS_FS_XATTR - set_opt(sbi, XATTR_USER); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - set_opt(sbi, POSIX_ACL); -#endif -} - -enum { - Opt_user_xattr, - Opt_nouser_xattr, - Opt_acl, - Opt_noacl, - Opt_fault_injection, - Opt_cache_strategy, - Opt_err -}; - -static match_table_t erofs_tokens = { - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_fault_injection, "fault_injection=%u"}, - {Opt_cache_strategy, "cache_strategy=%s"}, - {Opt_err, NULL} -}; - -static int parse_options(struct super_block *sb, char *options) -{ - substring_t args[MAX_OPT_ARGS]; - char *p; - int err; - - if (!options) - return 0; - - while ((p = strsep(&options, ","))) { - int token; - - if (!*p) - continue; - - args[0].to = args[0].from = NULL; - token = match_token(p, erofs_tokens, args); - - switch (token) { -#ifdef CONFIG_EROFS_FS_XATTR - case Opt_user_xattr: - set_opt(EROFS_SB(sb), XATTR_USER); - break; - case Opt_nouser_xattr: - clear_opt(EROFS_SB(sb), XATTR_USER); - break; -#else - case Opt_user_xattr: - infoln("user_xattr options not supported"); - break; - case Opt_nouser_xattr: - infoln("nouser_xattr options not supported"); - break; -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - case Opt_acl: - set_opt(EROFS_SB(sb), POSIX_ACL); - break; - case Opt_noacl: - clear_opt(EROFS_SB(sb), POSIX_ACL); - break; -#else - case Opt_acl: - infoln("acl options not supported"); - break; - case Opt_noacl: - infoln("noacl options not supported"); - break; -#endif - case Opt_fault_injection: - err = erofs_build_fault_attr(EROFS_SB(sb), args); - if (err) - return err; - break; - case Opt_cache_strategy: - err = erofs_build_cache_strategy(EROFS_SB(sb), args); - if (err) - return err; - break; - default: - errln("Unrecognized mount option \"%s\" or missing value", p); - return -EINVAL; - } - } - return 0; -} - -#ifdef CONFIG_EROFS_FS_ZIP -static const struct address_space_operations managed_cache_aops; - -static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) -{ - int ret = 1; /* 0 - busy */ - struct address_space *const mapping = page->mapping; - - DBG_BUGON(!PageLocked(page)); - DBG_BUGON(mapping->a_ops != &managed_cache_aops); - - if (PagePrivate(page)) - ret = erofs_try_to_free_cached_page(mapping, page); - - return ret; -} - -static void managed_cache_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) -{ - const unsigned int stop = length + offset; - - DBG_BUGON(!PageLocked(page)); - - /* Check for potential overflow in 
debug mode */ - DBG_BUGON(stop > PAGE_SIZE || stop < length); - - if (offset == 0 && stop == PAGE_SIZE) - while (!managed_cache_releasepage(page, GFP_NOFS)) - cond_resched(); -} - -static const struct address_space_operations managed_cache_aops = { - .releasepage = managed_cache_releasepage, - .invalidatepage = managed_cache_invalidatepage, -}; - -static int erofs_init_managed_cache(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - struct inode *const inode = new_inode(sb); - - if (unlikely(!inode)) - return -ENOMEM; - - set_nlink(inode, 1); - inode->i_size = OFFSET_MAX; - - inode->i_mapping->a_ops = &managed_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, - GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); - sbi->managed_cache = inode; - return 0; -} -#else -static int erofs_init_managed_cache(struct super_block *sb) { return 0; } -#endif - -static int erofs_fill_super(struct super_block *sb, void *data, int silent) -{ - struct inode *inode; - struct erofs_sb_info *sbi; - int err; - - infoln("fill_super, device -> %s", sb->s_id); - infoln("options -> %s", (char *)data); - - sb->s_magic = EROFS_SUPER_MAGIC; - - if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) { - errln("failed to set erofs blksize"); - return -EINVAL; - } - - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (unlikely(!sbi)) - return -ENOMEM; - - sb->s_fs_info = sbi; - err = superblock_read(sb); - if (err) - return err; - - sb->s_flags |= SB_RDONLY | SB_NOATIME; - sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_time_gran = 1; - - sb->s_op = &erofs_sops; - -#ifdef CONFIG_EROFS_FS_XATTR - sb->s_xattr = erofs_xattr_handlers; -#endif - /* set erofs default mount options */ - default_options(sbi); - - err = parse_options(sb, data); - if (unlikely(err)) - return err; - - if (!silent) - infoln("root inode @ nid %llu", ROOT_NID(sbi)); - - if (test_opt(sbi, POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - -#ifdef CONFIG_EROFS_FS_ZIP - INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); -#endif - - /* get the root inode */ - inode = erofs_iget(sb, ROOT_NID(sbi), true); - if (IS_ERR(inode)) - return PTR_ERR(inode); - - if (unlikely(!S_ISDIR(inode->i_mode))) { - errln("rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); - iput(inode); - return -EINVAL; - } - - sb->s_root = d_make_root(inode); - if (unlikely(!sb->s_root)) - return -ENOMEM; - - erofs_shrinker_register(sb); - /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ - err = erofs_init_managed_cache(sb); - if (unlikely(err)) - return err; - - if (!silent) - infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data); - return 0; -} - -static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) -{ - return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); -} - -/* - * could be triggered after deactivate_locked_super() - * is called, thus including umount and failed to initialize. 
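The parse_options() loop above follows the usual kernel shape: strsep() splits the comma-separated string, match_token() classifies each piece against a pattern table, and parameterized options carry their argument after '='. A rough user-space equivalent, with a hand-rolled matcher standing in for match_token() (option names mirror the erofs table; the helpers are simplified stand-ins):

        #define _DEFAULT_SOURCE /* for strsep() on glibc */
        #include <stdio.h>
        #include <string.h>

        enum { OPT_ACL, OPT_NOACL, OPT_CACHE_STRATEGY, OPT_ERR };

        static const struct { int token; const char *pattern; } tokens[] = {
                { OPT_ACL,            "acl" },
                { OPT_NOACL,          "noacl" },
                { OPT_CACHE_STRATEGY, "cache_strategy=" },      /* takes an argument */
        };

        /* return the token id and point *arg at the value after '=' (if any) */
        static int match_opt(char *p, char **arg)
        {
                size_t i;

                *arg = NULL;
                for (i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++) {
                        const char *pat = tokens[i].pattern;
                        size_t len = strlen(pat);

                        if (pat[len - 1] == '=') {      /* parameterized option */
                                if (!strncmp(p, pat, len)) {
                                        *arg = p + len;
                                        return tokens[i].token;
                                }
                        } else if (!strcmp(p, pat)) {
                                return tokens[i].token;
                        }
                }
                return OPT_ERR;
        }

        int main(void)
        {
                char options[] = "acl,cache_strategy=readaround";
                char *o = options, *p, *arg;

                while ((p = strsep(&o, ","))) {
                        if (!*p)
                                continue;       /* skip empty slots as parse_options() does */
                        switch (match_opt(p, &arg)) {
                        case OPT_ACL:
                                puts("POSIX ACLs enabled");
                                break;
                        case OPT_NOACL:
                                puts("POSIX ACLs disabled");
                                break;
                        case OPT_CACHE_STRATEGY:
                                printf("cache strategy: %s\n", arg);
                                break;
                        default:
                                fprintf(stderr, "unrecognized option \"%s\"\n", p);
                                return 1;
                        }
                }
                return 0;
        }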
- */ -static void erofs_kill_sb(struct super_block *sb) -{ - struct erofs_sb_info *sbi; - - WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); - infoln("unmounting for %s", sb->s_id); - - kill_block_super(sb); - - sbi = EROFS_SB(sb); - if (!sbi) - return; - kfree(sbi); - sb->s_fs_info = NULL; -} - -/* called when ->s_root is non-NULL */ -static void erofs_put_super(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - - DBG_BUGON(!sbi); - - erofs_shrinker_unregister(sb); -#ifdef CONFIG_EROFS_FS_ZIP - iput(sbi->managed_cache); - sbi->managed_cache = NULL; -#endif -} - -static struct file_system_type erofs_fs_type = { - .owner = THIS_MODULE, - .name = "erofs", - .mount = erofs_mount, - .kill_sb = erofs_kill_sb, - .fs_flags = FS_REQUIRES_DEV, -}; -MODULE_ALIAS_FS("erofs"); - -static int __init erofs_module_init(void) -{ - int err; - - erofs_check_ondisk_layout_definitions(); - infoln("initializing erofs " EROFS_VERSION); - - err = erofs_init_inode_cache(); - if (err) - goto icache_err; - - err = erofs_init_shrinker(); - if (err) - goto shrinker_err; - - err = z_erofs_init_zip_subsystem(); - if (err) - goto zip_err; - - err = register_filesystem(&erofs_fs_type); - if (err) - goto fs_err; - - infoln("successfully to initialize erofs"); - return 0; - -fs_err: - z_erofs_exit_zip_subsystem(); -zip_err: - erofs_exit_shrinker(); -shrinker_err: - erofs_exit_inode_cache(); -icache_err: - return err; -} - -static void __exit erofs_module_exit(void) -{ - unregister_filesystem(&erofs_fs_type); - z_erofs_exit_zip_subsystem(); - erofs_exit_shrinker(); - erofs_exit_inode_cache(); - infoln("successfully finalize erofs"); -} - -/* get filesystem statistics */ -static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) -{ - struct super_block *sb = dentry->d_sb; - struct erofs_sb_info *sbi = EROFS_SB(sb); - u64 id = huge_encode_dev(sb->s_bdev->bd_dev); - - buf->f_type = sb->s_magic; - buf->f_bsize = EROFS_BLKSIZ; - buf->f_blocks = sbi->blocks; - buf->f_bfree = buf->f_bavail = 0; - - buf->f_files = ULLONG_MAX; - buf->f_ffree = ULLONG_MAX - sbi->inos; - - buf->f_namelen = EROFS_NAME_LEN; - - buf->f_fsid.val[0] = (u32)id; - buf->f_fsid.val[1] = (u32)(id >> 32); - return 0; -} - -static int erofs_show_options(struct seq_file *seq, struct dentry *root) -{ - struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); - -#ifdef CONFIG_EROFS_FS_XATTR - if (test_opt(sbi, XATTR_USER)) - seq_puts(seq, ",user_xattr"); - else - seq_puts(seq, ",nouser_xattr"); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - if (test_opt(sbi, POSIX_ACL)) - seq_puts(seq, ",acl"); - else - seq_puts(seq, ",noacl"); -#endif - if (test_opt(sbi, FAULT_INJECTION)) - seq_printf(seq, ",fault_injection=%u", - erofs_get_fault_rate(sbi)); -#ifdef CONFIG_EROFS_FS_ZIP - if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { - seq_puts(seq, ",cache_strategy=disabled"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { - seq_puts(seq, ",cache_strategy=readahead"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { - seq_puts(seq, ",cache_strategy=readaround"); - } else { - seq_puts(seq, ",cache_strategy=(unknown)"); - DBG_BUGON(1); - } -#endif - return 0; -} - -static int erofs_remount(struct super_block *sb, int *flags, char *data) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - unsigned int org_mnt_opt = sbi->mount_opt; - unsigned int org_inject_rate = erofs_get_fault_rate(sbi); - int err; - - DBG_BUGON(!sb_rdonly(sb)); - err = parse_options(sb, data); - if (err) - goto out; - - if (test_opt(sbi, 
POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - - *flags |= SB_RDONLY; - return 0; -out: - __erofs_build_fault_attr(sbi, org_inject_rate); - sbi->mount_opt = org_mnt_opt; - - return err; -} - -const struct super_operations erofs_sops = { - .put_super = erofs_put_super, - .alloc_inode = alloc_inode, - .free_inode = free_inode, - .statfs = erofs_statfs, - .show_options = erofs_show_options, - .remount_fs = erofs_remount, -}; - -module_init(erofs_module_init); -module_exit(erofs_module_exit); - -MODULE_DESCRIPTION("Enhanced ROM File System"); -MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); -MODULE_LICENSE("GPL"); - diff --git a/drivers/staging/erofs/tagptr.h b/drivers/staging/erofs/tagptr.h deleted file mode 100644 index a72897c86744..000000000000 --- a/drivers/staging/erofs/tagptr.h +++ /dev/null @@ -1,110 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * A tagged pointer implementation - * - * Copyright (C) 2018 Gao Xiang - */ -#ifndef __EROFS_FS_TAGPTR_H -#define __EROFS_FS_TAGPTR_H - -#include -#include - -/* - * the name of tagged pointer types are tagptr{1, 2, 3...}_t - * avoid directly using the internal structs __tagptr{1, 2, 3...} - */ -#define __MAKE_TAGPTR(n) \ -typedef struct __tagptr##n { \ - uintptr_t v; \ -} tagptr##n##_t; - -__MAKE_TAGPTR(1) -__MAKE_TAGPTR(2) -__MAKE_TAGPTR(3) -__MAKE_TAGPTR(4) - -#undef __MAKE_TAGPTR - -extern void __compiletime_error("bad tagptr tags") - __bad_tagptr_tags(void); - -extern void __compiletime_error("bad tagptr type") - __bad_tagptr_type(void); - -/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */ -#define __tagptr_mask_1(ptr, n) \ - __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \ - (1UL << (n)) - 1 : - -#define __tagptr_mask(ptr) (\ - __tagptr_mask_1(ptr, 1) ( \ - __tagptr_mask_1(ptr, 2) ( \ - __tagptr_mask_1(ptr, 3) ( \ - __tagptr_mask_1(ptr, 4) ( \ - __bad_tagptr_type(), 0))))) - -/* generate a tagged pointer from a raw value */ -#define tagptr_init(type, val) \ - ((typeof(type)){ .v = (uintptr_t)(val) }) - -/* - * directly cast a tagged pointer to the native pointer type, which - * could be used for backward compatibility of existing code. 
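tagptr.h, which begins above, wraps one simple trick in compile-time type checking: an object aligned to 2^n bytes has n zero low bits in its address, so those bits can carry a small tag that is masked off on access. The fold/unfold arithmetic for a 2-bit tag in plain C (names echo the tagptr vocabulary, but this is a simplified stand-in without the compile-time checks):

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define TAG_BITS 2
        #define TAG_MASK ((1UL << TAG_BITS) - 1)

        /* fold: pack an aligned pointer and a small tag into one word */
        static uintptr_t fold(void *ptr, unsigned long tag)
        {
                uintptr_t v = (uintptr_t)ptr;

                if ((v & TAG_MASK) || (tag & ~TAG_MASK))
                        abort();        /* misaligned pointer or oversized tag */
                return v | tag;
        }

        static void *unfold_ptr(uintptr_t t)        { return (void *)(t & ~TAG_MASK); }
        static unsigned long unfold_tag(uintptr_t t) { return t & TAG_MASK; }

        int main(void)
        {
                int *obj = malloc(sizeof(*obj));        /* malloc is at least 4-byte aligned */
                uintptr_t t;

                *obj = 42;
                t = fold(obj, 3);       /* smuggle tag 3 in the low bits */

                printf("value %d, tag %lu\n", *(int *)unfold_ptr(t), unfold_tag(t));
                free(obj);
                return 0;
        }

The real macros additionally reject oversized constant tags at build time via __bad_tagptr_tags(), where this sketch can only abort() at run time.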
- */ -#define tagptr_cast_ptr(tptr) ((void *)(tptr).v) - -/* encode tagged pointers */ -#define tagptr_fold(type, ptr, _tags) ({ \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \ - __bad_tagptr_tags(); \ -tagptr_init(type, (uintptr_t)(ptr) | tags); }) - -/* decode tagged pointers */ -#define tagptr_unfold_ptr(tptr) \ - ((void *)((tptr).v & ~__tagptr_mask(tptr))) - -#define tagptr_unfold_tags(tptr) \ - ((tptr).v & __tagptr_mask(tptr)) - -/* operations for the tagger pointer */ -#define tagptr_eq(_tptr1, _tptr2) ({ \ - typeof(_tptr1) tptr1 = (_tptr1); \ - typeof(_tptr2) tptr2 = (_tptr2); \ - (void)(&tptr1 == &tptr2); \ -(tptr1).v == (tptr2).v; }) - -/* lock-free CAS operation */ -#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - typeof(_o) o = (_o); \ - typeof(_n) n = (_n); \ - (void)(&o == &n); \ - (void)(&o == ptptr); \ -tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); }) - -/* wrap WRITE_ONCE if atomic update is needed */ -#define tagptr_replace_tags(_ptptr, tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \ -*ptptr; }) - -#define tagptr_set_tags(_ptptr, _tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ - __bad_tagptr_tags(); \ - ptptr->v |= tags; \ -*ptptr; }) - -#define tagptr_clear_tags(_ptptr, _tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ - __bad_tagptr_tags(); \ - ptptr->v &= ~tags; \ -*ptptr; }) - -#endif /* __EROFS_FS_TAGPTR_H */ - diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c deleted file mode 100644 index 814c2ee037ae..000000000000 --- a/drivers/staging/erofs/utils.c +++ /dev/null @@ -1,335 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/utils.c - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include - -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) -{ - struct page *page; - - if (!list_empty(pool)) { - page = lru_to_page(pool); - DBG_BUGON(page_ref_count(page) != 1); - list_del(&page->lru); - } else { - page = alloc_pages(gfp | (nofail ? 
__GFP_NOFAIL : 0), 0); - } - return page; -} - -#if (EROFS_PCPUBUF_NR_PAGES > 0) -static struct { - u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES]; -} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; - -void *erofs_get_pcpubuf(unsigned int pagenr) -{ - preempt_disable(); - return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE]; -} -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -/* global shrink count (for all mounted EROFS instances) */ -static atomic_long_t erofs_global_shrink_cnt; - -#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) -#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) - -static int erofs_workgroup_get(struct erofs_workgroup *grp) -{ - int o; - -repeat: - o = erofs_wait_on_workgroup_freezed(grp); - if (unlikely(o <= 0)) - return -1; - - if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) - goto repeat; - - /* decrease refcount paired by erofs_workgroup_put */ - if (unlikely(o == 1)) - atomic_long_dec(&erofs_global_shrink_cnt); - return 0; -} - -struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, - pgoff_t index, bool *tag) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - struct erofs_workgroup *grp; - -repeat: - rcu_read_lock(); - grp = radix_tree_lookup(&sbi->workstn_tree, index); - if (grp) { - *tag = xa_pointer_tag(grp); - grp = xa_untag_pointer(grp); - - if (erofs_workgroup_get(grp)) { - /* prefer to relax rcu read side */ - rcu_read_unlock(); - goto repeat; - } - - DBG_BUGON(index != grp->index); - } - rcu_read_unlock(); - return grp; -} - -int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, - bool tag) -{ - struct erofs_sb_info *sbi; - int err; - - /* grp shouldn't be broken or used before */ - if (unlikely(atomic_read(&grp->refcount) != 1)) { - DBG_BUGON(1); - return -EINVAL; - } - - err = radix_tree_preload(GFP_NOFS); - if (err) - return err; - - sbi = EROFS_SB(sb); - xa_lock(&sbi->workstn_tree); - - grp = xa_tag_pointer(grp, tag); - - /* - * Bump up reference count before making this workgroup - * visible to other users in order to avoid potential UAF - * without serialized by workstn_lock. - */ - __erofs_workgroup_get(grp); - - err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp); - if (unlikely(err)) - /* - * it's safe to decrease since the workgroup isn't visible - * and refcount >= 2 (cannot be freezed). - */ - __erofs_workgroup_put(grp); - - xa_unlock(&sbi->workstn_tree); - radix_tree_preload_end(); - return err; -} - -static void __erofs_workgroup_free(struct erofs_workgroup *grp) -{ - atomic_long_dec(&erofs_global_shrink_cnt); - erofs_workgroup_free_rcu(grp); -} - -int erofs_workgroup_put(struct erofs_workgroup *grp) -{ - int count = atomic_dec_return(&grp->refcount); - - if (count == 1) - atomic_long_inc(&erofs_global_shrink_cnt); - else if (!count) - __erofs_workgroup_free(grp); - return count; -} - -static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) -{ - erofs_workgroup_unfreeze(grp, 0); - __erofs_workgroup_free(grp); -} - -static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp, - bool cleanup) -{ - /* - * If managed cache is on, refcount of workgroups - * themselves could be < 0 (freezed). In other words, - * there is no guarantee that all refcounts > 0. - */ - if (!erofs_workgroup_try_to_freeze(grp, 1)) - return false; - - /* - * Note that all cached pages should be unattached - * before deleted from the radix tree. 
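erofs_workgroup_get() above is a conditional reference grab: reload the count, bail out if the object is frozen or already dying, and only then cmpxchg count to count + 1, retrying on races. The same increment-unless-zero pattern in portable C11 atomics (a standalone sketch, not the kernel atomic_t API):

        #include <stdatomic.h>
        #include <stdio.h>

        /* try to take a reference; fail if the count already dropped to zero */
        static int get_unless_zero(atomic_int *refcount)
        {
                int o = atomic_load(refcount);

                while (o > 0) {
                        /* on failure, compare_exchange reloads o with the current value */
                        if (atomic_compare_exchange_weak(refcount, &o, o + 1))
                                return 1;       /* reference acquired */
                }
                return 0;       /* object is being torn down, do not touch it */
        }

        int main(void)
        {
                atomic_int ref = 1;

                printf("first get: %d (count %d)\n", get_unless_zero(&ref),
                       atomic_load(&ref));
                atomic_store(&ref, 0);
                printf("get after teardown: %d\n", get_unless_zero(&ref));
                return 0;
        }

The weak compare-exchange may fail spuriously, which is harmless here: the loop simply re-checks the freshly loaded value, exactly as the goto-repeat in erofs_workgroup_get() does.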
Otherwise some - * cached pages could be still attached to the orphan - * old workgroup when the new one is available in the tree. - */ - if (erofs_try_to_free_all_cached_pages(sbi, grp)) { - erofs_workgroup_unfreeze(grp, 1); - return false; - } - - /* - * It's impossible to fail after the workgroup is freezed, - * however in order to avoid some race conditions, add a - * DBG_BUGON to observe this in advance. - */ - DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, - grp->index)) != grp); - - /* - * If managed cache is on, last refcount should indicate - * the related workstation. - */ - erofs_workgroup_unfreeze_final(grp); - return true; -} - -static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, - unsigned long nr_shrink, - bool cleanup) -{ - pgoff_t first_index = 0; - void *batch[PAGEVEC_SIZE]; - unsigned int freed = 0; - - int i, found; -repeat: - xa_lock(&sbi->workstn_tree); - - found = radix_tree_gang_lookup(&sbi->workstn_tree, - batch, first_index, PAGEVEC_SIZE); - - for (i = 0; i < found; ++i) { - struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); - - first_index = grp->index + 1; - - /* try to shrink each valid workgroup */ - if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) - continue; - - ++freed; - if (unlikely(!--nr_shrink)) - break; - } - xa_unlock(&sbi->workstn_tree); - - if (i && nr_shrink) - goto repeat; - return freed; -} - -/* protected by 'erofs_sb_list_lock' */ -static unsigned int shrinker_run_no; - -/* protects the mounted 'erofs_sb_list' */ -static DEFINE_SPINLOCK(erofs_sb_list_lock); -static LIST_HEAD(erofs_sb_list); - -void erofs_shrinker_register(struct super_block *sb) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - - mutex_init(&sbi->umount_mutex); - - spin_lock(&erofs_sb_list_lock); - list_add(&sbi->list, &erofs_sb_list); - spin_unlock(&erofs_sb_list_lock); -} - -void erofs_shrinker_unregister(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - - mutex_lock(&sbi->umount_mutex); - erofs_shrink_workstation(sbi, ~0UL, true); - - spin_lock(&erofs_sb_list_lock); - list_del(&sbi->list); - spin_unlock(&erofs_sb_list_lock); - mutex_unlock(&sbi->umount_mutex); -} - -static unsigned long erofs_shrink_count(struct shrinker *shrink, - struct shrink_control *sc) -{ - return atomic_long_read(&erofs_global_shrink_cnt); -} - -static unsigned long erofs_shrink_scan(struct shrinker *shrink, - struct shrink_control *sc) -{ - struct erofs_sb_info *sbi; - struct list_head *p; - - unsigned long nr = sc->nr_to_scan; - unsigned int run_no; - unsigned long freed = 0; - - spin_lock(&erofs_sb_list_lock); - do { - run_no = ++shrinker_run_no; - } while (run_no == 0); - - /* Iterate over all mounted superblocks and try to shrink them */ - p = erofs_sb_list.next; - while (p != &erofs_sb_list) { - sbi = list_entry(p, struct erofs_sb_info, list); - - /* - * We move the ones we do to the end of the list, so we stop - * when we see one we have already done. - */ - if (sbi->shrinker_run_no == run_no) - break; - - if (!mutex_trylock(&sbi->umount_mutex)) { - p = p->next; - continue; - } - - spin_unlock(&erofs_sb_list_lock); - sbi->shrinker_run_no = run_no; - - freed += erofs_shrink_workstation(sbi, nr, false); - - spin_lock(&erofs_sb_list_lock); - /* Get the next list element before we move this one */ - p = p->next; - - /* - * Move this one to the end of the list to provide some - * fairness. 
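The scan loop above, together with the list_move_tail() that follows, implements round-robin fairness: each superblock is stamped with the current run number and rotated to the list tail, so a budget-limited pass resumes where the previous one stopped, and meeting an already-stamped entry means the walk has come full circle. A compact user-space rendition (singly linked list, printf in place of real shrinking):

        #include <stdio.h>

        struct node {
                const char *name;
                unsigned int run_no;    /* last pass that visited this sb */
                struct node *next;
        };

        static unsigned int shrinker_run_no;

        /* visit up to budget nodes, rotating visited ones to the tail */
        static void shrink_pass(struct node **head, int budget)
        {
                unsigned int run = ++shrinker_run_no;

                while (budget-- > 0 && *head && (*head)->run_no != run) {
                        struct node *n = *head, **tail;

                        n->run_no = run;        /* stamp: stop if we meet it again */
                        printf("run %u: shrinking %s\n", run, n->name);

                        /* unlink from the front, append at the tail */
                        *head = n->next;
                        n->next = NULL;
                        for (tail = head; *tail; tail = &(*tail)->next)
                                ;
                        *tail = n;
                }
        }

        int main(void)
        {
                struct node c = { "sb-C", 0, NULL };
                struct node b = { "sb-B", 0, &c };
                struct node a = { "sb-A", 0, &b };
                struct node *head = &a;

                shrink_pass(&head, 2);  /* visits A, B; list becomes C, A, B */
                shrink_pass(&head, 2);  /* resumes fairly at C, then A */
                return 0;
        }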
- */ - list_move_tail(&sbi->list, &erofs_sb_list); - mutex_unlock(&sbi->umount_mutex); - - if (freed >= nr) - break; - } - spin_unlock(&erofs_sb_list_lock); - return freed; -} - -static struct shrinker erofs_shrinker_info = { - .scan_objects = erofs_shrink_scan, - .count_objects = erofs_shrink_count, - .seeks = DEFAULT_SEEKS, -}; - -int __init erofs_init_shrinker(void) -{ - return register_shrinker(&erofs_shrinker_info); -} - -void erofs_exit_shrinker(void) -{ - unregister_shrinker(&erofs_shrinker_info); -} -#endif /* !CONFIG_EROFS_FS_ZIP */ - diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c deleted file mode 100644 index e7e5840e3f9d..000000000000 --- a/drivers/staging/erofs/xattr.c +++ /dev/null @@ -1,705 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/xattr.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include -#include "xattr.h" - -struct xattr_iter { - struct super_block *sb; - struct page *page; - void *kaddr; - - erofs_blk_t blkaddr; - unsigned int ofs; -}; - -static inline void xattr_iter_end(struct xattr_iter *it, bool atomic) -{ - /* the only user of kunmap() is 'init_inode_xattrs' */ - if (unlikely(!atomic)) - kunmap(it->page); - else - kunmap_atomic(it->kaddr); - - unlock_page(it->page); - put_page(it->page); -} - -static inline void xattr_iter_end_final(struct xattr_iter *it) -{ - if (!it->page) - return; - - xattr_iter_end(it, true); -} - -static int init_inode_xattrs(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct xattr_iter it; - unsigned int i; - struct erofs_xattr_ibody_header *ih; - struct super_block *sb; - struct erofs_sb_info *sbi; - bool atomic_map; - int ret = 0; - - /* the most case is that xattrs of this inode are initialized. */ - if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) - return 0; - - if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) - return -ERESTARTSYS; - - /* someone has initialized xattrs for us? */ - if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) - goto out_unlock; - - /* - * bypass all xattr operations if ->xattr_isize is not greater than - * sizeof(struct erofs_xattr_ibody_header), in detail: - * 1) it is not enough to contain erofs_xattr_ibody_header then - * ->xattr_isize should be 0 (it means no xattr); - * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk - * undefined right now (maybe use later with some new sb feature). 
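The comment interrupted here enumerates the degenerate shapes of ->xattr_isize, and the checks just below in init_inode_xattrs() act on them: zero means no xattrs at all, a bare header is currently undefined on disk, and any other value smaller than the header is corruption. The decision table as a tiny standalone classifier (the 12-byte header size is an assumption for illustration, not the authoritative on-disk constant):

        #include <stdio.h>

        /* stand-in for sizeof(struct erofs_xattr_ibody_header) */
        #define IBODY_HDR_SZ 12U

        static const char *classify_xattr_isize(unsigned int xattr_isize)
        {
                if (!xattr_isize)
                        return "no xattrs (-ENOATTR)";
                if (xattr_isize < IBODY_HDR_SZ)
                        return "bogus ibody, corrupted image (-EFSCORRUPTED)";
                if (xattr_isize == IBODY_HDR_SZ)
                        return "header only, undefined on-disk (-EOPNOTSUPP)";
                return "header + entries, go parse";
        }

        int main(void)
        {
                unsigned int cases[] = { 0, 4, 12, 64 };
                unsigned int i;

                for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
                        printf("xattr_isize %2u: %s\n", cases[i],
                               classify_xattr_isize(cases[i]));
                return 0;
        }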
- */ - if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { - errln("xattr_isize %d of nid %llu is not supported yet", - vi->xattr_isize, vi->nid); - ret = -EOPNOTSUPP; - goto out_unlock; - } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { - if (unlikely(vi->xattr_isize)) { - errln("bogus xattr ibody @ nid %llu", vi->nid); - DBG_BUGON(1); - ret = -EFSCORRUPTED; - goto out_unlock; /* xattr ondisk layout error */ - } - ret = -ENOATTR; - goto out_unlock; - } - - sb = inode->i_sb; - sbi = EROFS_SB(sb); - it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); - it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); - - it.page = erofs_get_inline_page(inode, it.blkaddr); - if (IS_ERR(it.page)) { - ret = PTR_ERR(it.page); - goto out_unlock; - } - - /* read in shared xattr array (non-atomic, see kmalloc below) */ - it.kaddr = kmap(it.page); - atomic_map = false; - - ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); - - vi->xattr_shared_count = ih->h_shared_count; - vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, - sizeof(uint), GFP_KERNEL); - if (!vi->xattr_shared_xattrs) { - xattr_iter_end(&it, atomic_map); - ret = -ENOMEM; - goto out_unlock; - } - - /* let's skip ibody header */ - it.ofs += sizeof(struct erofs_xattr_ibody_header); - - for (i = 0; i < vi->xattr_shared_count; ++i) { - if (unlikely(it.ofs >= EROFS_BLKSIZ)) { - /* cannot be unaligned */ - DBG_BUGON(it.ofs != EROFS_BLKSIZ); - xattr_iter_end(&it, atomic_map); - - it.page = erofs_get_meta_page(sb, ++it.blkaddr, - S_ISDIR(inode->i_mode)); - if (IS_ERR(it.page)) { - kfree(vi->xattr_shared_xattrs); - vi->xattr_shared_xattrs = NULL; - ret = PTR_ERR(it.page); - goto out_unlock; - } - - it.kaddr = kmap_atomic(it.page); - atomic_map = true; - it.ofs = 0; - } - vi->xattr_shared_xattrs[i] = - le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs)); - it.ofs += sizeof(__le32); - } - xattr_iter_end(&it, atomic_map); - - set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); - -out_unlock: - clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); - return ret; -} - -/* - * the general idea for these return values is - * if 0 is returned, go on processing the current xattr; - * 1 (> 0) is returned, skip this round to process the next xattr; - * -err (< 0) is returned, an error (maybe ENOXATTR) occurred - * and need to be handled - */ -struct xattr_iter_handlers { - int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry); - int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, - unsigned int len); - int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); - void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, - unsigned int len); -}; - -static inline int xattr_iter_fixup(struct xattr_iter *it) -{ - if (it->ofs < EROFS_BLKSIZ) - return 0; - - xattr_iter_end(it, true); - - it->blkaddr += erofs_blknr(it->ofs); - - it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); - if (IS_ERR(it->page)) { - int err = PTR_ERR(it->page); - - it->page = NULL; - return err; - } - - it->kaddr = kmap_atomic(it->page); - it->ofs = erofs_blkoff(it->ofs); - return 0; -} - -static int inline_xattr_iter_begin(struct xattr_iter *it, - struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb); - unsigned int xattr_header_sz, inline_xattr_ofs; - - xattr_header_sz = inlinexattr_header_size(inode); - if (unlikely(xattr_header_sz >= vi->xattr_isize)) { - DBG_BUGON(xattr_header_sz > 
vi->xattr_isize); - return -ENOATTR; - } - - inline_xattr_ofs = vi->inode_isize + xattr_header_sz; - - it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs); - it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); - - it->page = erofs_get_inline_page(inode, it->blkaddr); - if (IS_ERR(it->page)) - return PTR_ERR(it->page); - - it->kaddr = kmap_atomic(it->page); - return vi->xattr_isize - xattr_header_sz; -} - -/* - * Regardless of success or failure, `xattr_foreach' will end up with - * `ofs' pointing to the next xattr item rather than an arbitrary position. - */ -static int xattr_foreach(struct xattr_iter *it, - const struct xattr_iter_handlers *op, - unsigned int *tlimit) -{ - struct erofs_xattr_entry entry; - unsigned int value_sz, processed, slice; - int err; - - /* 0. fixup blkaddr, ofs, ipage */ - err = xattr_iter_fixup(it); - if (err) - return err; - - /* - * 1. read xattr entry to the memory, - * since we do EROFS_XATTR_ALIGN - * therefore entry should be in the page - */ - entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs); - if (tlimit) { - unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry); - - /* xattr on-disk corruption: xattr entry beyond xattr_isize */ - if (unlikely(*tlimit < entry_sz)) { - DBG_BUGON(1); - return -EFSCORRUPTED; - } - *tlimit -= entry_sz; - } - - it->ofs += sizeof(struct erofs_xattr_entry); - value_sz = le16_to_cpu(entry.e_value_size); - - /* handle entry */ - err = op->entry(it, &entry); - if (err) { - it->ofs += entry.e_name_len + value_sz; - goto out; - } - - /* 2. handle xattr name (ofs will finally be at the end of name) */ - processed = 0; - - while (processed < entry.e_name_len) { - if (it->ofs >= EROFS_BLKSIZ) { - DBG_BUGON(it->ofs > EROFS_BLKSIZ); - - err = xattr_iter_fixup(it); - if (err) - goto out; - it->ofs = 0; - } - - slice = min_t(unsigned int, PAGE_SIZE - it->ofs, - entry.e_name_len - processed); - - /* handle name */ - err = op->name(it, processed, it->kaddr + it->ofs, slice); - if (err) { - it->ofs += entry.e_name_len - processed + value_sz; - goto out; - } - - it->ofs += slice; - processed += slice; - } - - /* 3. handle xattr value */ - processed = 0; - - if (op->alloc_buffer) { - err = op->alloc_buffer(it, value_sz); - if (err) { - it->ofs += value_sz; - goto out; - } - } - - while (processed < value_sz) { - if (it->ofs >= EROFS_BLKSIZ) { - DBG_BUGON(it->ofs > EROFS_BLKSIZ); - - err = xattr_iter_fixup(it); - if (err) - goto out; - it->ofs = 0; - } - - slice = min_t(unsigned int, PAGE_SIZE - it->ofs, - value_sz - processed); - op->value(it, processed, it->kaddr + it->ofs, slice); - it->ofs += slice; - processed += slice; - } - -out: - /* xattrs should be 4-byte aligned (on-disk constraint) */ - it->ofs = EROFS_XATTR_ALIGN(it->ofs); - return err < 0 ? err : 0; -} - -struct getxattr_iter { - struct xattr_iter it; - - char *buffer; - int buffer_size, index; - struct qstr name; -}; - -static int xattr_entrymatch(struct xattr_iter *_it, - struct erofs_xattr_entry *entry) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - return (it->index != entry->e_name_index || - it->name.len != entry->e_name_len) ? -ENOATTR : 0; -} - -static int xattr_namematch(struct xattr_iter *_it, - unsigned int processed, char *buf, unsigned int len) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - return memcmp(buf, it->name.name + processed, len) ? 
-ENOATTR : 0; -} - -static int xattr_checkbuffer(struct xattr_iter *_it, - unsigned int value_sz) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - int err = it->buffer_size < value_sz ? -ERANGE : 0; - - it->buffer_size = value_sz; - return !it->buffer ? 1 : err; -} - -static void xattr_copyvalue(struct xattr_iter *_it, - unsigned int processed, - char *buf, unsigned int len) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - memcpy(it->buffer + processed, buf, len); -} - -static const struct xattr_iter_handlers find_xattr_handlers = { - .entry = xattr_entrymatch, - .name = xattr_namematch, - .alloc_buffer = xattr_checkbuffer, - .value = xattr_copyvalue -}; - -static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) -{ - int ret; - unsigned int remaining; - - ret = inline_xattr_iter_begin(&it->it, inode); - if (ret < 0) - return ret; - - remaining = ret; - while (remaining) { - ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); - if (ret != -ENOATTR) - break; - } - xattr_iter_end_final(&it->it); - - return ret ? ret : it->buffer_size; -} - -static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - struct erofs_sb_info *const sbi = EROFS_SB(sb); - unsigned int i; - int ret = -ENOATTR; - - for (i = 0; i < vi->xattr_shared_count; ++i) { - erofs_blk_t blkaddr = - xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); - - it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); - - if (!i || blkaddr != it->it.blkaddr) { - if (i) - xattr_iter_end(&it->it, true); - - it->it.page = erofs_get_meta_page(sb, blkaddr, false); - if (IS_ERR(it->it.page)) - return PTR_ERR(it->it.page); - - it->it.kaddr = kmap_atomic(it->it.page); - it->it.blkaddr = blkaddr; - } - - ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); - if (ret != -ENOATTR) - break; - } - if (vi->xattr_shared_count) - xattr_iter_end_final(&it->it); - - return ret ? 
ret : it->buffer_size; -} - -static bool erofs_xattr_user_list(struct dentry *dentry) -{ - return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); -} - -static bool erofs_xattr_trusted_list(struct dentry *dentry) -{ - return capable(CAP_SYS_ADMIN); -} - -int erofs_getxattr(struct inode *inode, int index, - const char *name, - void *buffer, size_t buffer_size) -{ - int ret; - struct getxattr_iter it; - - if (unlikely(!name)) - return -EINVAL; - - ret = init_inode_xattrs(inode); - if (ret) - return ret; - - it.index = index; - - it.name.len = strlen(name); - if (it.name.len > EROFS_NAME_LEN) - return -ERANGE; - it.name.name = name; - - it.buffer = buffer; - it.buffer_size = buffer_size; - - it.it.sb = inode->i_sb; - ret = inline_getxattr(inode, &it); - if (ret == -ENOATTR) - ret = shared_getxattr(inode, &it); - return ret; -} - -static int erofs_xattr_generic_get(const struct xattr_handler *handler, - struct dentry *unused, struct inode *inode, - const char *name, void *buffer, size_t size) -{ - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - - switch (handler->flags) { - case EROFS_XATTR_INDEX_USER: - if (!test_opt(sbi, XATTR_USER)) - return -EOPNOTSUPP; - break; - case EROFS_XATTR_INDEX_TRUSTED: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - break; - case EROFS_XATTR_INDEX_SECURITY: - break; - default: - return -EINVAL; - } - - return erofs_getxattr(inode, handler->flags, name, buffer, size); -} - -const struct xattr_handler erofs_xattr_user_handler = { - .prefix = XATTR_USER_PREFIX, - .flags = EROFS_XATTR_INDEX_USER, - .list = erofs_xattr_user_list, - .get = erofs_xattr_generic_get, -}; - -const struct xattr_handler erofs_xattr_trusted_handler = { - .prefix = XATTR_TRUSTED_PREFIX, - .flags = EROFS_XATTR_INDEX_TRUSTED, - .list = erofs_xattr_trusted_list, - .get = erofs_xattr_generic_get, -}; - -#ifdef CONFIG_EROFS_FS_SECURITY -const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .flags = EROFS_XATTR_INDEX_SECURITY, - .get = erofs_xattr_generic_get, -}; -#endif - -const struct xattr_handler *erofs_xattr_handlers[] = { - &erofs_xattr_user_handler, -#ifdef CONFIG_EROFS_FS_POSIX_ACL - &posix_acl_access_xattr_handler, - &posix_acl_default_xattr_handler, -#endif - &erofs_xattr_trusted_handler, -#ifdef CONFIG_EROFS_FS_SECURITY - &erofs_xattr_security_handler, -#endif - NULL, -}; - -struct listxattr_iter { - struct xattr_iter it; - - struct dentry *dentry; - char *buffer; - int buffer_size, buffer_ofs; -}; - -static int xattr_entrylist(struct xattr_iter *_it, - struct erofs_xattr_entry *entry) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - unsigned int prefix_len; - const char *prefix; - - const struct xattr_handler *h = - erofs_xattr_handler(entry->e_name_index); - - if (!h || (h->list && !h->list(it->dentry))) - return 1; - - prefix = xattr_prefix(h); - prefix_len = strlen(prefix); - - if (!it->buffer) { - it->buffer_ofs += prefix_len + entry->e_name_len + 1; - return 1; - } - - if (it->buffer_ofs + prefix_len - + entry->e_name_len + 1 > it->buffer_size) - return -ERANGE; - - memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len); - it->buffer_ofs += prefix_len; - return 0; -} - -static int xattr_namelist(struct xattr_iter *_it, - unsigned int processed, char *buf, unsigned int len) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - - memcpy(it->buffer + it->buffer_ofs, buf, len); - it->buffer_ofs += len; - return 0; -} - -static int xattr_skipvalue(struct 
xattr_iter *_it, - unsigned int value_sz) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - - it->buffer[it->buffer_ofs++] = '\0'; - return 1; -} - -static const struct xattr_iter_handlers list_xattr_handlers = { - .entry = xattr_entrylist, - .name = xattr_namelist, - .alloc_buffer = xattr_skipvalue, - .value = NULL -}; - -static int inline_listxattr(struct listxattr_iter *it) -{ - int ret; - unsigned int remaining; - - ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry)); - if (ret < 0) - return ret; - - remaining = ret; - while (remaining) { - ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining); - if (ret) - break; - } - xattr_iter_end_final(&it->it); - return ret ? ret : it->buffer_ofs; -} - -static int shared_listxattr(struct listxattr_iter *it) -{ - struct inode *const inode = d_inode(it->dentry); - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - struct erofs_sb_info *const sbi = EROFS_SB(sb); - unsigned int i; - int ret = 0; - - for (i = 0; i < vi->xattr_shared_count; ++i) { - erofs_blk_t blkaddr = - xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); - - it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); - if (!i || blkaddr != it->it.blkaddr) { - if (i) - xattr_iter_end(&it->it, true); - - it->it.page = erofs_get_meta_page(sb, blkaddr, false); - if (IS_ERR(it->it.page)) - return PTR_ERR(it->it.page); - - it->it.kaddr = kmap_atomic(it->it.page); - it->it.blkaddr = blkaddr; - } - - ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL); - if (ret) - break; - } - if (vi->xattr_shared_count) - xattr_iter_end_final(&it->it); - - return ret ? ret : it->buffer_ofs; -} - -ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - int ret; - struct listxattr_iter it; - - ret = init_inode_xattrs(d_inode(dentry)); - if (ret) - return ret; - - it.dentry = dentry; - it.buffer = buffer; - it.buffer_size = buffer_size; - it.buffer_ofs = 0; - - it.it.sb = dentry->d_sb; - - ret = inline_listxattr(&it); - if (ret < 0 && ret != -ENOATTR) - return ret; - return shared_listxattr(&it); -} - -#ifdef CONFIG_EROFS_FS_POSIX_ACL -struct posix_acl *erofs_get_acl(struct inode *inode, int type) -{ - struct posix_acl *acl; - int prefix, rc; - char *value = NULL; - - switch (type) { - case ACL_TYPE_ACCESS: - prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; - break; - case ACL_TYPE_DEFAULT: - prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; - break; - default: - return ERR_PTR(-EINVAL); - } - - rc = erofs_getxattr(inode, prefix, "", NULL, 0); - if (rc > 0) { - value = kmalloc(rc, GFP_KERNEL); - if (!value) - return ERR_PTR(-ENOMEM); - rc = erofs_getxattr(inode, prefix, "", value, rc); - } - - if (rc == -ENOATTR) - acl = NULL; - else if (rc < 0) - acl = ERR_PTR(rc); - else - acl = posix_acl_from_xattr(&init_user_ns, value, rc); - kfree(value); - return acl; -} -#endif - diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h deleted file mode 100644 index e20249647541..000000000000 --- a/drivers/staging/erofs/xattr.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/xattr.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
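xattr_entrylist() above implements the standard listxattr(2) sizing contract: with a NULL buffer it only accumulates the bytes that would be written (prefix + name + NUL per attribute), so callers measure with one call and fill with a second. The calling side of that contract against the Linux syscall wrapper (error handling abbreviated):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <sys/xattr.h>

        int main(int argc, char *argv[])
        {
                const char *path = argc > 1 ? argv[1] : ".";
                ssize_t sz, i;
                char *buf;

                sz = listxattr(path, NULL, 0);  /* pass 1: just measure */
                if (sz <= 0)
                        return sz < 0 ? 1 : 0;

                buf = malloc(sz);
                if (!buf)
                        return 1;

                sz = listxattr(path, buf, sz);  /* pass 2: fill the buffer */
                for (i = 0; i < sz; i += strlen(buf + i) + 1)   /* NUL-separated names */
                        puts(buf + i);

                free(buf);
                return 0;
        }

xattr_checkbuffer() in the getxattr path earlier follows the same convention: with a NULL buffer it returns 1 to skip copying while still recording value_sz, so a size query keeps iterating instead of failing.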
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_XATTR_H -#define __EROFS_XATTR_H - -#include "internal.h" -#include -#include - -/* Attribute not found */ -#define ENOATTR ENODATA - -static inline unsigned int inlinexattr_header_size(struct inode *inode) -{ - return sizeof(struct erofs_xattr_ibody_header) - + sizeof(u32) * EROFS_V(inode)->xattr_shared_count; -} - -static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi, - unsigned int xattr_id) -{ -#ifdef CONFIG_EROFS_FS_XATTR - return sbi->xattr_blkaddr + - xattr_id * sizeof(__u32) / EROFS_BLKSIZ; -#else - return 0; -#endif -} - -static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi, - unsigned int xattr_id) -{ - return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ; -} - -#ifdef CONFIG_EROFS_FS_XATTR -extern const struct xattr_handler erofs_xattr_user_handler; -extern const struct xattr_handler erofs_xattr_trusted_handler; -#ifdef CONFIG_EROFS_FS_SECURITY -extern const struct xattr_handler erofs_xattr_security_handler; -#endif - -static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx) -{ -static const struct xattr_handler *xattr_handler_map[] = { - [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, -#ifdef CONFIG_EROFS_FS_POSIX_ACL - [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler, - [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = - &posix_acl_default_xattr_handler, -#endif - [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler, -#ifdef CONFIG_EROFS_FS_SECURITY - [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler, -#endif -}; - - return idx && idx < ARRAY_SIZE(xattr_handler_map) ? - xattr_handler_map[idx] : NULL; -} - -extern const struct xattr_handler *erofs_xattr_handlers[]; - -int erofs_getxattr(struct inode *, int, const char *, void *, size_t); -ssize_t erofs_listxattr(struct dentry *, char *, size_t); -#else -static inline int erofs_getxattr(struct inode *inode, int index, - const char *name, void *buffer, - size_t buffer_size) -{ - return -EOPNOTSUPP; -} - -static inline ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - return -EOPNOTSUPP; -} -#endif /* !CONFIG_EROFS_FS_XATTR */ - -#ifdef CONFIG_EROFS_FS_POSIX_ACL -struct posix_acl *erofs_get_acl(struct inode *inode, int type); -#else -#define erofs_get_acl (NULL) -#endif - -#endif - diff --git a/drivers/staging/erofs/zdata.c b/drivers/staging/erofs/zdata.c deleted file mode 100644 index 60d7c20db87d..000000000000 --- a/drivers/staging/erofs/zdata.c +++ /dev/null @@ -1,1434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/zdata.c - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "zdata.h" -#include "compress.h" -#include - -#include - -/* - * a compressed_pages[] placeholder in order to avoid - * being filled with file pages for in-place decompression. 
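xattrblock_addr()/xattrblock_offset() above locate a shared xattr purely by arithmetic: shared xattr ids index 4-byte units after xattr_blkaddr, so an id maps to (base + id * 4 / blocksize, id * 4 % blocksize). A standalone check of that mapping (the block size matches EROFS_BLKSIZ; the base block number is made up):

        #include <stdio.h>

        #define BLKSIZ          4096U
        #define XATTR_BLKADDR   16U     /* hypothetical base block */

        static void locate_shared_xattr(unsigned int id,
                                        unsigned int *blk, unsigned int *off)
        {
                *blk = XATTR_BLKADDR + id * 4 / BLKSIZ;
                *off = id * 4 % BLKSIZ;
        }

        int main(void)
        {
                unsigned int ids[] = { 0, 1023, 1024, 5000 };
                unsigned int i, blk, off;

                for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
                        locate_shared_xattr(ids[i], &blk, &off);
                        printf("xattr id %4u -> block %u offset %u\n",
                               ids[i], blk, off);
                }
                return 0;
        }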
- */ -#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D) - -/* how to allocate cached pages for a pcluster */ -enum z_erofs_cache_alloctype { - DONTALLOC, /* don't allocate any cached pages */ - DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */ -}; - -/* - * tagged pointer with 1-bit tag for all compressed pages - * tag 0 - the page is just found with an extra page reference - */ -typedef tagptr1_t compressed_page_t; - -#define tag_compressed_page_justfound(page) \ - tagptr_fold(compressed_page_t, page, 1) - -static struct workqueue_struct *z_erofs_workqueue __read_mostly; -static struct kmem_cache *pcluster_cachep __read_mostly; - -void z_erofs_exit_zip_subsystem(void) -{ - destroy_workqueue(z_erofs_workqueue); - kmem_cache_destroy(pcluster_cachep); -} - -static inline int init_unzip_workqueue(void) -{ - const unsigned int onlinecpus = num_possible_cpus(); - const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; - - /* - * no need to spawn too many threads, limiting threads could minimum - * scheduling overhead, perhaps per-CPU threads should be better? - */ - z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, - onlinecpus + onlinecpus / 4); - return z_erofs_workqueue ? 0 : -ENOMEM; -} - -static void init_once(void *ptr) -{ - struct z_erofs_pcluster *pcl = ptr; - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - unsigned int i; - - mutex_init(&cl->lock); - cl->nr_pages = 0; - cl->vcnt = 0; - for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i) - pcl->compressed_pages[i] = NULL; -} - -static void init_always(struct z_erofs_pcluster *pcl) -{ - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - - atomic_set(&pcl->obj.refcount, 1); - - DBG_BUGON(cl->nr_pages); - DBG_BUGON(cl->vcnt); -} - -int __init z_erofs_init_zip_subsystem(void) -{ - pcluster_cachep = kmem_cache_create("erofs_compress", - Z_EROFS_WORKGROUP_SIZE, 0, - SLAB_RECLAIM_ACCOUNT, init_once); - if (pcluster_cachep) { - if (!init_unzip_workqueue()) - return 0; - - kmem_cache_destroy(pcluster_cachep); - } - return -ENOMEM; -} - -enum z_erofs_collectmode { - COLLECT_SECONDARY, - COLLECT_PRIMARY, - /* - * The current collection was the tail of an exist chain, in addition - * that the previous processed chained collections are all decided to - * be hooked up to it. 
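PAGE_UNALLOCATED above is a sentinel pointer: a pointer-sized constant that can never alias a real page, marking a compressed_pages[] slot as reserved for allocation at I/O submission time, distinct from both NULL (free for in-place file pages) and an attached cached page. The three-state slot idea in plain C (the poison value is arbitrary; any address that is never mapped works):

        #include <stdio.h>

        #define SLOT_DELAYED ((void *)0x5F0E4B1D)       /* reserved, allocate later */

        static const char *slot_state(void *slot)
        {
                if (!slot)
                        return "empty (may be used for in-place I/O)";
                if (slot == SLOT_DELAYED)
                        return "reserved (allocate at submission time)";
                return "cached page attached";
        }

        int main(void)
        {
                int page;       /* stands in for a real struct page */
                void *slots[3] = { NULL, SLOT_DELAYED, &page };
                int i;

                for (i = 0; i < 3; i++)
                        printf("slot %d: %s\n", i, slot_state(slots[i]));
                return 0;
        }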
- * A new chain will be created for the remaining collections which are - * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, - * the next collection cannot reuse the whole page safely in - * the following scenario: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (belongs to the next cl) | (belongs to the current cl) | - * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| - */ - COLLECT_PRIMARY_HOOKED, - COLLECT_PRIMARY_FOLLOWED_NOINPLACE, - /* - * The current collection has been linked with the owned chain, and - * could also be linked with the remaining collections, which means - * if the processing page is the tail page of the collection, thus - * the current collection can safely use the whole page (since - * the previous collection is under control) for in-place I/O, as - * illustrated below: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (of the current cl) | (of the previous collection) | - * | PRIMARY_FOLLOWED or | | - * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| - * - * [ (*) the above page can be used as inplace I/O. ] - */ - COLLECT_PRIMARY_FOLLOWED, -}; - -struct z_erofs_collector { - struct z_erofs_pagevec_ctor vector; - - struct z_erofs_pcluster *pcl, *tailpcl; - struct z_erofs_collection *cl; - struct page **compressedpages; - z_erofs_next_pcluster_t owned_head; - - enum z_erofs_collectmode mode; -}; - -struct z_erofs_decompress_frontend { - struct inode *const inode; - - struct z_erofs_collector clt; - struct erofs_map_blocks map; - - /* used for applying cache strategy on the fly */ - bool backmost; - erofs_off_t headoffset; -}; - -#define COLLECTOR_INIT() { \ - .owned_head = Z_EROFS_PCLUSTER_TAIL, \ - .mode = COLLECT_PRIMARY_FOLLOWED } - -#define DECOMPRESS_FRONTEND_INIT(__i) { \ - .inode = __i, .clt = COLLECTOR_INIT(), \ - .backmost = true, } - -static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; -static DEFINE_MUTEX(z_pagemap_global_lock); - -static void preload_compressed_pages(struct z_erofs_collector *clt, - struct address_space *mc, - enum z_erofs_cache_alloctype type, - struct list_head *pagepool) -{ - const struct z_erofs_pcluster *pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); - struct page **pages = clt->compressedpages; - pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); - bool standalone = true; - - if (clt->mode < COLLECT_PRIMARY_FOLLOWED) - return; - - for (; pages < pcl->compressed_pages + clusterpages; ++pages) { - struct page *page; - compressed_page_t t; - - /* the compressed page was loaded before */ - if (READ_ONCE(*pages)) - continue; - - page = find_get_page(mc, index); - - if (page) { - t = tag_compressed_page_justfound(page); - } else if (type == DELAYEDALLOC) { - t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); - } else { /* DONTALLOC */ - if (standalone) - clt->compressedpages = pages; - standalone = false; - continue; - } - - if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) - continue; - - if (page) - put_page(page); - } - - if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; -} - -/* called by erofs_shrinker to get rid of all compressed_pages */ -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp) -{ - struct z_erofs_pcluster *const pcl = - container_of(grp, 
struct z_erofs_pcluster, obj);
-	struct address_space *const mapping = MNGD_MAPPING(sbi);
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
-	int i;
-
-	/*
-	 * refcount of the workgroup is now frozen to 1,
-	 * therefore no need to worry about available decompression users.
-	 */
-	for (i = 0; i < clusterpages; ++i) {
-		struct page *page = pcl->compressed_pages[i];
-
-		if (!page)
-			continue;
-
-		/* block other users from reclaiming or migrating the page */
-		if (!trylock_page(page))
-			return -EBUSY;
-
-		if (unlikely(page->mapping != mapping))
-			continue;
-
-		/* barrier is implied in the following 'unlock_page' */
-		WRITE_ONCE(pcl->compressed_pages[i], NULL);
-		set_page_private(page, 0);
-		ClearPagePrivate(page);
-
-		unlock_page(page);
-		put_page(page);
-	}
-	return 0;
-}
-
-int erofs_try_to_free_cached_page(struct address_space *mapping,
-				  struct page *page)
-{
-	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
-	int ret = 0;	/* 0 - busy */
-
-	if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
-		unsigned int i;
-
-		for (i = 0; i < clusterpages; ++i) {
-			if (pcl->compressed_pages[i] == page) {
-				WRITE_ONCE(pcl->compressed_pages[i], NULL);
-				ret = 1;
-				break;
-			}
-		}
-		erofs_workgroup_unfreeze(&pcl->obj, 1);
-
-		if (ret) {
-			ClearPagePrivate(page);
-			put_page(page);
-		}
-	}
-	return ret;
-}
-
-/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static inline bool try_inplace_io(struct z_erofs_collector *clt,
-				  struct page *page)
-{
-	struct z_erofs_pcluster *const pcl = clt->pcl;
-	const unsigned int clusterpages = BIT(pcl->clusterbits);
-
-	while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
-		if (!cmpxchg(clt->compressedpages++, NULL, page))
-			return true;
-	}
-	return false;
-}
-
-/* callers must hold the collection lock */
-static int z_erofs_attach_page(struct z_erofs_collector *clt,
-			       struct page *page,
-			       enum z_erofs_page_type type)
-{
-	int ret;
-	bool occupied;
-
-	/* give priority to inplace I/O */
-	if (clt->mode >= COLLECT_PRIMARY &&
-	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
-	    try_inplace_io(clt, page))
-		return 0;
-
-	ret = z_erofs_pagevec_enqueue(&clt->vector,
-				      page, type, &occupied);
-	clt->cl->vcnt += (unsigned int)ret;
-
-	return ret ? 0 : -EAGAIN;
-}
-
-static enum z_erofs_collectmode
-try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
-		      z_erofs_next_pcluster_t *owned_head)
-{
-	/* try to claim the following types of pclusters */
-retry:
-	if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
-		/* type 1, nil pcluster */
-		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
-			    *owned_head) != Z_EROFS_PCLUSTER_NIL)
-			goto retry;
-
-		*owned_head = &pcl->next;
-		/* lucky, I am the followee :) */
-		return COLLECT_PRIMARY_FOLLOWED;
-	} else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
-		/*
-		 * type 2, link to the end of an existing open chain,
-		 * be careful that its submission itself is governed
-		 * by the original owned chain.
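
A rough C11 model of the claiming protocol above; struct pcl, try_to_claim() and the numeric return codes are invented for illustration, with C11 compare-exchange standing in for the kernel's cmpxchg():

    #include <stdatomic.h>
    #include <stdio.h>

    #define TAIL ((void *)0x5F0ECAFE)	/* open chain end, as in zdata.h */
    #define NIL  ((void *)0)

    struct pcl { _Atomic(void *) next; };

    /* try to claim @pcl into the chain whose head is *owned_head */
    static int try_to_claim(struct pcl *pcl, void **owned_head)
    {
    	void *expected = atomic_load(&pcl->next);

    	for (;;) {
    		if (expected == NIL) {
    			/* type 1: unclaimed; chain it and follow it */
    			if (atomic_compare_exchange_weak(&pcl->next,
    							 &expected, *owned_head)) {
    				*owned_head = &pcl->next;
    				return 1;	/* FOLLOWED */
    			}
    		} else if (expected == TAIL) {
    			/* type 2: hook onto the tail of an open chain */
    			if (atomic_compare_exchange_weak(&pcl->next,
    							 &expected, *owned_head)) {
    				*owned_head = TAIL;
    				return 2;	/* HOOKED */
    			}
    		} else {
    			return 0;	/* already claimed by another chain */
    		}
    	}
    }

    int main(void)
    {
    	struct pcl p = { .next = NIL };
    	void *head = TAIL;

    	printf("claim result: %d\n", try_to_claim(&p, &head));
    	return 0;
    }

On a failed compare-exchange, 'expected' is reloaded automatically, so the loop re-dispatches on whatever state the pcluster has moved to.
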
- */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - *owned_head) != Z_EROFS_PCLUSTER_TAIL) - goto retry; - *owned_head = Z_EROFS_PCLUSTER_TAIL; - return COLLECT_PRIMARY_HOOKED; - } - return COLLECT_PRIMARY; /* :( better luck next time */ -} - -static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct erofs_workgroup *grp; - struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; - unsigned int length; - bool tag; - - grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); - if (!grp) - return NULL; - - pcl = container_of(grp, struct z_erofs_pcluster, obj); - if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - - cl = z_erofs_primarycollection(pcl); - if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - - length = READ_ONCE(pcl->length); - if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) { - if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - } else { - unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; - - if (map->m_flags & EROFS_MAP_FULL_MAPPED) - llen |= Z_EROFS_PCLUSTER_FULL_LENGTH; - - while (llen > length && - length != cmpxchg_relaxed(&pcl->length, length, llen)) { - cpu_relax(); - length = READ_ONCE(pcl->length); - } - } - mutex_lock(&cl->lock); - /* used to check tail merging loop due to corrupted images */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = pcl; - clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); - /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = NULL; - clt->pcl = pcl; - clt->cl = cl; - return cl; -} - -static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; - int err; - - /* no available workgroup, let's allocate one */ - pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); - if (unlikely(!pcl)) - return ERR_PTR(-ENOMEM); - - init_always(pcl); - pcl->obj.index = map->m_pa >> PAGE_SHIFT; - - pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | - (map->m_flags & EROFS_MAP_FULL_MAPPED ? - Z_EROFS_PCLUSTER_FULL_LENGTH : 0); - - if (map->m_flags & EROFS_MAP_ZIPPED) - pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; - else - pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; - - pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0]; - pcl->clusterbits -= PAGE_SHIFT; - - /* new pclusters should be claimed as type 1, primary and followed */ - pcl->next = clt->owned_head; - clt->mode = COLLECT_PRIMARY_FOLLOWED; - - cl = z_erofs_primarycollection(pcl); - cl->pageofs = map->m_la & ~PAGE_MASK; - - /* - * lock all primary followed works before visible to others - * and mutex_trylock *never* fails for a new pcluster. 
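
The cmpxchg_relaxed() loop in cllookup() above is a lock-free "monotonic maximum" update: the recorded length may only grow, never shrink, no matter how many readers race. A self-contained C11 sketch (length_fixup_max() is a hypothetical name, and the Z_EROFS_PCLUSTER_FULL_LENGTH flag handling is omitted):

    #include <stdatomic.h>
    #include <stdio.h>

    /* raise *len to at least newlen without ever lowering it, lock-free */
    static void length_fixup_max(_Atomic unsigned int *len, unsigned int newlen)
    {
    	unsigned int cur = atomic_load_explicit(len, memory_order_relaxed);

    	while (newlen > cur &&
    	       !atomic_compare_exchange_weak_explicit(len, &cur, newlen,
    						      memory_order_relaxed,
    						      memory_order_relaxed))
    		;	/* cur was reloaded by the failed CAS; retry */
    }

    int main(void)
    {
    	_Atomic unsigned int length = 100;

    	length_fixup_max(&length, 300);
    	length_fixup_max(&length, 200);	/* no effect: 300 already recorded */
    	printf("%u\n", atomic_load(&length));	/* prints 300 */
    	return 0;
    }
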
- */ - mutex_trylock(&cl->lock); - - err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0); - if (err) { - mutex_unlock(&cl->lock); - kmem_cache_free(pcluster_cachep, pcl); - return ERR_PTR(-EAGAIN); - } - /* used to check tail merging loop due to corrupted images */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = pcl; - clt->owned_head = &pcl->next; - clt->pcl = pcl; - clt->cl = cl; - return cl; -} - -static int z_erofs_collector_begin(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct z_erofs_collection *cl; - - DBG_BUGON(clt->cl); - - /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */ - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL); - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - - if (!PAGE_ALIGNED(map->m_pa)) { - DBG_BUGON(1); - return -EINVAL; - } - -repeat: - cl = cllookup(clt, inode, map); - if (!cl) { - cl = clregister(clt, inode, map); - - if (unlikely(cl == ERR_PTR(-EAGAIN))) - goto repeat; - } - - if (IS_ERR(cl)) - return PTR_ERR(cl); - - z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, cl->vcnt); - - clt->compressedpages = clt->pcl->compressed_pages; - if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ - clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES; - return 0; -} - -/* - * keep in mind that no referenced pclusters will be freed - * only after a RCU grace period. - */ -static void z_erofs_rcu_callback(struct rcu_head *head) -{ - struct z_erofs_collection *const cl = - container_of(head, struct z_erofs_collection, rcu); - - kmem_cache_free(pcluster_cachep, - container_of(cl, struct z_erofs_pcluster, - primary_collection)); -} - -void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) -{ - struct z_erofs_pcluster *const pcl = - container_of(grp, struct z_erofs_pcluster, obj); - struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl); - - call_rcu(&cl->rcu, z_erofs_rcu_callback); -} - -static void z_erofs_collection_put(struct z_erofs_collection *cl) -{ - struct z_erofs_pcluster *const pcl = - container_of(cl, struct z_erofs_pcluster, primary_collection); - - erofs_workgroup_put(&pcl->obj); -} - -static bool z_erofs_collector_end(struct z_erofs_collector *clt) -{ - struct z_erofs_collection *cl = clt->cl; - - if (!cl) - return false; - - z_erofs_pagevec_ctor_exit(&clt->vector, false); - mutex_unlock(&cl->lock); - - /* - * if all pending pages are added, don't hold its reference - * any longer if the pcluster isn't hosted by ourselves. 
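
z_erofs_collector_begin() above retries whenever clregister() loses the registration race and returns -EAGAIN; the same lookup-or-register pattern in userspace C11, with one atomic slot standing in for the workgroup index (lookup() and reg() are invented names):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic(void *) slot;	/* stands in for the workgroup index */

    static void *lookup(void)
    {
    	return atomic_load(&slot);
    }

    /* register a new object; fail with EAGAIN if someone beat us to it */
    static void *reg(void)
    {
    	void *expected = NULL;
    	void *obj = malloc(16);

    	if (!obj)
    		return NULL;	/* -ENOMEM in the kernel version */
    	if (!atomic_compare_exchange_strong(&slot, &expected, obj)) {
    		free(obj);	/* lost the race: caller should re-lookup */
    		errno = EAGAIN;
    		return NULL;
    	}
    	return obj;
    }

    int main(void)
    {
    	void *obj;

    repeat:
    	obj = lookup();
    	if (!obj) {
    		obj = reg();
    		if (!obj && errno == EAGAIN)
    			goto repeat;	/* mirrors z_erofs_collector_begin() */
    	}
    	printf("got %p\n", obj);
    	return 0;
    }
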
- */ - if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) - z_erofs_collection_put(cl); - - clt->cl = NULL; - return true; -} - -static inline struct page *__stagingpage_alloc(struct list_head *pagepool, - gfp_t gfp) -{ - struct page *page = erofs_allocpage(pagepool, gfp, true); - - page->mapping = Z_EROFS_MAPPING_STAGING; - return page; -} - -static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, - unsigned int cachestrategy, - erofs_off_t la) -{ - if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) - return false; - - if (fe->backmost) - return true; - - return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && - la < fe->headoffset; -} - -static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page, - struct list_head *pagepool) -{ - struct inode *const inode = fe->inode; - struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); - struct erofs_map_blocks *const map = &fe->map; - struct z_erofs_collector *const clt = &fe->clt; - const loff_t offset = page_offset(page); - bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); - - enum z_erofs_cache_alloctype cache_strategy; - enum z_erofs_page_type page_type; - unsigned int cur, end, spiltted, index; - int err = 0; - - /* register locked file pages as online pages in pack */ - z_erofs_onlinepage_init(page); - - spiltted = 0; - end = PAGE_SIZE; -repeat: - cur = end - 1; - - /* lucky, within the range of the current map_blocks */ - if (offset + cur >= map->m_la && - offset + cur < map->m_la + map->m_llen) { - /* didn't get a valid collection previously (very rare) */ - if (!clt->cl) - goto restart_now; - goto hitted; - } - - /* go ahead the next map_blocks */ - debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); - - if (z_erofs_collector_end(clt)) - fe->backmost = false; - - map->m_la = offset + cur; - map->m_llen = 0; - err = z_erofs_map_blocks_iter(inode, map, 0); - if (unlikely(err)) - goto err_out; - -restart_now: - if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) - goto hitted; - - err = z_erofs_collector_begin(clt, inode, map); - if (unlikely(err)) - goto err_out; - - /* preload all compressed pages (maybe downgrade role if necessary) */ - if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) - cache_strategy = DELAYEDALLOC; - else - cache_strategy = DONTALLOC; - - preload_compressed_pages(clt, MNGD_MAPPING(sbi), - cache_strategy, pagepool); - - tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); -hitted: - cur = end - min_t(unsigned int, offset + end - map->m_la, end); - if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { - zero_user_segment(page, cur, end); - goto next_part; - } - - /* let's derive page type */ - page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD : - (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : - (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : - Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); - - if (cur) - tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); - -retry: - err = z_erofs_attach_page(clt, page, page_type); - /* should allocate an additional staging page for pagevec */ - if (err == -EAGAIN) { - struct page *const newpage = - __stagingpage_alloc(pagepool, GFP_NOFS); - - err = z_erofs_attach_page(clt, newpage, - Z_EROFS_PAGE_TYPE_EXCLUSIVE); - if (likely(!err)) - goto retry; - } - - if (unlikely(err)) - goto err_out; - - index = page->index - (map->m_la >> PAGE_SHIFT); - - z_erofs_onlinepage_fixup(page, index, true); - - /* bump up the number of spiltted parts of a page */ - ++spiltted; - /* also update nr_pages */ - clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1); -next_part: - /* can be used for verification */ - map->m_llen = offset + cur - map->m_la; - - end = cur; - if (end > 0) - goto repeat; - -out: - z_erofs_onlinepage_endio(page); - - debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", - __func__, page, spiltted, map->m_llen); - return err; - - /* if some error occurred while processing this page */ -err_out: - SetPageError(page); - goto out; -} - -static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) -{ - tagptr1_t t = tagptr_init(tagptr1_t, ptr); - struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); - bool background = tagptr_unfold_tags(t); - - if (!background) { - unsigned long flags; - - spin_lock_irqsave(&io->u.wait.lock, flags); - if (!atomic_add_return(bios, &io->pending_bios)) - wake_up_locked(&io->u.wait); - spin_unlock_irqrestore(&io->u.wait.lock, flags); - return; - } - - if (!atomic_add_return(bios, &io->pending_bios)) - queue_work(z_erofs_workqueue, &io->u.work); -} - -static inline void z_erofs_vle_read_endio(struct bio *bio) -{ - struct erofs_sb_info *sbi = NULL; - blk_status_t err = bio->bi_status; - struct bio_vec *bvec; - struct bvec_iter_all iter_all; - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - bool cachemngd = false; - - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(!page->mapping); - - if (unlikely(!sbi && !z_erofs_page_is_staging(page))) { - sbi = EROFS_SB(page->mapping->host->i_sb); - - if (time_to_inject(sbi, FAULT_READ_IO)) { - erofs_show_injection_info(FAULT_READ_IO); - err = BLK_STS_IOERR; - } - } - - /* sbi should already be gotten if the page is managed */ - if (sbi) - cachemngd = erofs_page_is_managed(sbi, page); - - if (unlikely(err)) - SetPageError(page); - else if (cachemngd) - SetPageUptodate(page); - - if (cachemngd) - unlock_page(page); - } - - z_erofs_vle_unzip_kickoff(bio->bi_private, -1); - bio_put(bio); -} - -static int z_erofs_decompress_pcluster(struct super_block *sb, - struct z_erofs_pcluster *pcl, - struct list_head *pagepool) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - const unsigned int clusterpages = BIT(pcl->clusterbits); - struct z_erofs_pagevec_ctor ctor; - unsigned int i, outputsize, llen, nr_pages; - struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; - struct page **pages, **compressed_pages, *page; - - enum z_erofs_page_type page_type; - bool overlapped, partial; - struct z_erofs_collection *cl; - int err; - - might_sleep(); - cl = z_erofs_primarycollection(pcl); - DBG_BUGON(!READ_ONCE(cl->nr_pages)); - - mutex_lock(&cl->lock); - nr_pages = cl->nr_pages; - - if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) { - pages = pages_onstack; - } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES && - mutex_trylock(&z_pagemap_global_lock)) { - pages = 
z_pagemap_global; - } else { - gfp_t gfp_flags = GFP_KERNEL; - - if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES) - gfp_flags |= __GFP_NOFAIL; - - pages = kvmalloc_array(nr_pages, sizeof(struct page *), - gfp_flags); - - /* fallback to global pagemap for the lowmem scenario */ - if (unlikely(!pages)) { - mutex_lock(&z_pagemap_global_lock); - pages = z_pagemap_global; - } - } - - for (i = 0; i < nr_pages; ++i) - pages[i] = NULL; - - err = 0; - z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, 0); - - for (i = 0; i < cl->vcnt; ++i) { - unsigned int pagenr; - - page = z_erofs_pagevec_dequeue(&ctor, &page_type); - - /* all pages in pagevec ought to be valid */ - DBG_BUGON(!page); - DBG_BUGON(!page->mapping); - - if (z_erofs_put_stagingpage(pagepool, page)) - continue; - - if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD) - pagenr = 0; - else - pagenr = z_erofs_onlinepage_index(page); - - DBG_BUGON(pagenr >= nr_pages); - - /* - * currently EROFS doesn't support multiref(dedup), - * so here erroring out one multiref page. - */ - if (unlikely(pages[pagenr])) { - DBG_BUGON(1); - SetPageError(pages[pagenr]); - z_erofs_onlinepage_endio(pages[pagenr]); - err = -EFSCORRUPTED; - } - pages[pagenr] = page; - } - z_erofs_pagevec_ctor_exit(&ctor, true); - - overlapped = false; - compressed_pages = pcl->compressed_pages; - - for (i = 0; i < clusterpages; ++i) { - unsigned int pagenr; - - page = compressed_pages[i]; - - /* all compressed pages ought to be valid */ - DBG_BUGON(!page); - DBG_BUGON(!page->mapping); - - if (!z_erofs_page_is_staging(page)) { - if (erofs_page_is_managed(sbi, page)) { - if (unlikely(!PageUptodate(page))) - err = -EIO; - continue; - } - - /* - * only if non-head page can be selected - * for inplace decompression - */ - pagenr = z_erofs_onlinepage_index(page); - - DBG_BUGON(pagenr >= nr_pages); - if (unlikely(pages[pagenr])) { - DBG_BUGON(1); - SetPageError(pages[pagenr]); - z_erofs_onlinepage_endio(pages[pagenr]); - err = -EFSCORRUPTED; - } - pages[pagenr] = page; - - overlapped = true; - } - - /* PG_error needs checking for inplaced and staging pages */ - if (unlikely(PageError(page))) { - DBG_BUGON(PageUptodate(page)); - err = -EIO; - } - } - - if (unlikely(err)) - goto out; - - llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; - if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) { - outputsize = llen; - partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); - } else { - outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs; - partial = true; - } - - err = z_erofs_decompress(&(struct z_erofs_decompress_req) { - .sb = sb, - .in = compressed_pages, - .out = pages, - .pageofs_out = cl->pageofs, - .inputsize = PAGE_SIZE, - .outputsize = outputsize, - .alg = pcl->algorithmformat, - .inplace_io = overlapped, - .partial_decoding = partial - }, pagepool); - -out: - /* must handle all compressed pages before endding pages */ - for (i = 0; i < clusterpages; ++i) { - page = compressed_pages[i]; - - if (erofs_page_is_managed(sbi, page)) - continue; - - /* recycle all individual staging pages */ - (void)z_erofs_put_stagingpage(pagepool, page); - - WRITE_ONCE(compressed_pages[i], NULL); - } - - for (i = 0; i < nr_pages; ++i) { - page = pages[i]; - if (!page) - continue; - - DBG_BUGON(!page->mapping); - - /* recycle all individual staging pages */ - if (z_erofs_put_stagingpage(pagepool, page)) - continue; - - if (unlikely(err < 0)) - SetPageError(page); - - z_erofs_onlinepage_endio(page); - } - - if (pages == z_pagemap_global) - mutex_unlock(&z_pagemap_global_lock); - else if 
(unlikely(pages != pages_onstack))
-		kvfree(pages);
-
-	cl->nr_pages = 0;
-	cl->vcnt = 0;
-
-	/* all cl locks MUST be taken before the following line */
-	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
-
-	/* all cl locks SHOULD be released right now */
-	mutex_unlock(&cl->lock);
-
-	z_erofs_collection_put(cl);
-	return err;
-}
-
-static void z_erofs_vle_unzip_all(struct super_block *sb,
-				  struct z_erofs_unzip_io *io,
-				  struct list_head *pagepool)
-{
-	z_erofs_next_pcluster_t owned = io->head;
-
-	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
-		struct z_erofs_pcluster *pcl;
-
-		/* 'owned' should never equal Z_EROFS_PCLUSTER_TAIL */
-		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
-
-		/* 'owned' should never be NULL */
-		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
-
-		pcl = container_of(owned, struct z_erofs_pcluster, next);
-		owned = READ_ONCE(pcl->next);
-
-		z_erofs_decompress_pcluster(sb, pcl, pagepool);
-	}
-}
-
-static void z_erofs_vle_unzip_wq(struct work_struct *work)
-{
-	struct z_erofs_unzip_io_sb *iosb =
-		container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
-	LIST_HEAD(pagepool);
-
-	DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
-
-	put_pages_list(&pagepool);
-	kvfree(iosb);
-}
-
-static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
-					       unsigned int nr,
-					       struct list_head *pagepool,
-					       struct address_space *mc,
-					       gfp_t gfp)
-{
-	/* determined at compile time to avoid too many #ifdefs */
-	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
-	const pgoff_t index = pcl->obj.index;
-	bool tocache = false;
-
-	struct address_space *mapping;
-	struct page *oldpage, *page;
-
-	compressed_page_t t;
-	int justfound;
-
-repeat:
-	page = READ_ONCE(pcl->compressed_pages[nr]);
-	oldpage = page;
-
-	if (!page)
-		goto out_allocpage;
-
-	/*
-	 * the cached page has not been allocated and
-	 * a placeholder is out there; prepare it now.
-	 */
-	if (!nocache && page == PAGE_UNALLOCATED) {
-		tocache = true;
-		goto out_allocpage;
-	}
-
-	/* process the target tagged pointer */
-	t = tagptr_init(compressed_page_t, page);
-	justfound = tagptr_unfold_tags(t);
-	page = tagptr_unfold_ptr(t);
-
-	mapping = READ_ONCE(page->mapping);
-
-	/*
-	 * if managed cache is disabled, there is no way to
-	 * get such a cached-like page.
-	 */
-	if (nocache) {
-		/* if managed cache is disabled, 'justfound' is impossible */
-		DBG_BUGON(justfound);
-
-		/* and it should be locked, not uptodate, and not truncated */
-		DBG_BUGON(!PageLocked(page));
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(!mapping);
-		goto out;
-	}
-
-	/*
-	 * unmanaged (file) pages are all locked solidly,
-	 * therefore it is impossible for 'mapping' to be NULL.
-	 */
-	if (mapping && mapping != mc)
-		/* ought to be unmanaged pages */
-		goto out;
-
-	lock_page(page);
-
-	/* only true if page reclaim goes wrong, should never happen */
-	DBG_BUGON(justfound && PagePrivate(page));
-
-	/* the page is still in managed cache */
-	if (page->mapping == mc) {
-		WRITE_ONCE(pcl->compressed_pages[nr], page);
-
-		ClearPageError(page);
-		if (!PagePrivate(page)) {
-			/*
-			 * !PagePrivate(page) is impossible under the
-			 * current restriction if the page is already
-			 * in compressed_pages[].
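
The chain walk in z_erofs_vle_unzip_all() above follows pointers to each pcluster's next field and recovers the enclosing object with container_of(); a compact userspace rendering (struct pcl and the id values are made up, and the sentinel mirrors Z_EROFS_PCLUSTER_TAIL_CLOSED):

    #include <stddef.h>
    #include <stdio.h>

    #define TAIL_CLOSED ((void *)0x5F0EDEAD)	/* sentinel from zdata.h */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct pcl {
    	int id;
    	void *next;	/* points at the NEXT pcluster's 'next' field */
    };

    int main(void)
    {
    	struct pcl b = { .id = 2, .next = TAIL_CLOSED };
    	struct pcl a = { .id = 1, .next = &b.next };
    	void *owned = &a.next;	/* chain head, like io->head above */

    	while (owned != TAIL_CLOSED) {
    		struct pcl *pcl = container_of(owned, struct pcl, next);

    		owned = pcl->next;	/* read before "decompressing" */
    		printf("process pcluster %d\n", pcl->id);
    	}
    	return 0;
    }
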
- */ - DBG_BUGON(!justfound); - - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); - } - - /* no need to submit io if it is already up-to-date */ - if (PageUptodate(page)) { - unlock_page(page); - page = NULL; - } - goto out; - } - - /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. - */ - DBG_BUGON(page->mapping); - DBG_BUGON(!justfound); - - tocache = true; - unlock_page(page); - put_page(page); -out_allocpage: - page = __stagingpage_alloc(pagepool, gfp); - if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { - list_add(&page->lru, pagepool); - cpu_relax(); - goto repeat; - } - if (nocache || !tocache) - goto out; - if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { - page->mapping = Z_EROFS_MAPPING_STAGING; - goto out; - } - - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); -out: /* the only exit (for tracing and debugging) */ - return page; -} - -static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, - struct z_erofs_unzip_io *io, - bool foreground) -{ - struct z_erofs_unzip_io_sb *iosb; - - if (foreground) { - /* waitqueue available for foreground io */ - DBG_BUGON(!io); - - init_waitqueue_head(&io->u.wait); - atomic_set(&io->pending_bios, 0); - goto out; - } - - iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); - DBG_BUGON(!iosb); - - /* initialize fields in the allocated descriptor */ - io = &iosb->io; - iosb->sb = sb; - INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); -out: - io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - return io; -} - -/* define decompression jobqueue types */ -enum { - JQ_BYPASS, - JQ_SUBMIT, - NR_JOBQUEUES, -}; - -static void *jobqueueset_init(struct super_block *sb, - z_erofs_next_pcluster_t qtail[], - struct z_erofs_unzip_io *q[], - struct z_erofs_unzip_io *fgq, - bool forcefg) -{ - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ - q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); - qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; - - q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); - qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; - - return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); -} - -static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t qtail[], - z_erofs_next_pcluster_t owned_head) -{ - z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; - z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; - - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - if (owned_head == Z_EROFS_PCLUSTER_TAIL) - owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); - - WRITE_ONCE(*submit_qtail, owned_head); - WRITE_ONCE(*bypass_qtail, &pcl->next); - - qtail[JQ_BYPASS] = &pcl->next; -} - -static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], - unsigned int nr_bios, - bool force_fg) -{ - /* - * although background is preferred, no one is pending for submission. - * don't issue workqueue for decompression but drop it directly instead. 
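
Both jobqueues above are appended to through qtail pointers, i.e. pointers to the link field that should receive the next element, which makes splicing a pcluster onto either queue an O(1) pointer patch; a tiny sketch of that idiom (enqueue() is a hypothetical helper):

    #include <stdio.h>

    struct node { int id; struct node *next; };

    /* append @n to a singly linked queue tracked by a tail link pointer */
    static void enqueue(struct node ***qtail, struct node *n)
    {
    	**qtail = n;		/* patch the previous tail link */
    	*qtail = &n->next;	/* the new tail link is n->next */
    }

    int main(void)
    {
    	struct node a = { .id = 1 }, b = { .id = 2 };
    	struct node *head = NULL, **qtail = &head;

    	enqueue(&qtail, &a);
    	enqueue(&qtail, &b);
    	*qtail = NULL;		/* close the queue */

    	for (struct node *n = head; n; n = n->next)
    		printf("submit %d\n", n->id);
    	return 0;
    }
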
- */ - if (force_fg || nr_bios) - return false; - - kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io)); - return true; -} - -static bool z_erofs_vle_submit_all(struct super_block *sb, - z_erofs_next_pcluster_t owned_head, - struct list_head *pagepool, - struct z_erofs_unzip_io *fgq, - bool force_fg) -{ - struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); - z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; - struct z_erofs_unzip_io *q[NR_JOBQUEUES]; - struct bio *bio; - void *bi_private; - /* since bio will be NULL, no need to initialize last_index */ - pgoff_t uninitialized_var(last_index); - bool force_submit = false; - unsigned int nr_bios; - - if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL)) - return false; - - force_submit = false; - bio = NULL; - nr_bios = 0; - bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); - - /* by default, all need io submission */ - q[JQ_SUBMIT]->head = owned_head; - - do { - struct z_erofs_pcluster *pcl; - unsigned int clusterpages; - pgoff_t first_index; - struct page *page; - unsigned int i = 0, bypass = 0; - int err; - - /* no possible 'owned_head' equals the following */ - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); - - pcl = container_of(owned_head, struct z_erofs_pcluster, next); - - clusterpages = BIT(pcl->clusterbits); - - /* close the main owned chain at first */ - owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - Z_EROFS_PCLUSTER_TAIL_CLOSED); - - first_index = pcl->obj.index; - force_submit |= (first_index != last_index + 1); - -repeat: - page = pickup_page_for_submission(pcl, i, pagepool, - MNGD_MAPPING(sbi), - GFP_NOFS); - if (!page) { - force_submit = true; - ++bypass; - goto skippage; - } - - if (bio && force_submit) { -submit_bio_retry: - __submit_bio(bio, REQ_OP_READ, 0); - bio = NULL; - } - - if (!bio) { - bio = erofs_grab_bio(sb, first_index + i, - BIO_MAX_PAGES, bi_private, - z_erofs_vle_read_endio, true); - ++nr_bios; - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - if (err < PAGE_SIZE) - goto submit_bio_retry; - - force_submit = false; - last_index = first_index + i; -skippage: - if (++i < clusterpages) - goto repeat; - - if (bypass < clusterpages) - qtail[JQ_SUBMIT] = &pcl->next; - else - move_to_bypass_jobqueue(pcl, qtail, owned_head); - } while (owned_head != Z_EROFS_PCLUSTER_TAIL); - - if (bio) - __submit_bio(bio, REQ_OP_READ, 0); - - if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) - return true; - - z_erofs_vle_unzip_kickoff(bi_private, nr_bios); - return true; -} - -static void z_erofs_submit_and_unzip(struct super_block *sb, - struct z_erofs_collector *clt, - struct list_head *pagepool, - bool force_fg) -{ - struct z_erofs_unzip_io io[NR_JOBQUEUES]; - - if (!z_erofs_vle_submit_all(sb, clt->owned_head, - pagepool, io, force_fg)) - return; - - /* decompress no I/O pclusters immediately */ - z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); - - if (!force_fg) - return; - - /* wait until all bios are completed */ - wait_event(io[JQ_SUBMIT].u.wait, - !atomic_read(&io[JQ_SUBMIT].pending_bios)); - - /* let's synchronous decompression */ - z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); -} - -static int z_erofs_vle_normalaccess_readpage(struct file *file, - struct page *page) -{ - struct inode *const inode = page->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - int err; - LIST_HEAD(pagepool); - - trace_erofs_readpage(page, false); - - f.headoffset = (erofs_off_t)page->index << 
PAGE_SHIFT; - - err = z_erofs_do_read_page(&f, page, &pagepool); - (void)z_erofs_collector_end(&f.clt); - - /* if some compressed cluster ready, need submit them anyway */ - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true); - - if (err) - errln("%s, failed to read, err [%d]", __func__, err); - - if (f.map.mpage) - put_page(f.map.mpage); - - /* clean up the remaining free pages */ - put_pages_list(&pagepool); - return err; -} - -static bool should_decompress_synchronously(struct erofs_sb_info *sbi, - unsigned int nr) -{ - return nr <= sbi->max_sync_decompress_pages; -} - -static int z_erofs_vle_normalaccess_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) -{ - struct inode *const inode = mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - - bool sync = should_decompress_synchronously(sbi, nr_pages); - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); - struct page *head = NULL; - LIST_HEAD(pagepool); - - trace_erofs_readpages(mapping->host, lru_to_page(pages), - nr_pages, false); - - f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; - - for (; nr_pages; --nr_pages) { - struct page *page = lru_to_page(pages); - - prefetchw(&page->flags); - list_del(&page->lru); - - /* - * A pure asynchronous readahead is indicated if - * a PG_readahead marked page is hitted at first. - * Let's also do asynchronous decompression for this case. - */ - sync &= !(PageReadahead(page) && !head); - - if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { - list_add(&page->lru, &pagepool); - continue; - } - - set_page_private(page, (unsigned long)head); - head = page; - } - - while (head) { - struct page *page = head; - int err; - - /* traversal in reverse order */ - head = (void *)page_private(page); - - err = z_erofs_do_read_page(&f, page, &pagepool); - if (err) { - struct erofs_vnode *vi = EROFS_V(inode); - - errln("%s, readahead error at page %lu of nid %llu", - __func__, page->index, vi->nid); - } - put_page(page); - } - - (void)z_erofs_collector_end(&f.clt); - - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync); - - if (f.map.mpage) - put_page(f.map.mpage); - - /* clean up the remaining free pages */ - put_pages_list(&pagepool); - return 0; -} - -const struct address_space_operations z_erofs_vle_normalaccess_aops = { - .readpage = z_erofs_vle_normalaccess_readpage, - .readpages = z_erofs_vle_normalaccess_readpages, -}; - diff --git a/drivers/staging/erofs/zdata.h b/drivers/staging/erofs/zdata.h deleted file mode 100644 index e11fe1959ca2..000000000000 --- a/drivers/staging/erofs/zdata.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/zdata.h - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_ZDATA_H -#define __EROFS_FS_ZDATA_H - -#include "internal.h" -#include "zpvec.h" - -#define Z_EROFS_NR_INLINE_PAGEVECS 3 - -/* - * Structure fields follow one of the following exclusion rules. - * - * I: Modifiable by initialization/destruction paths and read-only - * for everyone else; - * - * L: Field should be protected by pageset lock; - * - * A: Field should be accessed / updated in atomic for parallelized code. 
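
The readpages path above chains locked pages into a stack through page_private() and pops them back off, which is why the traversal naturally runs in reverse order; modeled in plain C (the two-field struct page is a stand-in, and reusing 'private' as a link assumes a pointer fits in an unsigned long):

    #include <stdio.h>

    struct page { int index; unsigned long private; };

    int main(void)
    {
    	struct page pages[3] = { { .index = 0 }, { .index = 1 }, { .index = 2 } };
    	struct page *head = NULL;

    	/* push each page, reusing page->private as the link field */
    	for (int i = 0; i < 3; i++) {
    		pages[i].private = (unsigned long)head;
    		head = &pages[i];
    	}

    	/* pop: pages come back in reverse submission order (2, 1, 0) */
    	while (head) {
    		struct page *page = head;

    		head = (struct page *)page->private;
    		printf("read page %d\n", page->index);
    	}
    	return 0;
    }
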
- */ -struct z_erofs_collection { - struct mutex lock; - - /* I: page offset of start position of decompression */ - unsigned short pageofs; - - /* L: maximum relative page index in pagevec[] */ - unsigned short nr_pages; - - /* L: total number of pages in pagevec[] */ - unsigned int vcnt; - - union { - /* L: inline a certain number of pagevecs for bootstrap */ - erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS]; - - /* I: can be used to free the pcluster by RCU. */ - struct rcu_head rcu; - }; -}; - -#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001 -#define Z_EROFS_PCLUSTER_LENGTH_BIT 1 - -/* - * let's leave a type here in case of introducing - * another tagged pointer later. - */ -typedef void *z_erofs_next_pcluster_t; - -struct z_erofs_pcluster { - struct erofs_workgroup obj; - struct z_erofs_collection primary_collection; - - /* A: point to next chained pcluster or TAILs */ - z_erofs_next_pcluster_t next; - - /* A: compressed pages (including multi-usage pages) */ - struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES]; - - /* A: lower limit of decompressed length and if full length or not */ - unsigned int length; - - /* I: compression algorithm format */ - unsigned char algorithmformat; - /* I: bit shift of physical cluster size */ - unsigned char clusterbits; -}; - -#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection) - -/* let's avoid the valid 32-bit kernel addresses */ - -/* the chained workgroup has't submitted io (still open) */ -#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE) -/* the chained workgroup has already submitted io */ -#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD) - -#define Z_EROFS_PCLUSTER_NIL (NULL) - -#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) - -struct z_erofs_unzip_io { - atomic_t pending_bios; - z_erofs_next_pcluster_t head; - - union { - wait_queue_head_t wait; - struct work_struct work; - } u; -}; - -struct z_erofs_unzip_io_sb { - struct z_erofs_unzip_io io; - struct super_block *sb; -}; - -#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) -static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, - struct page *page) -{ - return page->mapping == MNGD_MAPPING(sbi); -} - -#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2 -#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1) -#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS) - -/* - * waiters (aka. 
ongoing_packs): # to unlock the page - * sub-index: 0 - for partial page, >= 1 full page sub-index - */ -typedef atomic_t z_erofs_onlinepage_t; - -/* type punning */ -union z_erofs_onlinepage_converter { - z_erofs_onlinepage_t *o; - unsigned long *v; -}; - -static inline unsigned int z_erofs_onlinepage_index(struct page *page) -{ - union z_erofs_onlinepage_converter u; - - DBG_BUGON(!PagePrivate(page)); - u.v = &page_private(page); - - return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; -} - -static inline void z_erofs_onlinepage_init(struct page *page) -{ - union { - z_erofs_onlinepage_t o; - unsigned long v; - /* keep from being unlocked in advance */ - } u = { .o = ATOMIC_INIT(1) }; - - set_page_private(page, u.v); - smp_wmb(); - SetPagePrivate(page); -} - -static inline void z_erofs_onlinepage_fixup(struct page *page, - uintptr_t index, bool down) -{ - unsigned long *p, o, v, id; -repeat: - p = &page_private(page); - o = READ_ONCE(*p); - - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; - if (id) { - if (!index) - return; - - DBG_BUGON(id != index); - } - - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); - if (cmpxchg(p, o, v) != o) - goto repeat; -} - -static inline void z_erofs_onlinepage_endio(struct page *page) -{ - union z_erofs_onlinepage_converter u; - unsigned int v; - - DBG_BUGON(!PagePrivate(page)); - u.v = &page_private(page); - - v = atomic_dec_return(u.o); - if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) { - ClearPagePrivate(page); - if (!PageError(page)) - SetPageUptodate(page); - unlock_page(page); - } - debugln("%s, page %p value %x", __func__, page, atomic_read(u.o)); -} - -#define Z_EROFS_VMAP_ONSTACK_PAGES \ - min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U) -#define Z_EROFS_VMAP_GLOBAL_PAGES 2048 - -#endif - diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c deleted file mode 100644 index 774dacbc5b32..000000000000 --- a/drivers/staging/erofs/zmap.c +++ /dev/null @@ -1,468 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/zmap.c - * - * Copyright (C) 2018-2019 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include -#include - -int z_erofs_fill_inode(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - - if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { - vi->z_advise = 0; - vi->z_algorithmtype[0] = 0; - vi->z_algorithmtype[1] = 0; - vi->z_logical_clusterbits = LOG_BLOCK_SIZE; - vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits; - vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits; - set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); - } - - inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops; - return 0; -} - -static int fill_inode_lazy(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - int err; - erofs_off_t pos; - struct page *page; - void *kaddr; - struct z_erofs_map_header *h; - - if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) - return 0; - - if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE)) - return -ERESTARTSYS; - - err = 0; - if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) - goto out_unlock; - - DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY); - - pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + - vi->xattr_isize, 8); - page = erofs_get_meta_page(sb, erofs_blknr(pos), false); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto out_unlock; - } - - kaddr = kmap_atomic(page); - - h = kaddr + erofs_blkoff(pos); - vi->z_advise = le16_to_cpu(h->h_advise); - vi->z_algorithmtype[0] = h->h_algorithmtype & 15; - vi->z_algorithmtype[1] = h->h_algorithmtype >> 4; - - if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) { - errln("unknown compression format %u for nid %llu, please upgrade kernel", - vi->z_algorithmtype[0], vi->nid); - err = -EOPNOTSUPP; - goto unmap_done; - } - - vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); - vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits + - ((h->h_clusterbits >> 3) & 3); - - if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) { - errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel", - vi->z_physical_clusterbits[0], vi->nid); - err = -EOPNOTSUPP; - goto unmap_done; - } - - vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + - ((h->h_clusterbits >> 5) & 7); - set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); -unmap_done: - kunmap_atomic(kaddr); - unlock_page(page); - put_page(page); -out_unlock: - clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags); - return err; -} - -struct z_erofs_maprecorder { - struct inode *inode; - struct erofs_map_blocks *map; - void *kaddr; - - unsigned long lcn; - /* compression extent information gathered */ - u8 type; - u16 clusterofs; - u16 delta[2]; - erofs_blk_t pblk; -}; - -static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, - erofs_blk_t eblk) -{ - struct super_block *const sb = m->inode->i_sb; - struct erofs_map_blocks *const map = m->map; - struct page *mpage = map->mpage; - - if (mpage) { - if (mpage->index == eblk) { - if (!m->kaddr) - m->kaddr = kmap_atomic(mpage); - return 0; - } - - if (m->kaddr) { - kunmap_atomic(m->kaddr); - m->kaddr = NULL; - } - put_page(mpage); - } - - mpage = erofs_get_meta_page(sb, eblk, false); - if (IS_ERR(mpage)) { - map->mpage = NULL; - return PTR_ERR(mpage); - } - m->kaddr = kmap_atomic(mpage); - unlock_page(mpage); - map->mpage = mpage; - return 0; -} - -static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned long lcn) -{ - struct inode *const inode = 
m->inode; - struct erofs_vnode *const vi = EROFS_V(inode); - const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid); - const erofs_off_t pos = - Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize + - vi->xattr_isize) + - lcn * sizeof(struct z_erofs_vle_decompressed_index); - struct z_erofs_vle_decompressed_index *di; - unsigned int advise, type; - int err; - - err = z_erofs_reload_indexes(m, erofs_blknr(pos)); - if (err) - return err; - - m->lcn = lcn; - di = m->kaddr + erofs_blkoff(pos); - - advise = le16_to_cpu(di->di_advise); - type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & - ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); - switch (type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - m->clusterofs = 1 << vi->z_logical_clusterbits; - m->delta[0] = le16_to_cpu(di->di_u.delta[0]); - m->delta[1] = le16_to_cpu(di->di_u.delta[1]); - break; - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - m->clusterofs = le16_to_cpu(di->di_clusterofs); - m->pblk = le32_to_cpu(di->di_u.blkaddr); - break; - default: - DBG_BUGON(1); - return -EOPNOTSUPP; - } - m->type = type; - return 0; -} - -static unsigned int decode_compactedbits(unsigned int lobits, - unsigned int lomask, - u8 *in, unsigned int pos, u8 *type) -{ - const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); - const unsigned int lo = v & lomask; - - *type = (v >> lobits) & 3; - return lo; -} - -static int unpack_compacted_index(struct z_erofs_maprecorder *m, - unsigned int amortizedshift, - unsigned int eofs) -{ - struct erofs_vnode *const vi = EROFS_V(m->inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; - const unsigned int lomask = (1 << lclusterbits) - 1; - unsigned int vcnt, base, lo, encodebits, nblk; - int i; - u8 *in, type; - - if (1 << amortizedshift == 4) - vcnt = 2; - else if (1 << amortizedshift == 2 && lclusterbits == 12) - vcnt = 16; - else - return -EOPNOTSUPP; - - encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; - base = round_down(eofs, vcnt << amortizedshift); - in = m->kaddr + base; - - i = (eofs - base) >> amortizedshift; - - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - m->type = type; - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { - m->clusterofs = 1 << lclusterbits; - if (i + 1 != vcnt) { - m->delta[0] = lo; - return 0; - } - /* - * since the last lcluster in the pack is special, - * of which lo saves delta[1] rather than delta[0]. - * Hence, get delta[0] by the previous lcluster indirectly. 
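
decode_compactedbits() above extracts a 2-bit type plus a lobits-wide 'lo' field from a packed little-endian bitstream; a portable userspace equivalent (decode() and the hand-packed test vector are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_unaligned_le32(const uint8_t *p)
    {
    	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
    	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    /* pull one (2-bit type, lobits-bit lo) field out of the bitstream */
    static unsigned int decode(unsigned int lobits, const uint8_t *in,
    			   unsigned int pos, unsigned int *type)
    {
    	unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);

    	*type = (v >> lobits) & 3;
    	return v & ((1u << lobits) - 1);
    }

    int main(void)
    {
    	/* 0x1234 at bit 0: lo = 0x234 (12 bits), type = 1 */
    	uint8_t in[8] = { 0x34, 0x12 };
    	unsigned int type;
    	unsigned int lo = decode(12, in, 0, &type);

    	printf("lo=0x%x type=%u\n", lo, type);	/* lo=0x234 type=1 */
    	return 0;
    }
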
- */ - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * (i - 1), &type); - if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - lo = 0; - m->delta[0] = lo + 1; - return 0; - } - m->clusterofs = lo; - m->delta[0] = 0; - /* figout out blkaddr (pblk) for HEAD lclusters */ - nblk = 1; - while (i > 0) { - --i; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - i -= lo; - - if (i >= 0) - ++nblk; - } - in += (vcnt << amortizedshift) - sizeof(__le32); - m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; - return 0; -} - -static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned long lcn) -{ - struct inode *const inode = m->inode; - struct erofs_vnode *const vi = EROFS_V(inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; - const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + - vi->inode_isize + vi->xattr_isize, 8) + - sizeof(struct z_erofs_map_header); - const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); - unsigned int compacted_4b_initial, compacted_2b; - unsigned int amortizedshift; - erofs_off_t pos; - int err; - - if (lclusterbits != 12) - return -EOPNOTSUPP; - - if (lcn >= totalidx) - return -EINVAL; - - m->lcn = lcn; - /* used to align to 32-byte (compacted_2b) alignment */ - compacted_4b_initial = (32 - ebase % 32) / 4; - if (compacted_4b_initial == 32 / 4) - compacted_4b_initial = 0; - - if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) - compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); - else - compacted_2b = 0; - - pos = ebase; - if (lcn < compacted_4b_initial) { - amortizedshift = 2; - goto out; - } - pos += compacted_4b_initial * 4; - lcn -= compacted_4b_initial; - - if (lcn < compacted_2b) { - amortizedshift = 1; - goto out; - } - pos += compacted_2b * 2; - lcn -= compacted_2b; - amortizedshift = 2; -out: - pos += lcn * (1 << amortizedshift); - err = z_erofs_reload_indexes(m, erofs_blknr(pos)); - if (err) - return err; - return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); -} - -static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned int lcn) -{ - const unsigned int datamode = EROFS_V(m->inode)->datamode; - - if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) - return vle_legacy_load_cluster_from_disk(m, lcn); - - if (datamode == EROFS_INODE_FLAT_COMPRESSION) - return compacted_load_cluster_from_disk(m, lcn); - - return -EINVAL; -} - -static int vle_extent_lookback(struct z_erofs_maprecorder *m, - unsigned int lookback_distance) -{ - struct erofs_vnode *const vi = EROFS_V(m->inode); - struct erofs_map_blocks *const map = m->map; - const unsigned int lclusterbits = vi->z_logical_clusterbits; - unsigned long lcn = m->lcn; - int err; - - if (lcn < lookback_distance) { - errln("bogus lookback distance @ nid %llu", vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - /* load extent head logical cluster if needed */ - lcn -= lookback_distance; - err = vle_load_cluster_from_disk(m, lcn); - if (err) - return err; - - switch (m->type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - if (unlikely(!m->delta[0])) { - errln("invalid lookback distance 0 at nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - return vle_extent_lookback(m, m->delta[0]); - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - map->m_la = (lcn << lclusterbits) | m->clusterofs; - break; - default: - 
errln("unknown type %u at lcn %lu of nid %llu", - m->type, lcn, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - return 0; -} - -int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct z_erofs_maprecorder m = { - .inode = inode, - .map = map, - }; - int err = 0; - unsigned int lclusterbits, endoff; - unsigned long long ofs, end; - - trace_z_erofs_map_blocks_iter_enter(inode, map, flags); - - /* when trying to read beyond EOF, leave it unmapped */ - if (unlikely(map->m_la >= inode->i_size)) { - map->m_llen = map->m_la + 1 - inode->i_size; - map->m_la = inode->i_size; - map->m_flags = 0; - goto out; - } - - err = fill_inode_lazy(inode); - if (err) - goto out; - - lclusterbits = vi->z_logical_clusterbits; - ofs = map->m_la; - m.lcn = ofs >> lclusterbits; - endoff = ofs & ((1 << lclusterbits) - 1); - - err = vle_load_cluster_from_disk(&m, m.lcn); - if (err) - goto unmap_out; - - map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ - end = (m.lcn + 1ULL) << lclusterbits; - - switch (m.type) { - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - if (endoff >= m.clusterofs) - map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - if (endoff >= m.clusterofs) { - map->m_la = (m.lcn << lclusterbits) | m.clusterofs; - break; - } - /* m.lcn should be >= 1 if endoff < m.clusterofs */ - if (unlikely(!m.lcn)) { - errln("invalid logical cluster 0 at nid %llu", - vi->nid); - err = -EFSCORRUPTED; - goto unmap_out; - } - end = (m.lcn << lclusterbits) | m.clusterofs; - map->m_flags |= EROFS_MAP_FULL_MAPPED; - m.delta[0] = 1; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - /* get the correspoinding first chunk */ - err = vle_extent_lookback(&m, m.delta[0]); - if (unlikely(err)) - goto unmap_out; - break; - default: - errln("unknown type %u at offset %llu of nid %llu", - m.type, ofs, vi->nid); - err = -EOPNOTSUPP; - goto unmap_out; - } - - map->m_llen = end - map->m_la; - map->m_plen = 1 << lclusterbits; - map->m_pa = blknr_to_addr(m.pblk); - map->m_flags |= EROFS_MAP_MAPPED; - -unmap_out: - if (m.kaddr) - kunmap_atomic(m.kaddr); - -out: - debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o", - __func__, map->m_la, map->m_pa, - map->m_llen, map->m_plen, map->m_flags); - - trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); - - /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */ - DBG_BUGON(err < 0 && err != -ENOMEM); - return err; -} - diff --git a/drivers/staging/erofs/zpvec.h b/drivers/staging/erofs/zpvec.h deleted file mode 100644 index 9798f5627786..000000000000 --- a/drivers/staging/erofs/zpvec.h +++ /dev/null @@ -1,159 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/zpvec.h - * - * Copyright (C) 2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_ZPVEC_H -#define __EROFS_FS_ZPVEC_H - -#include "tagptr.h" - -/* page type in pagevec for decompress subsystem */ -enum z_erofs_page_type { - /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */ - Z_EROFS_PAGE_TYPE_EXCLUSIVE, - - Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED, - - Z_EROFS_VLE_PAGE_TYPE_HEAD, - Z_EROFS_VLE_PAGE_TYPE_MAX -}; - -extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0") - __bad_page_type_exclusive(void); - -/* pagevec tagged pointer */ -typedef tagptr2_t erofs_vtptr_t; - -/* pagevec collector */ -struct z_erofs_pagevec_ctor { - struct page *curr, *next; - erofs_vtptr_t *pages; - - unsigned int nr, index; -}; - -static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, - bool atomic) -{ - if (!ctor->curr) - return; - - if (atomic) - kunmap_atomic(ctor->pages); - else - kunmap(ctor->curr); -} - -static inline struct page * -z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor, - unsigned int nr) -{ - unsigned int index; - - /* keep away from occupied pages */ - if (ctor->next) - return ctor->next; - - for (index = 0; index < nr; ++index) { - const erofs_vtptr_t t = ctor->pages[index]; - const unsigned int tags = tagptr_unfold_tags(t); - - if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE) - return tagptr_unfold_ptr(t); - } - DBG_BUGON(nr >= ctor->nr); - return NULL; -} - -static inline void -z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, - bool atomic) -{ - struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); - - z_erofs_pagevec_ctor_exit(ctor, atomic); - - ctor->curr = next; - ctor->next = NULL; - ctor->pages = atomic ? - kmap_atomic(ctor->curr) : kmap(ctor->curr); - - ctor->nr = PAGE_SIZE / sizeof(struct page *); - ctor->index = 0; -} - -static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor, - unsigned int nr, - erofs_vtptr_t *pages, - unsigned int i) -{ - ctor->nr = nr; - ctor->curr = ctor->next = NULL; - ctor->pages = pages; - - if (i >= nr) { - i -= nr; - z_erofs_pagevec_ctor_pagedown(ctor, false); - while (i > ctor->nr) { - i -= ctor->nr; - z_erofs_pagevec_ctor_pagedown(ctor, false); - } - } - ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i); - ctor->index = i; -} - -static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, - struct page *page, - enum z_erofs_page_type type, - bool *occupied) -{ - *occupied = false; - if (unlikely(!ctor->next && type)) - if (ctor->index + 1 == ctor->nr) - return false; - - if (unlikely(ctor->index >= ctor->nr)) - z_erofs_pagevec_ctor_pagedown(ctor, false); - - /* exclusive page type must be 0 */ - if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL) - __bad_page_type_exclusive(); - - /* should remind that collector->next never equal to 1, 2 */ - if (type == (uintptr_t)ctor->next) { - ctor->next = page; - *occupied = true; - } - ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); - return true; -} - -static inline struct page * -z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor, - enum z_erofs_page_type *type) -{ - erofs_vtptr_t t; - - if (unlikely(ctor->index >= ctor->nr)) { - DBG_BUGON(!ctor->next); - z_erofs_pagevec_ctor_pagedown(ctor, true); - } - - t = ctor->pages[ctor->index]; - - *type = tagptr_unfold_tags(t); - - /* should remind that collector->next never equal to 1, 2 */ - if (*type == (uintptr_t)ctor->next) - ctor->next = tagptr_unfold_ptr(t); - - ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); - 
return tagptr_unfold_ptr(t);
-}
-#endif
-
diff --git a/fs/Kconfig b/fs/Kconfig
index bfb1c6095c7a..669d46550e6d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -261,6 +261,7 @@ source "fs/romfs/Kconfig"
 source "fs/pstore/Kconfig"
 source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
+source "fs/erofs/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
diff --git a/fs/Makefile b/fs/Makefile
index d60089fd689b..b2e4973a0bea 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -130,3 +130,4 @@ obj-$(CONFIG_F2FS_FS) += f2fs/
 obj-$(CONFIG_CEPH_FS) += ceph/
 obj-$(CONFIG_PSTORE) += pstore/
 obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_EROFS_FS) += erofs/
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
new file mode 100644
index 000000000000..16316d1adca3
--- /dev/null
+++ b/fs/erofs/Kconfig
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EROFS_FS
+	tristate "EROFS filesystem support"
+	depends on BLOCK
+	help
+	  EROFS (Enhanced Read-Only File System) is a lightweight
+	  read-only file system with modern designs (e.g. page-sized
+	  blocks, inline xattrs/data, etc.) for scenarios with
+	  high-performance read-only requirements, e.g. Android OS
+	  for mobile phones and live CDs.
+
+	  It also provides fixed-sized output compression support,
+	  which improves storage density and keeps relatively high
+	  compression ratios, helping achieve high performance on
+	  embedded devices with limited memory.
+
+	  If unsure, say N.
+
+config EROFS_FS_DEBUG
+	bool "EROFS debugging feature"
+	depends on EROFS_FS
+	help
+	  Print debugging messages and enable more BUG_ONs that check
+	  filesystem consistency and find potential issues aggressively;
+	  this can be used for Android eng builds, for example.
+
+	  For daily use, say N.
+
+config EROFS_FAULT_INJECTION
+	bool "EROFS fault injection facility"
+	depends on EROFS_FS
+	help
+	  Test EROFS to inject faults such as ENOMEM, EIO, and so on.
+	  If unsure, say N.
+
+config EROFS_FS_XATTR
+	bool "EROFS extended attributes"
+	depends on EROFS_FS
+	default y
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config EROFS_FS_POSIX_ACL
+	bool "EROFS Access Control Lists"
+	depends on EROFS_FS_XATTR
+	select FS_POSIX_ACL
+	default y
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the POSIX ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EROFS_FS_SECURITY
+	bool "EROFS Security Labels"
+	depends on EROFS_FS_XATTR
+	default y
+	help
+	  Security labels provide an access control facility to support Linux
+	  Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
+	  Linux. This option enables an extended attribute handler for file
+	  security labels in the erofs filesystem, and therefore requires
+	  extended attribute support to be enabled in advance.
+
+	  If you are not using a security module, say N.
+
+config EROFS_FS_ZIP
+	bool "EROFS Data Compression Support"
+	depends on EROFS_FS
+	select LZ4_DECOMPRESS
+	default y
+	help
+	  Enable fixed-sized output compression for EROFS.
+
+	  If you don't want to enable the compression feature, say N.
+
+config EROFS_FS_CLUSTER_PAGE_LIMIT
+	int "EROFS Cluster Pages Hard Limit"
+	depends on EROFS_FS_ZIP
+	range 1 256
+	default "1"
+	help
+	  Indicates the maximum number of pages in a compressed
+	  physical cluster.
+
+	  For example, if files in an image were compressed in
+	  8k units, the hard limit should not be set below 2;
+	  otherwise, mounting the image will be refused on this
+	  kernel.
+
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
new file mode 100644
index 000000000000..46f2aa4ba46c
--- /dev/null
+++ b/fs/erofs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+EROFS_VERSION = "1.0"
+
+ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+
+obj-$(CONFIG_EROFS_FS) += erofs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
new file mode 100644
index 000000000000..07d279fd5d67
--- /dev/null
+++ b/fs/erofs/compress.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_FS_COMPRESS_H
+#define __EROFS_FS_COMPRESS_H
+
+#include "internal.h"
+
+enum {
+	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
+	Z_EROFS_COMPRESSION_RUNTIME_MAX
+};
+
+struct z_erofs_decompress_req {
+	struct super_block *sb;
+	struct page **in, **out;
+
+	unsigned short pageofs_out;
+	unsigned int inputsize, outputsize;
+
+	/* indicates the algorithm to be used for decompression */
+	unsigned int alg;
+	bool inplace_io, partial_decoding;
+};
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ * used to mark temporarily allocated pages, as distinct from other
+ * file/cached pages and NULL-mapping pages.
+ */
+#define Z_EROFS_MAPPING_STAGING	((void *)0x5A110C8D)
+
+/* check if a page is marked as staging */
+static inline bool z_erofs_page_is_staging(struct page *page)
+{
+	return page->mapping == Z_EROFS_MAPPING_STAGING;
+}
+
+static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
+					   struct page *page)
+{
+	if (!z_erofs_page_is_staging(page))
+		return false;
+
+	/* staging pages should not be used by others at the same time */
+	if (page_ref_count(page) > 1)
+		put_page(page);
+	else
+		list_add(&page->lru, pagepool);
+	return true;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+		       struct list_head *pagepool);
+
+#endif
+
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
new file mode 100644
index 000000000000..fda16ec8863e
--- /dev/null
+++ b/fs/erofs/data.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" +#include + +#include + +static inline void read_endio(struct bio *bio) +{ + struct super_block *const sb = bio->bi_private; + struct bio_vec *bvec; + blk_status_t err = bio->bi_status; + struct bvec_iter_all iter_all; + + if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) { + erofs_show_injection_info(FAULT_READ_IO); + err = BLK_STS_IOERR; + } + + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + /* page is already locked */ + DBG_BUGON(PageUptodate(page)); + + if (unlikely(err)) + SetPageError(page); + else + SetPageUptodate(page); + + unlock_page(page); + /* page could be reclaimed now */ + } + bio_put(bio); +} + +/* prio -- true is used for dir */ +struct page *__erofs_get_meta_page(struct super_block *sb, + erofs_blk_t blkaddr, bool prio, bool nofail) +{ + struct inode *const bd_inode = sb->s_bdev->bd_inode; + struct address_space *const mapping = bd_inode->i_mapping; + /* prefer retrying in the allocator to blindly looping below */ + const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) | + (nofail ? __GFP_NOFAIL : 0); + unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0; + struct page *page; + int err; + +repeat: + page = find_or_create_page(mapping, blkaddr, gfp); + if (unlikely(!page)) { + DBG_BUGON(nofail); + return ERR_PTR(-ENOMEM); + } + DBG_BUGON(!PageLocked(page)); + + if (!PageUptodate(page)) { + struct bio *bio; + + bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail); + if (IS_ERR(bio)) { + DBG_BUGON(nofail); + err = PTR_ERR(bio); + goto err_out; + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + if (unlikely(err != PAGE_SIZE)) { + err = -EFAULT; + goto err_out; + } + + __submit_bio(bio, REQ_OP_READ, + REQ_META | (prio ? REQ_PRIO : 0)); + + lock_page(page); + + /* this page has been truncated by others */ + if (unlikely(page->mapping != mapping)) { +unlock_repeat: + unlock_page(page); + put_page(page); + goto repeat; + } + + /* more likely a read error */ + if (unlikely(!PageUptodate(page))) { + if (io_retries) { + --io_retries; + goto unlock_repeat; + } + err = -EIO; + goto err_out; + } + } + return page; + +err_out: + unlock_page(page); + put_page(page); + return ERR_PTR(err); +} + +static int erofs_map_blocks_flatmode(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + int err = 0; + erofs_blk_t nblocks, lastblk; + u64 offset = map->m_la; + struct erofs_vnode *vi = EROFS_V(inode); + + trace_erofs_map_blocks_flatmode_enter(inode, map, flags); + + nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); + lastblk = nblocks - is_inode_flat_inline(inode); + + if (unlikely(offset >= inode->i_size)) { + /* leave out-of-bound access unmapped */ + map->m_flags = 0; + map->m_plen = 0; + goto out; + } + + /* there is no hole in flatmode */ + map->m_flags = EROFS_MAP_MAPPED; + + if (offset < blknr_to_addr(lastblk)) { + map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; + map->m_plen = blknr_to_addr(lastblk) - offset; + } else if (is_inode_flat_inline(inode)) { + /* 2 - inode inline B: inode, [xattrs], inline last blk... 
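+		 *
+		 * As a worked arithmetic example (4KiB blocks assumed):
+		 * a 10000-byte inline-tail file has
+		 * nblocks = DIV_ROUND_UP(10000, 4096) = 3 and
+		 * lastblk = 3 - 1 = 2, so offsets in [0, 8192) map to
+		 * raw blocks above, while offsets in [8192, 10000)
+		 * take this inline branch and are served from the
+		 * tail of the inode's own metadata block.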
*/ + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + + map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + + vi->xattr_isize + erofs_blkoff(map->m_la); + map->m_plen = inode->i_size - offset; + + /* inline data should be located in one meta block */ + if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { + errln("inline data cross block boundary @ nid %llu", + vi->nid); + DBG_BUGON(1); + err = -EFSCORRUPTED; + goto err_out; + } + + map->m_flags |= EROFS_MAP_META; + } else { + errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", + vi->nid, inode->i_size, map->m_la); + DBG_BUGON(1); + err = -EIO; + goto err_out; + } + +out: + map->m_llen = map->m_plen; + +err_out: + trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); + return err; +} + +int erofs_map_blocks(struct inode *inode, + struct erofs_map_blocks *map, int flags) +{ + if (unlikely(is_inode_layout_compression(inode))) { + int err = z_erofs_map_blocks_iter(inode, map, flags); + + if (map->mpage) { + put_page(map->mpage); + map->mpage = NULL; + } + return err; + } + return erofs_map_blocks_flatmode(inode, map, flags); +} + +static inline struct bio *erofs_read_raw_page(struct bio *bio, + struct address_space *mapping, + struct page *page, + erofs_off_t *last_block, + unsigned int nblocks, + bool ra) +{ + struct inode *const inode = mapping->host; + struct super_block *const sb = inode->i_sb; + erofs_off_t current_block = (erofs_off_t)page->index; + int err; + + DBG_BUGON(!nblocks); + + if (PageUptodate(page)) { + err = 0; + goto has_updated; + } + + /* note that for readpage case, bio also equals to NULL */ + if (bio && + /* not continuous */ + *last_block + 1 != current_block) { +submit_bio_retry: + __submit_bio(bio, REQ_OP_READ, 0); + bio = NULL; + } + + if (!bio) { + struct erofs_map_blocks map = { + .m_la = blknr_to_addr(current_block), + }; + erofs_blk_t blknr; + unsigned int blkoff; + + err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + if (unlikely(err)) + goto err_out; + + /* zero out the holed page */ + if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) { + zero_user_segment(page, 0, PAGE_SIZE); + SetPageUptodate(page); + + /* imply err = 0, see erofs_map_blocks */ + goto has_updated; + } + + /* for RAW access mode, m_plen must be equal to m_llen */ + DBG_BUGON(map.m_plen != map.m_llen); + + blknr = erofs_blknr(map.m_pa); + blkoff = erofs_blkoff(map.m_pa); + + /* deal with inline page */ + if (map.m_flags & EROFS_MAP_META) { + void *vsrc, *vto; + struct page *ipage; + + DBG_BUGON(map.m_plen > PAGE_SIZE); + + ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); + + if (IS_ERR(ipage)) { + err = PTR_ERR(ipage); + goto err_out; + } + + vsrc = kmap_atomic(ipage); + vto = kmap_atomic(page); + memcpy(vto, vsrc + blkoff, map.m_plen); + memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); + kunmap_atomic(vto); + kunmap_atomic(vsrc); + flush_dcache_page(page); + + SetPageUptodate(page); + /* TODO: could we unlock the page earlier? 
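+			 *
+			 * A hedged calling sketch for erofs_map_blocks()
+			 * above, assuming only the raw extent is needed:
+			 *
+			 *	struct erofs_map_blocks map = { .m_la = pos };
+			 *
+			 *	err = erofs_map_blocks(inode, &map,
+			 *			       EROFS_GET_BLOCKS_RAW);
+			 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
+			 *		blkaddr = erofs_blknr(map.m_pa);
+			 *
+			 * An unmapped result denotes a hole, which the
+			 * caller above zero-fills via zero_user_segment().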
*/ + unlock_page(ipage); + put_page(ipage); + + /* imply err = 0, see erofs_map_blocks */ + goto has_updated; + } + + /* pa must be block-aligned for raw reading */ + DBG_BUGON(erofs_blkoff(map.m_pa)); + + /* max # of continuous pages */ + if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) + nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); + if (nblocks > BIO_MAX_PAGES) + nblocks = BIO_MAX_PAGES; + + bio = erofs_grab_bio(sb, blknr, nblocks, sb, + read_endio, false); + if (IS_ERR(bio)) { + err = PTR_ERR(bio); + bio = NULL; + goto err_out; + } + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + /* out of the extent or bio is full */ + if (err < PAGE_SIZE) + goto submit_bio_retry; + + *last_block = current_block; + + /* shift in advance in case of it followed by too many gaps */ + if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { + /* err should reassign to 0 after submitting */ + err = 0; + goto submit_bio_out; + } + + return bio; + +err_out: + /* for sync reading, set page error immediately */ + if (!ra) { + SetPageError(page); + ClearPageUptodate(page); + } +has_updated: + unlock_page(page); + + /* if updated manually, continuous pages has a gap */ + if (bio) +submit_bio_out: + __submit_bio(bio, REQ_OP_READ, 0); + + return unlikely(err) ? ERR_PTR(err) : NULL; +} + +/* + * since we dont have write or truncate flows, so no inode + * locking needs to be held at the moment. + */ +static int erofs_raw_access_readpage(struct file *file, struct page *page) +{ + erofs_off_t last_block; + struct bio *bio; + + trace_erofs_readpage(page, true); + + bio = erofs_read_raw_page(NULL, page->mapping, + page, &last_block, 1, false); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ + return 0; +} + +static int erofs_raw_access_readpages(struct file *filp, + struct address_space *mapping, + struct list_head *pages, + unsigned int nr_pages) +{ + erofs_off_t last_block; + struct bio *bio = NULL; + gfp_t gfp = readahead_gfp_mask(mapping); + struct page *page = list_last_entry(pages, struct page, lru); + + trace_erofs_readpages(mapping->host, page, nr_pages, true); + + for (; nr_pages; --nr_pages) { + page = list_entry(pages->prev, struct page, lru); + + prefetchw(&page->flags); + list_del(&page->lru); + + if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { + bio = erofs_read_raw_page(bio, mapping, page, + &last_block, nr_pages, true); + + /* all the page errors are ignored when readahead */ + if (IS_ERR(bio)) { + pr_err("%s, readahead error at page %lu of nid %llu\n", + __func__, page->index, + EROFS_V(mapping->host)->nid); + + bio = NULL; + } + } + + /* pages could still be locked */ + put_page(page); + } + DBG_BUGON(!list_empty(pages)); + + /* the rare case (end in gaps) */ + if (unlikely(bio)) + __submit_bio(bio, REQ_OP_READ, 0); + return 0; +} + +static int erofs_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int create) +{ + struct erofs_map_blocks map = { + .m_la = iblock << 9, + }; + int err; + + err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + if (err) + return err; + + if (map.m_flags & EROFS_MAP_MAPPED) + bh->b_blocknr = erofs_blknr(map.m_pa); + + return err; +} + +static sector_t erofs_bmap(struct address_space *mapping, sector_t block) +{ + struct inode *inode = mapping->host; + + if (is_inode_flat_inline(inode)) { + erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; + + if (block >> LOG_SECTORS_PER_BLOCK >= blks) + return 0; + } + + return generic_block_bmap(mapping, 
block, erofs_get_block);
+}
+
+/* for uncompressed (aligned) files and raw access for other files */
+const struct address_space_operations erofs_raw_access_aops = {
+	.readpage = erofs_raw_access_readpage,
+	.readpages = erofs_raw_access_readpages,
+	.bmap = erofs_bmap,
+};
+
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
new file mode 100644
index 000000000000..5f4b7f302863
--- /dev/null
+++ b/fs/erofs/decompressor.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "compress.h"
+#include <linux/module.h>
+#include <linux/lz4.h>
+
+#ifndef LZ4_DISTANCE_MAX	/* history window size */
+#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
+#endif
+
+#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
+#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
+#endif
+
+struct z_erofs_decompressor {
+	/*
+	 * if destpages contain sparse (missing) pages, fill them with
+	 * bounce pages. it also checks whether destpages indicate
+	 * continuous physical memory.
+	 */
+	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
+				 struct list_head *pagepool);
+	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
+	char *name;
+};
+
+static bool use_vmap;
+module_param(use_vmap, bool, 0444);
+MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");
+
+static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
+				 struct list_head *pagepool)
+{
+	const unsigned int nr =
+		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
+	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
+					   BITS_PER_LONG)] = { 0 };
+	void *kaddr = NULL;
+	unsigned int i, j, top;
+
+	top = 0;
+	for (i = j = 0; i < nr; ++i, ++j) {
+		struct page *const page = rq->out[i];
+		struct page *victim;
+
+		if (j >= LZ4_MAX_DISTANCE_PAGES)
+			j = 0;
+
+		/* 'valid' bounced can only be tested after a complete round */
+		if (test_bit(j, bounced)) {
+			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
+			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
+			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
+		}
+
+		if (page) {
+			__clear_bit(j, bounced);
+			if (kaddr) {
+				if (kaddr + PAGE_SIZE == page_address(page))
+					kaddr += PAGE_SIZE;
+				else
+					kaddr = NULL;
+			} else if (!i) {
+				kaddr = page_address(page);
+			}
+			continue;
+		}
+		kaddr = NULL;
+		__set_bit(j, bounced);
+
+		if (top) {
+			victim = availables[--top];
+			get_page(victim);
+		} else {
+			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+			if (unlikely(!victim))
+				return -ENOMEM;
+			victim->mapping = Z_EROFS_MAPPING_STAGING;
+		}
+		rq->out[i] = victim;
+	}
+	return kaddr ? 1 : 0;
+}
+
+static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
+				       u8 *src, unsigned int pageofs_in)
+{
+	/*
+	 * if in-place decompression is ongoing, those decompressed
+	 * pages should be copied in order to avoid being overwritten.
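+	 *
+	 * Hedged window arithmetic for lz4_prepare_destpages() above,
+	 * with 4KiB pages: LZ4 matches reference at most
+	 * LZ4_DISTANCE_MAX = 65535 preceding bytes, hence
+	 * LZ4_MAX_DISTANCE_PAGES = DIV_ROUND_UP(65535, 4096) + 1 = 17,
+	 * i.e. only the 17 most recently filled output slots need to be
+	 * backed by real or bounce pages at any moment.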
+ */ + struct page **in = rq->in; + u8 *const tmp = erofs_get_pcpubuf(0); + u8 *tmpp = tmp; + unsigned int inlen = rq->inputsize - pageofs_in; + unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); + + while (tmpp < tmp + inlen) { + if (!src) + src = kmap_atomic(*in); + memcpy(tmpp, src + pageofs_in, count); + kunmap_atomic(src); + src = NULL; + tmpp += count; + pageofs_in = 0; + count = PAGE_SIZE; + ++in; + } + return tmp; +} + +static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) +{ + unsigned int inputmargin, inlen; + u8 *src; + bool copied, support_0padding; + int ret; + + if (rq->inputsize > PAGE_SIZE) + return -EOPNOTSUPP; + + src = kmap_atomic(*rq->in); + inputmargin = 0; + support_0padding = false; + + /* decompression inplace is only safe when 0padding is enabled */ + if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) { + support_0padding = true; + + while (!src[inputmargin & ~PAGE_MASK]) + if (!(++inputmargin & ~PAGE_MASK)) + break; + + if (inputmargin >= rq->inputsize) { + kunmap_atomic(src); + return -EIO; + } + } + + copied = false; + inlen = rq->inputsize - inputmargin; + if (rq->inplace_io) { + const uint oend = (rq->pageofs_out + + rq->outputsize) & ~PAGE_MASK; + const uint nr = PAGE_ALIGN(rq->pageofs_out + + rq->outputsize) >> PAGE_SHIFT; + + if (rq->partial_decoding || !support_0padding || + rq->out[nr - 1] != rq->in[0] || + rq->inputsize - oend < + LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { + src = generic_copy_inplace_data(rq, src, inputmargin); + inputmargin = 0; + copied = true; + } + } + + ret = LZ4_decompress_safe_partial(src + inputmargin, out, + inlen, rq->outputsize, + rq->outputsize); + if (ret < 0) { + errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]", + __func__, src + inputmargin, inlen, inputmargin, + out, rq->outputsize); + WARN_ON(1); + print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, + 16, 1, src + inputmargin, inlen, true); + print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, + 16, 1, out, rq->outputsize, true); + ret = -EIO; + } + + if (copied) + erofs_put_pcpubuf(src); + else + kunmap_atomic(src); + return ret; +} + +static struct z_erofs_decompressor decompressors[] = { + [Z_EROFS_COMPRESSION_SHIFTED] = { + .name = "shifted" + }, + [Z_EROFS_COMPRESSION_LZ4] = { + .prepare_destpages = lz4_prepare_destpages, + .decompress = lz4_decompress, + .name = "lz4" + }, +}; + +static void copy_from_pcpubuf(struct page **out, const char *dst, + unsigned short pageofs_out, + unsigned int outputsize) +{ + const char *end = dst + outputsize; + const unsigned int righthalf = PAGE_SIZE - pageofs_out; + const char *cur = dst - pageofs_out; + + while (cur < end) { + struct page *const page = *out++; + + if (page) { + char *buf = kmap_atomic(page); + + if (cur >= dst) { + memcpy(buf, cur, min_t(uint, PAGE_SIZE, + end - cur)); + } else { + memcpy(buf + pageofs_out, cur + pageofs_out, + min_t(uint, righthalf, end - cur)); + } + kunmap_atomic(buf); + } + cur += PAGE_SIZE; + } +} + +static void *erofs_vmap(struct page **pages, unsigned int count) +{ + int i = 0; + + if (use_vmap) + return vmap(pages, count, VM_MAP, PAGE_KERNEL); + + while (1) { + void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL); + + /* retry two more times (totally 3 times) */ + if (addr || ++i >= 3) + return addr; + vm_unmap_aliases(); + } + return NULL; +} + +static void erofs_vunmap(const void *mem, unsigned int count) +{ + if (!use_vmap) + vm_unmap_ram(mem, count); + else + vunmap(mem); +} + +static int decompress_generic(struct 
z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const struct z_erofs_decompressor *alg = decompressors + rq->alg; + unsigned int dst_maptype; + void *dst; + int ret; + + if (nrpages_out == 1 && !rq->inplace_io) { + DBG_BUGON(!*rq->out); + dst = kmap_atomic(*rq->out); + dst_maptype = 0; + goto dstmap_out; + } + + /* + * For the case of small output size (especially much less + * than PAGE_SIZE), memcpy the decompressed data rather than + * compressed data is preferred. + */ + if (rq->outputsize <= PAGE_SIZE * 7 / 8) { + dst = erofs_get_pcpubuf(0); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + rq->inplace_io = false; + ret = alg->decompress(rq, dst); + if (!ret) + copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, + rq->outputsize); + + erofs_put_pcpubuf(dst); + return ret; + } + + ret = alg->prepare_destpages(rq, pagepool); + if (ret < 0) { + return ret; + } else if (ret) { + dst = page_address(*rq->out); + dst_maptype = 1; + goto dstmap_out; + } + + dst = erofs_vmap(rq->out, nrpages_out); + if (!dst) + return -ENOMEM; + dst_maptype = 2; + +dstmap_out: + ret = alg->decompress(rq, dst + rq->pageofs_out); + + if (!dst_maptype) + kunmap_atomic(dst); + else if (dst_maptype == 2) + erofs_vunmap(dst, nrpages_out); + return ret; +} + +static int shifted_decompress(const struct z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out; + unsigned char *src, *dst; + + if (nrpages_out > 2) { + DBG_BUGON(1); + return -EIO; + } + + if (rq->out[0] == *rq->in) { + DBG_BUGON(nrpages_out != 1); + return 0; + } + + src = kmap_atomic(*rq->in); + if (!rq->out[0]) { + dst = NULL; + } else { + dst = kmap_atomic(rq->out[0]); + memcpy(dst + rq->pageofs_out, src, righthalf); + } + + if (rq->out[1] == *rq->in) { + memmove(src, src + righthalf, rq->pageofs_out); + } else if (nrpages_out == 2) { + if (dst) + kunmap_atomic(dst); + DBG_BUGON(!rq->out[1]); + dst = kmap_atomic(rq->out[1]); + memcpy(dst, src + righthalf, rq->pageofs_out); + } + if (dst) + kunmap_atomic(dst); + kunmap_atomic(src); + return 0; +} + +int z_erofs_decompress(struct z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) + return shifted_decompress(rq, pagepool); + return decompress_generic(rq, pagepool); +} + diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c new file mode 100644 index 000000000000..1976e60e5174 --- /dev/null +++ b/fs/erofs/dir.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
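+ *
+ * A hedged invocation sketch for z_erofs_decompress() from the
+ * previous file, with illustrative field values only:
+ *
+ *	struct z_erofs_decompress_req rq = {
+ *		.sb = sb,
+ *		.in = compressed_pages,
+ *		.out = decompressed_pages,
+ *		.pageofs_out = 0,
+ *		.inputsize = PAGE_SIZE,
+ *		.outputsize = 4 * PAGE_SIZE,
+ *		.alg = Z_EROFS_COMPRESSION_LZ4,
+ *	};
+ *
+ *	err = z_erofs_decompress(&rq, pagepool);
+ *
+ * The shifted path short-circuits to plain page copies, while the LZ4
+ * path decompresses into a per-CPU buffer, a vmap()ed area or a
+ * directly kmapped page depending on output size and contiguity.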
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" + +static void debug_one_dentry(unsigned char d_type, const char *de_name, + unsigned int de_namelen) +{ +#ifdef CONFIG_EROFS_FS_DEBUG + /* since the on-disk name could not have the trailing '\0' */ + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; + + memcpy(dbg_namebuf, de_name, de_namelen); + dbg_namebuf[de_namelen] = '\0'; + + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, + de_namelen, d_type); +#endif +} + +static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, + void *dentry_blk, unsigned int *ofs, + unsigned int nameoff, unsigned int maxsize) +{ + struct erofs_dirent *de = dentry_blk + *ofs; + const struct erofs_dirent *end = dentry_blk + nameoff; + + while (de < end) { + const char *de_name; + unsigned int de_namelen; + unsigned char d_type; + + d_type = fs_ftype_to_dtype(de->file_type); + + nameoff = le16_to_cpu(de->nameoff); + de_name = (char *)dentry_blk + nameoff; + + /* the last dirent in the block? */ + if (de + 1 >= end) + de_namelen = strnlen(de_name, maxsize - nameoff); + else + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; + + /* a corrupted entry is found */ + if (unlikely(nameoff + de_namelen > maxsize || + de_namelen > EROFS_NAME_LEN)) { + errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + debug_one_dentry(d_type, de_name, de_namelen); + if (!dir_emit(ctx, de_name, de_namelen, + le64_to_cpu(de->nid), d_type)) + /* stopped by some reason */ + return 1; + ++de; + *ofs += sizeof(struct erofs_dirent); + } + *ofs = maxsize; + return 0; +} + +static int erofs_readdir(struct file *f, struct dir_context *ctx) +{ + struct inode *dir = file_inode(f); + struct address_space *mapping = dir->i_mapping; + const size_t dirsize = i_size_read(dir); + unsigned int i = ctx->pos / EROFS_BLKSIZ; + unsigned int ofs = ctx->pos % EROFS_BLKSIZ; + int err = 0; + bool initial = true; + + while (ctx->pos < dirsize) { + struct page *dentry_page; + struct erofs_dirent *de; + unsigned int nameoff, maxsize; + + dentry_page = read_mapping_page(mapping, i, NULL); + if (dentry_page == ERR_PTR(-ENOMEM)) { + err = -ENOMEM; + break; + } else if (IS_ERR(dentry_page)) { + errln("fail to readdir of logical block %u of nid %llu", + i, EROFS_V(dir)->nid); + err = -EFSCORRUPTED; + break; + } + + de = (struct erofs_dirent *)kmap(dentry_page); + + nameoff = le16_to_cpu(de->nameoff); + + if (unlikely(nameoff < sizeof(struct erofs_dirent) || + nameoff >= PAGE_SIZE)) { + errln("%s, invalid de[0].nameoff %u @ nid %llu", + __func__, nameoff, EROFS_V(dir)->nid); + err = -EFSCORRUPTED; + goto skip_this; + } + + maxsize = min_t(unsigned int, + dirsize - ctx->pos + ofs, PAGE_SIZE); + + /* search dirents at the arbitrary position */ + if (unlikely(initial)) { + initial = false; + + ofs = roundup(ofs, sizeof(struct erofs_dirent)); + if (unlikely(ofs >= nameoff)) + goto skip_this; + } + + err = erofs_fill_dentries(dir, ctx, de, &ofs, + nameoff, maxsize); +skip_this: + kunmap(dentry_page); + + put_page(dentry_page); + + ctx->pos = blknr_to_addr(i) + ofs; + + if (unlikely(err)) + break; + ++i; + ofs = 0; + } + return err < 0 ? 
err : 0; +} + +const struct file_operations erofs_dir_fops = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .iterate_shared = erofs_readdir, +}; + diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h new file mode 100644 index 000000000000..afa7d45ca958 --- /dev/null +++ b/fs/erofs/erofs_fs.h @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_H +#define __EROFS_FS_H + +/* Enhanced(Extended) ROM File System */ +#define EROFS_SUPER_OFFSET 1024 + +/* + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be + * incompatible with this kernel version. + */ +#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001 +#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING + +struct erofs_super_block { +/* 0 */__le32 magic; /* in the little endian */ +/* 4 */__le32 checksum; /* crc32c(super_block) */ +/* 8 */__le32 features; /* (aka. feature_compat) */ +/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ +/* 13 */__u8 reserved; + +/* 14 */__le16 root_nid; +/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */ + +/* 24 */__le64 build_time; /* inode v1 time derivation */ +/* 32 */__le32 build_time_nsec; +/* 36 */__le32 blocks; /* used for statfs */ +/* 40 */__le32 meta_blkaddr; +/* 44 */__le32 xattr_blkaddr; +/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ +/* 64 */__u8 volume_name[16]; /* volume name */ +/* 80 */__le32 requirements; /* (aka. feature_incompat) */ + +/* 84 */__u8 reserved2[44]; +} __packed; /* 128 bytes */ + +/* + * erofs inode data mapping: + * 0 - inode plain without inline data A: + * inode, [xattrs], ... | ... | no-holed data + * 1 - inode VLE compression B (legacy): + * inode, [xattrs], extents ... | ... + * 2 - inode plain with inline data C: + * inode, [xattrs], last_inline_data, ... | ... | no-holed data + * 3 - inode compression D: + * inode, [xattrs], map_header, extents ... | ... 
+ * 4~7 - reserved + */ +enum { + EROFS_INODE_FLAT_PLAIN, + EROFS_INODE_FLAT_COMPRESSION_LEGACY, + EROFS_INODE_FLAT_INLINE, + EROFS_INODE_FLAT_COMPRESSION, + EROFS_INODE_LAYOUT_MAX +}; + +static inline bool erofs_inode_is_data_compressed(unsigned int datamode) +{ + if (datamode == EROFS_INODE_FLAT_COMPRESSION) + return true; + return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; +} + +/* bit definitions of inode i_advise */ +#define EROFS_I_VERSION_BITS 1 +#define EROFS_I_DATA_MAPPING_BITS 3 + +#define EROFS_I_VERSION_BIT 0 +#define EROFS_I_DATA_MAPPING_BIT 1 + +struct erofs_inode_v1 { +/* 0 */__le16 i_advise; + +/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ +/* 2 */__le16 i_xattr_icount; +/* 4 */__le16 i_mode; +/* 6 */__le16 i_nlink; +/* 8 */__le32 i_size; +/* 12 */__le32 i_reserved; +/* 16 */union { + /* file total compressed blocks for data mapping 1 */ + __le32 compressed_blocks; + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + } i_u __packed; +/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */ +/* 24 */__le16 i_uid; +/* 26 */__le16 i_gid; +/* 28 */__le32 i_reserved2; +} __packed; + +/* 32 bytes on-disk inode */ +#define EROFS_INODE_LAYOUT_V1 0 +/* 64 bytes on-disk inode */ +#define EROFS_INODE_LAYOUT_V2 1 + +struct erofs_inode_v2 { +/* 0 */__le16 i_advise; + +/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ +/* 2 */__le16 i_xattr_icount; +/* 4 */__le16 i_mode; +/* 6 */__le16 i_reserved; +/* 8 */__le64 i_size; +/* 16 */union { + /* file total compressed blocks for data mapping 1 */ + __le32 compressed_blocks; + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + } i_u __packed; + + /* only used for 32-bit stat compatibility */ +/* 20 */__le32 i_ino; + +/* 24 */__le32 i_uid; +/* 28 */__le32 i_gid; +/* 32 */__le64 i_ctime; +/* 40 */__le32 i_ctime_nsec; +/* 44 */__le32 i_nlink; +/* 48 */__u8 i_reserved2[16]; +} __packed; /* 64 bytes */ + +#define EROFS_MAX_SHARED_XATTRS (128) +/* h_shared_count between 129 ... 255 are special # */ +#define EROFS_SHARED_XATTR_EXTENT (255) + +/* + * inline xattrs (n == i_xattr_icount): + * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes + * 12 bytes / \ + * / \ + * /-----------------------\ + * | erofs_xattr_entries+ | + * +-----------------------+ + * inline xattrs must starts in erofs_xattr_ibody_header, + * for read-only fs, no need to introduce h_refcount + */ +struct erofs_xattr_ibody_header { + __le32 h_reserved; + __u8 h_shared_count; + __u8 h_reserved2[7]; + __le32 h_shared_xattrs[0]; /* shared xattr id array */ +} __packed; + +/* Name indexes */ +#define EROFS_XATTR_INDEX_USER 1 +#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 +#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 +#define EROFS_XATTR_INDEX_TRUSTED 4 +#define EROFS_XATTR_INDEX_LUSTRE 5 +#define EROFS_XATTR_INDEX_SECURITY 6 + +/* xattr entry (for both inline & shared xattrs) */ +struct erofs_xattr_entry { + __u8 e_name_len; /* length of name */ + __u8 e_name_index; /* attribute name index */ + __le16 e_value_size; /* size of attribute value */ + /* followed by e_name and e_value */ + char e_name[0]; /* attribute name */ +} __packed; + +#define ondisk_xattr_ibody_size(count) ({\ + u32 __count = le16_to_cpu(count); \ + ((__count) == 0) ? 
0 : \ + sizeof(struct erofs_xattr_ibody_header) + \ + sizeof(__u32) * ((__count) - 1); }) + +#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) +#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \ + sizeof(struct erofs_xattr_entry) + \ + (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)) + +/* available compression algorithm types */ +enum { + Z_EROFS_COMPRESSION_LZ4, + Z_EROFS_COMPRESSION_MAX +}; + +/* + * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) + * e.g. for 4k logical cluster size, 4B if compacted 2B is off; + * (4B) + 2B + (4B) if compacted 2B is on. + */ +#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 + +#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) + +struct z_erofs_map_header { + __le32 h_reserved1; + __le16 h_advise; + /* + * bit 0-3 : algorithm type of head 1 (logical cluster type 01); + * bit 4-7 : algorithm type of head 2 (logical cluster type 11). + */ + __u8 h_algorithmtype; + /* + * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; + * bit 3-4 : (physical - logical) cluster bits of head 1: + * For example, if logical clustersize = 4096, 1 for 8192. + * bit 5-7 : (physical - logical) cluster bits of head 2. + */ + __u8 h_clusterbits; +}; + +#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 + +/* + * Z_EROFS Variable-sized Logical Extent cluster type: + * 0 - literal (uncompressed) cluster + * 1 - compressed cluster (for the head logical cluster) + * 2 - compressed cluster (for the other logical clusters) + * + * In detail, + * 0 - literal (uncompressed) cluster, + * di_advise = 0 + * di_clusterofs = the literal data offset of the cluster + * di_blkaddr = the blkaddr of the literal cluster + * + * 1 - compressed cluster (for the head logical cluster) + * di_advise = 1 + * di_clusterofs = the decompressed data offset of the cluster + * di_blkaddr = the blkaddr of the compressed cluster + * + * 2 - compressed cluster (for the other logical clusters) + * di_advise = 2 + * di_clusterofs = + * the decompressed data offset in its own head cluster + * di_u.delta[0] = distance to its corresponding head cluster + * di_u.delta[1] = distance to its corresponding tail cluster + * (di_advise could be 0, 1 or 2) + */ +enum { + Z_EROFS_VLE_CLUSTER_TYPE_PLAIN, + Z_EROFS_VLE_CLUSTER_TYPE_HEAD, + Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, + Z_EROFS_VLE_CLUSTER_TYPE_RESERVED, + Z_EROFS_VLE_CLUSTER_TYPE_MAX +}; + +#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 +#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 + +struct z_erofs_vle_decompressed_index { + __le16 di_advise; + /* where to decompress in the head cluster */ + __le16 di_clusterofs; + + union { + /* for the head cluster */ + __le32 blkaddr; + /* + * for the rest clusters + * eg. 
for 4k page-sized cluster, maximum 4K*64k = 256M) + * [0] - pointing to the head cluster + * [1] - pointing to the tail cluster + */ + __le16 delta[2]; + } di_u __packed; /* 8 bytes */ +} __packed; + +#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \ + (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \ + sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) + +/* dirent sorts in alphabet order, thus we can do binary search */ +struct erofs_dirent { + __le64 nid; /* 0, node number */ + __le16 nameoff; /* 8, start offset of file name */ + __u8 file_type; /* 10, file type */ + __u8 reserved; /* 11, reserved */ +} __packed; + +/* + * EROFS file types should match generic FT_* types and + * it seems no need to add BUILD_BUG_ONs since potential + * unmatchness will break other fses as well... + */ + +#define EROFS_NAME_LEN 255 + +/* check the EROFS on-disk layout strictly at compile time */ +static inline void erofs_check_ondisk_layout_definitions(void) +{ + BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); + BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32); + BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64); + BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); + BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4); + BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); + BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); + BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); + + BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < + Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); +} + +#endif + diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c new file mode 100644 index 000000000000..80f4fe919ee7 --- /dev/null +++ b/fs/erofs/inode.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
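+ *
+ * A worked example of ondisk_xattr_ibody_size() from erofs_fs.h
+ * above: with i_xattr_icount == 3, the inline xattr area takes the
+ * 12-byte erofs_xattr_ibody_header plus (3 - 1) * 4 = 8 bytes of
+ * shared xattr ids, 20 bytes in total, followed by the aligned
+ * erofs_xattr_entry records themselves.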
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "xattr.h" + +#include + +/* no locking */ +static int read_inode(struct inode *inode, void *data) +{ + struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode_v1 *v1 = data; + const unsigned int advise = le16_to_cpu(v1->i_advise); + erofs_blk_t nblks = 0; + + vi->datamode = __inode_data_mapping(advise); + + if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) { + errln("unsupported data mapping %u of nid %llu", + vi->datamode, vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + + if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) { + struct erofs_inode_v2 *v2 = data; + + vi->inode_isize = sizeof(struct erofs_inode_v2); + vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount); + + inode->i_mode = le16_to_cpu(v2->i_mode); + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr); + else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) + inode->i_rdev = + new_decode_dev(le32_to_cpu(v2->i_u.rdev)); + else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) + inode->i_rdev = 0; + else + goto bogusimode; + + i_uid_write(inode, le32_to_cpu(v2->i_uid)); + i_gid_write(inode, le32_to_cpu(v2->i_gid)); + set_nlink(inode, le32_to_cpu(v2->i_nlink)); + + /* ns timestamp */ + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = + le64_to_cpu(v2->i_ctime); + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = + le32_to_cpu(v2->i_ctime_nsec); + + inode->i_size = le64_to_cpu(v2->i_size); + + /* total blocks for compressed files */ + if (is_inode_layout_compression(inode)) + nblks = le32_to_cpu(v2->i_u.compressed_blocks); + } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) { + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + + vi->inode_isize = sizeof(struct erofs_inode_v1); + vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount); + + inode->i_mode = le16_to_cpu(v1->i_mode); + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr); + else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) + inode->i_rdev = + new_decode_dev(le32_to_cpu(v1->i_u.rdev)); + else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) + inode->i_rdev = 0; + else + goto bogusimode; + + i_uid_write(inode, le16_to_cpu(v1->i_uid)); + i_gid_write(inode, le16_to_cpu(v1->i_gid)); + set_nlink(inode, le16_to_cpu(v1->i_nlink)); + + /* use build time to derive all file time */ + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = + sbi->build_time; + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = + sbi->build_time_nsec; + + inode->i_size = le32_to_cpu(v1->i_size); + if (is_inode_layout_compression(inode)) + nblks = le32_to_cpu(v1->i_u.compressed_blocks); + } else { + errln("unsupported on-disk inode version %u of nid %llu", + __inode_version(advise), vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + + if (!nblks) + /* measure inode.i_blocks as generic filesystems */ + inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; + else + inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; + return 0; + +bogusimode: + errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; +} + +/* + * try_lock can be required since locking order is: + * file data(fs_inode) + * meta(bd_inode) + * but the majority of the callers is "iget", + * in that case we are pretty sure no deadlock since + * no data operations exist. 
However I tend to + * try_lock since it takes no much overhead and + * will success immediately. + */ +static int fill_inline_data(struct inode *inode, void *data, + unsigned int m_pofs) +{ + struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_sb_info *sbi = EROFS_I_SB(inode); + + /* should be inode inline C */ + if (!is_inode_flat_inline(inode)) + return 0; + + /* fast symlink (following ext4) */ + if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) { + char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL); + + if (unlikely(!lnk)) + return -ENOMEM; + + m_pofs += vi->inode_isize + vi->xattr_isize; + + /* inline symlink data shouldn't across page boundary as well */ + if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { + kfree(lnk); + errln("inline data cross block boundary @ nid %llu", + vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + /* get in-page inline data */ + memcpy(lnk, data + m_pofs, inode->i_size); + lnk[inode->i_size] = '\0'; + + inode->i_link = lnk; + set_inode_fast_symlink(inode); + } + return 0; +} + +static int fill_inode(struct inode *inode, int isdir) +{ + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + struct erofs_vnode *vi = EROFS_V(inode); + struct page *page; + void *data; + int err; + erofs_blk_t blkaddr; + unsigned int ofs; + erofs_off_t inode_loc; + + trace_erofs_fill_inode(inode, isdir); + inode_loc = iloc(sbi, vi->nid); + blkaddr = erofs_blknr(inode_loc); + ofs = erofs_blkoff(inode_loc); + + debugln("%s, reading inode nid %llu at %u of blkaddr %u", + __func__, vi->nid, ofs, blkaddr); + + page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir); + + if (IS_ERR(page)) { + errln("failed to get inode (nid: %llu) page, err %ld", + vi->nid, PTR_ERR(page)); + return PTR_ERR(page); + } + + DBG_BUGON(!PageUptodate(page)); + data = page_address(page); + + err = read_inode(inode, data + ofs); + if (!err) { + /* setup the new inode */ + if (S_ISREG(inode->i_mode)) { + inode->i_op = &erofs_generic_iops; + inode->i_fop = &generic_ro_fops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &erofs_dir_iops; + inode->i_fop = &erofs_dir_fops; + } else if (S_ISLNK(inode->i_mode)) { + /* by default, page_get_link is used for symlink */ + inode->i_op = &erofs_symlink_iops; + inode_nohighmem(inode); + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { + inode->i_op = &erofs_generic_iops; + init_special_inode(inode, inode->i_mode, inode->i_rdev); + goto out_unlock; + } else { + err = -EFSCORRUPTED; + goto out_unlock; + } + + if (is_inode_layout_compression(inode)) { + err = z_erofs_fill_inode(inode); + goto out_unlock; + } + + inode->i_mapping->a_ops = &erofs_raw_access_aops; + + /* fill last page if inline data is available */ + err = fill_inline_data(inode, data, ofs); + } + +out_unlock: + unlock_page(page); + put_page(page); + return err; +} + +/* + * erofs nid is 64bits, but i_ino is 'unsigned long', therefore + * we should do more for 32-bit platform to find the right inode. 
+#if BITS_PER_LONG == 32
+static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
+{
+	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+	return EROFS_V(inode)->nid == nid;
+}
+
+static int erofs_iget_set_actor(struct inode *inode, void *opaque)
+{
+	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+	inode->i_ino = erofs_inode_hash(nid);
+	return 0;
+}
+#endif
+
+static inline struct inode *erofs_iget_locked(struct super_block *sb,
+					      erofs_nid_t nid)
+{
+	const unsigned long hashval = erofs_inode_hash(nid);
+
+#if BITS_PER_LONG >= 64
+	/* it is safe to use iget_locked on >= 64-bit platforms */
+	return iget_locked(sb, hashval);
+#else
+	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
+		erofs_iget_set_actor, &nid);
+#endif
+}
+
+struct inode *erofs_iget(struct super_block *sb,
+			 erofs_nid_t nid,
+			 bool isdir)
+{
+	struct inode *inode = erofs_iget_locked(sb, nid);
+
+	if (unlikely(!inode))
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW) {
+		int err;
+		struct erofs_vnode *vi = EROFS_V(inode);
+
+		vi->nid = nid;
+
+		err = fill_inode(inode, isdir);
+		if (likely(!err))
+			unlock_new_inode(inode);
+		else {
+			iget_failed(inode);
+			inode = ERR_PTR(err);
+		}
+	}
+	return inode;
+}
+
+int erofs_getattr(const struct path *path, struct kstat *stat,
+		  u32 request_mask, unsigned int query_flags)
+{
+	struct inode *const inode = d_inode(path->dentry);
+
+	if (is_inode_layout_compression(inode))
+		stat->attributes |= STATX_ATTR_COMPRESSED;
+
+	stat->attributes |= STATX_ATTR_IMMUTABLE;
+	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
+				  STATX_ATTR_IMMUTABLE);
+
+	generic_fillattr(inode, stat);
+	return 0;
+}
+
+const struct inode_operations erofs_generic_iops = {
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_symlink_iops = {
+	.get_link = page_get_link,
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_fast_symlink_iops = {
+	.get_link = simple_get_link,
+	.getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+	.listxattr = erofs_listxattr,
+#endif
+	.get_acl = erofs_get_acl,
+};
+
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
new file mode 100644
index 000000000000..620b73fcc416
--- /dev/null
+++ b/fs/erofs/internal.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_INTERNAL_H
+#define __EROFS_INTERNAL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "erofs_fs.h"
+
+/* redefine pr_fmt "erofs: " */
+#undef pr_fmt
+#define pr_fmt(fmt) "erofs: " fmt
+
+#define errln(x, ...)	pr_err(x "\n", ##__VA_ARGS__)
+#define infoln(x, ...)	pr_info(x "\n", ##__VA_ARGS__)
+#ifdef CONFIG_EROFS_FS_DEBUG
+#define debugln(x, ...)	pr_debug(x "\n", ##__VA_ARGS__)
+#define DBG_BUGON	BUG_ON
+#else
+#define debugln(x, ...)
((void)0) +#define DBG_BUGON(x) ((void)(x)) +#endif /* !CONFIG_EROFS_FS_DEBUG */ + +enum { + FAULT_KMALLOC, + FAULT_READ_IO, + FAULT_MAX, +}; + +#ifdef CONFIG_EROFS_FAULT_INJECTION +extern const char *erofs_fault_name[FAULT_MAX]; +#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) + +struct erofs_fault_info { + atomic_t inject_ops; + unsigned int inject_rate; + unsigned int inject_type; +}; +#endif /* CONFIG_EROFS_FAULT_INJECTION */ + +/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ +#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 + +typedef u64 erofs_nid_t; +typedef u64 erofs_off_t; +/* data type for filesystem-wide blocks number */ +typedef u32 erofs_blk_t; + +struct erofs_sb_info { +#ifdef CONFIG_EROFS_FS_ZIP + /* list for all registered superblocks, mainly for shrinker */ + struct list_head list; + struct mutex umount_mutex; + + /* the dedicated workstation for compression */ + struct radix_tree_root workstn_tree; + + /* threshold for decompression synchronously */ + unsigned int max_sync_decompress_pages; + + unsigned int shrinker_run_no; + + /* current strategy of how to use managed cache */ + unsigned char cache_strategy; + + /* pseudo inode to manage cached pages */ + struct inode *managed_cache; +#endif /* CONFIG_EROFS_FS_ZIP */ + u32 blocks; + u32 meta_blkaddr; +#ifdef CONFIG_EROFS_FS_XATTR + u32 xattr_blkaddr; +#endif + + /* inode slot unit size in bit shift */ + unsigned char islotbits; + + u32 build_time_nsec; + u64 build_time; + + /* what we really care is nid, rather than ino.. */ + erofs_nid_t root_nid; + /* used for statfs, f_files - f_favail */ + u64 inos; + + u8 uuid[16]; /* 128-bit uuid for volume */ + u8 volume_name[16]; /* volume name */ + u32 requirements; + + unsigned int mount_opt; + +#ifdef CONFIG_EROFS_FAULT_INJECTION + struct erofs_fault_info fault_info; /* For fault injection */ +#endif +}; + +#ifdef CONFIG_EROFS_FAULT_INJECTION +#define erofs_show_injection_info(type) \ + infoln("inject %s in %s of %pS", erofs_fault_name[type], \ + __func__, __builtin_return_address(0)) + +static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) +{ + struct erofs_fault_info *ffi = &sbi->fault_info; + + if (!ffi->inject_rate) + return false; + + if (!IS_FAULT_SET(ffi, type)) + return false; + + atomic_inc(&ffi->inject_ops); + if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { + atomic_set(&ffi->inject_ops, 0); + return true; + } + return false; +} +#else +static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) +{ + return false; +} + +static inline void erofs_show_injection_info(int type) +{ +} +#endif /* !CONFIG_EROFS_FAULT_INJECTION */ + +static inline void *erofs_kmalloc(struct erofs_sb_info *sbi, + size_t size, gfp_t flags) +{ + if (time_to_inject(sbi, FAULT_KMALLOC)) { + erofs_show_injection_info(FAULT_KMALLOC); + return NULL; + } + return kmalloc(size, flags); +} + +#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) +#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) + +/* Mount flags set via mount options or defaults */ +#define EROFS_MOUNT_XATTR_USER 0x00000010 +#define EROFS_MOUNT_POSIX_ACL 0x00000020 +#define EROFS_MOUNT_FAULT_INJECTION 0x00000040 + +#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) +#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) +#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) + +#ifdef CONFIG_EROFS_FS_ZIP +enum { + EROFS_ZIP_CACHE_DISABLED, + EROFS_ZIP_CACHE_READAHEAD, + 
EROFS_ZIP_CACHE_READAROUND
+};
+
+#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)
+
+/* basic unit of the workstation of a super_block */
+struct erofs_workgroup {
+	/* the workgroup index in the workstation */
+	pgoff_t index;
+
+	/* overall workgroup reference count */
+	atomic_t refcount;
+};
+
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	/*
+	 * other observers should notice all modifications
+	 * in the freezing period.
+	 */
+	smp_mb();
+	atomic_set(&grp->refcount, orig_val);
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	return atomic_cond_read_relaxed(&grp->refcount,
+					VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	int v = atomic_read(&grp->refcount);
+
+	/* workgroup is never frozen on uniprocessor systems */
+	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+	return v;
+}
+#endif	/* !CONFIG_SMP */
+
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PCPUBUF_NR_PAGES		0
+#endif	/* !CONFIG_EROFS_FS_ZIP */
+
+/* we strictly follow PAGE_SIZE and no buffer head yet */
+#define LOG_BLOCK_SIZE		PAGE_SHIFT
+
+#undef LOG_SECTORS_PER_BLOCK
+#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)
+
+#undef SECTORS_PER_BLOCK
+#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)
+
+#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)
+
+#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
+#error erofs cannot be used in this platform
+#endif
+
+#define EROFS_IO_MAX_RETRIES_NOFAIL	5
+
+#define ROOT_NID(sb)		((sb)->root_nid)
+
+#define erofs_blknr(addr)	((addr) / EROFS_BLKSIZ)
+#define erofs_blkoff(addr)	((addr) % EROFS_BLKSIZ)
+#define blknr_to_addr(nr)	((erofs_off_t)(nr) * EROFS_BLKSIZ)
+
+static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+}
+
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT	0
+#define EROFS_V_Z_INITED_BIT	1
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT	(BITS_PER_LONG - 1)
+#define EROFS_V_BL_Z_BIT	(BITS_PER_LONG - 2)
+
+struct erofs_vnode {
+	erofs_nid_t nid;
+
+	/* atomic flags (including bitlocks) */
+	unsigned long flags;
+
+	unsigned char datamode;
+	unsigned char inode_isize;
+	unsigned short xattr_isize;
+
+	unsigned int xattr_shared_count;
+	unsigned int *xattr_shared_xattrs;
+
+	union {
+		erofs_blk_t raw_blkaddr;
+#ifdef CONFIG_EROFS_FS_ZIP
+		struct {
+			unsigned short z_advise;
+			unsigned char  z_algorithmtype[2];
+			unsigned char  z_logical_clusterbits;
+			unsigned char  z_physical_clusterbits[2];
+		};
+#endif	/* CONFIG_EROFS_FS_ZIP */
+	};
+	/* the corresponding vfs
inode */ + struct inode vfs_inode; +}; + +#define EROFS_V(ptr) \ + container_of(ptr, struct erofs_vnode, vfs_inode) + +#define __inode_advise(x, bit, bits) \ + (((x) >> (bit)) & ((1 << (bits)) - 1)) + +#define __inode_version(advise) \ + __inode_advise(advise, EROFS_I_VERSION_BIT, \ + EROFS_I_VERSION_BITS) + +#define __inode_data_mapping(advise) \ + __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\ + EROFS_I_DATA_MAPPING_BITS) + +static inline unsigned long inode_datablocks(struct inode *inode) +{ + /* since i_size cannot be changed */ + return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); +} + +static inline bool is_inode_layout_compression(struct inode *inode) +{ + return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode); +} + +static inline bool is_inode_flat_inline(struct inode *inode) +{ + return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE; +} + +extern const struct super_operations erofs_sops; + +extern const struct address_space_operations erofs_raw_access_aops; +#ifdef CONFIG_EROFS_FS_ZIP +extern const struct address_space_operations z_erofs_vle_normalaccess_aops; +#endif + +/* + * Logical to physical block mapping, used by erofs_map_blocks() + * + * Different with other file systems, it is used for 2 access modes: + * + * 1) RAW access mode: + * + * Users pass a valid (m_lblk, m_lofs -- usually 0) pair, + * and get the valid m_pblk, m_pofs and the longest m_len(in bytes). + * + * Note that m_lblk in the RAW access mode refers to the number of + * the compressed ondisk block rather than the uncompressed + * in-memory block for the compressed file. + * + * m_pofs equals to m_lofs except for the inline data page. + * + * 2) Normal access mode: + * + * If the inode is not compressed, it has no difference with + * the RAW access mode. However, if the inode is compressed, + * users should pass a valid (m_lblk, m_lofs) pair, and get + * the needed m_pblk, m_pofs, m_len to get the compressed data + * and the updated m_lblk, m_lofs which indicates the start + * of the corresponding uncompressed data in the file. + */ +enum { + BH_Zipped = BH_PrivateStart, + BH_FullMapped, +}; + +/* Has a disk mapping */ +#define EROFS_MAP_MAPPED (1 << BH_Mapped) +/* Located in metadata (could be copied from bd_inode) */ +#define EROFS_MAP_META (1 << BH_Meta) +/* The extent has been compressed */ +#define EROFS_MAP_ZIPPED (1 << BH_Zipped) +/* The length of extent is full */ +#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped) + +struct erofs_map_blocks { + erofs_off_t m_pa, m_la; + u64 m_plen, m_llen; + + unsigned int m_flags; + + struct page *mpage; +}; + +/* Flags used by erofs_map_blocks() */ +#define EROFS_GET_BLOCKS_RAW 0x0001 + +/* zmap.c */ +#ifdef CONFIG_EROFS_FS_ZIP +int z_erofs_fill_inode(struct inode *inode); +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags); +#else +static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; } +static inline int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + return -EOPNOTSUPP; +} +#endif /* !CONFIG_EROFS_FS_ZIP */ + +/* data.c */ +static inline struct bio *erofs_grab_bio(struct super_block *sb, + erofs_blk_t blkaddr, + unsigned int nr_pages, + void *bi_private, bio_end_io_t endio, + bool nofail) +{ + const gfp_t gfp = GFP_NOIO; + struct bio *bio; + + do { + if (nr_pages == 1) { + bio = bio_alloc(gfp | (nofail ? 
__GFP_NOFAIL : 0), 1); + if (unlikely(!bio)) { + DBG_BUGON(nofail); + return ERR_PTR(-ENOMEM); + } + break; + } + bio = bio_alloc(gfp, nr_pages); + nr_pages /= 2; + } while (unlikely(!bio)); + + bio->bi_end_io = endio; + bio_set_dev(bio, sb->s_bdev); + bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK; + bio->bi_private = bi_private; + return bio; +} + +static inline void __submit_bio(struct bio *bio, unsigned int op, + unsigned int op_flags) +{ + bio_set_op_attrs(bio, op, op_flags); + submit_bio(bio); +} + +struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, + bool prio, bool nofail); + +static inline struct page *erofs_get_meta_page(struct super_block *sb, + erofs_blk_t blkaddr, bool prio) +{ + return __erofs_get_meta_page(sb, blkaddr, prio, false); +} + +int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); + +static inline struct page *erofs_get_inline_page(struct inode *inode, + erofs_blk_t blkaddr) +{ + return erofs_get_meta_page(inode->i_sb, blkaddr, + S_ISDIR(inode->i_mode)); +} + +/* inode.c */ +static inline unsigned long erofs_inode_hash(erofs_nid_t nid) +{ +#if BITS_PER_LONG == 32 + return (nid >> 32) ^ (nid & 0xffffffff); +#else + return nid; +#endif +} + +extern const struct inode_operations erofs_generic_iops; +extern const struct inode_operations erofs_symlink_iops; +extern const struct inode_operations erofs_fast_symlink_iops; + +static inline void set_inode_fast_symlink(struct inode *inode) +{ + inode->i_op = &erofs_fast_symlink_iops; +} + +static inline bool is_inode_fast_symlink(struct inode *inode) +{ + return inode->i_op == &erofs_fast_symlink_iops; +} + +struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); +int erofs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags); + +/* namei.c */ +extern const struct inode_operations erofs_dir_iops; + +int erofs_namei(struct inode *dir, struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type); + +/* dir.c */ +extern const struct file_operations erofs_dir_fops; + +/* utils.c / zdata.c */ +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); + +#if (EROFS_PCPUBUF_NR_PAGES > 0) +void *erofs_get_pcpubuf(unsigned int pagenr); +#define erofs_put_pcpubuf(buf) do { \ + (void)&(buf); \ + preempt_enable(); \ +} while (0) +#else +static inline void *erofs_get_pcpubuf(unsigned int pagenr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +#define erofs_put_pcpubuf(buf) do {} while (0) +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +int erofs_workgroup_put(struct erofs_workgroup *grp); +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag); +int erofs_register_workgroup(struct super_block *sb, + struct erofs_workgroup *grp, bool tag); +void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); +void erofs_shrinker_register(struct super_block *sb); +void erofs_shrinker_unregister(struct super_block *sb); +int __init erofs_init_shrinker(void); +void erofs_exit_shrinker(void); +int __init z_erofs_init_zip_subsystem(void); +void z_erofs_exit_zip_subsystem(void); +int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *egrp); +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page); +#else +static inline void erofs_shrinker_register(struct super_block *sb) {} +static inline void erofs_shrinker_unregister(struct super_block *sb) {} +static inline int erofs_init_shrinker(void) { return 0; } +static 
inline void erofs_exit_shrinker(void) {} +static inline int z_erofs_init_zip_subsystem(void) { return 0; } +static inline void z_erofs_exit_zip_subsystem(void) {} +#endif /* !CONFIG_EROFS_FS_ZIP */ + +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + +#endif /* __EROFS_INTERNAL_H */ + diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c new file mode 100644 index 000000000000..8832b5d95d91 --- /dev/null +++ b/fs/erofs/namei.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "xattr.h" + +#include + +struct erofs_qstr { + const unsigned char *name; + const unsigned char *end; +}; + +/* based on the end of qn is accurate and it must have the trailing '\0' */ +static inline int dirnamecmp(const struct erofs_qstr *qn, + const struct erofs_qstr *qd, + unsigned int *matched) +{ + unsigned int i = *matched; + + /* + * on-disk error, let's only BUG_ON in the debugging mode. + * otherwise, it will return 1 to just skip the invalid name + * and go on (in consideration of the lookup performance). + */ + DBG_BUGON(qd->name > qd->end); + + /* qd could not have trailing '\0' */ + /* However it is absolutely safe if < qd->end */ + while (qd->name + i < qd->end && qd->name[i] != '\0') { + if (qn->name[i] != qd->name[i]) { + *matched = i; + return qn->name[i] > qd->name[i] ? 1 : -1; + } + ++i; + } + *matched = i; + /* See comments in __d_alloc on the terminating NUL character */ + return qn->name[i] == '\0' ? 0 : 1; +} + +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) + +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, + u8 *data, + unsigned int dirblksize, + const int ndirents) +{ + int head, back; + unsigned int startprfx, endprfx; + struct erofs_dirent *const de = (struct erofs_dirent *)data; + + /* since the 1st dirent has been evaluated previously */ + head = 1; + back = ndirents - 1; + startprfx = endprfx = 0; + + while (head <= back) { + const int mid = head + (back - head) / 2; + const int nameoff = nameoff_from_disk(de[mid].nameoff, + dirblksize); + unsigned int matched = min(startprfx, endprfx); + struct erofs_qstr dname = { + .name = data + nameoff, + .end = unlikely(mid >= ndirents - 1) ? 
+ data + dirblksize : + data + nameoff_from_disk(de[mid + 1].nameoff, + dirblksize) + }; + + /* string comparison without already matched prefix */ + int ret = dirnamecmp(name, &dname, &matched); + + if (unlikely(!ret)) { + return de + mid; + } else if (ret > 0) { + head = mid + 1; + startprfx = matched; + } else { + back = mid - 1; + endprfx = matched; + } + } + + return ERR_PTR(-ENOENT); +} + +static struct page *find_target_block_classic(struct inode *dir, + struct erofs_qstr *name, + int *_ndirents) +{ + unsigned int startprfx, endprfx; + int head, back; + struct address_space *const mapping = dir->i_mapping; + struct page *candidate = ERR_PTR(-ENOENT); + + startprfx = endprfx = 0; + head = 0; + back = inode_datablocks(dir) - 1; + + while (head <= back) { + const int mid = head + (back - head) / 2; + struct page *page = read_mapping_page(mapping, mid, NULL); + + if (!IS_ERR(page)) { + struct erofs_dirent *de = kmap_atomic(page); + const int nameoff = nameoff_from_disk(de->nameoff, + EROFS_BLKSIZ); + const int ndirents = nameoff / sizeof(*de); + int diff; + unsigned int matched; + struct erofs_qstr dname; + + if (unlikely(!ndirents)) { + kunmap_atomic(de); + put_page(page); + errln("corrupted dir block %d @ nid %llu", + mid, EROFS_V(dir)->nid); + DBG_BUGON(1); + page = ERR_PTR(-EFSCORRUPTED); + goto out; + } + + matched = min(startprfx, endprfx); + + dname.name = (u8 *)de + nameoff; + if (ndirents == 1) + dname.end = (u8 *)de + EROFS_BLKSIZ; + else + dname.end = (u8 *)de + + nameoff_from_disk(de[1].nameoff, + EROFS_BLKSIZ); + + /* string comparison without already matched prefix */ + diff = dirnamecmp(name, &dname, &matched); + kunmap_atomic(de); + + if (unlikely(!diff)) { + *_ndirents = 0; + goto out; + } else if (diff > 0) { + head = mid + 1; + startprfx = matched; + + if (!IS_ERR(candidate)) + put_page(candidate); + candidate = page; + *_ndirents = ndirents; + } else { + put_page(page); + + back = mid - 1; + endprfx = matched; + } + continue; + } +out: /* free if the candidate is valid */ + if (!IS_ERR(candidate)) + put_page(candidate); + return page; + } + return candidate; +} + +int erofs_namei(struct inode *dir, + struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type) +{ + int ndirents; + struct page *page; + void *data; + struct erofs_dirent *de; + struct erofs_qstr qn; + + if (unlikely(!dir->i_size)) + return -ENOENT; + + qn.name = name->name; + qn.end = name->name + name->len; + + ndirents = 0; + page = find_target_block_classic(dir, &qn, &ndirents); + + if (IS_ERR(page)) + return PTR_ERR(page); + + data = kmap_atomic(page); + /* the target page has been mapped */ + if (ndirents) + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); + else + de = (struct erofs_dirent *)data; + + if (!IS_ERR(de)) { + *nid = le64_to_cpu(de->nid); + *d_type = de->file_type; + } + + kunmap_atomic(data); + put_page(page); + + return PTR_ERR_OR_ZERO(de); +} + +/* NOTE: i_mutex is already held by vfs */ +static struct dentry *erofs_lookup(struct inode *dir, + struct dentry *dentry, + unsigned int flags) +{ + int err; + erofs_nid_t nid; + unsigned int d_type; + struct inode *inode; + + DBG_BUGON(!d_really_is_negative(dentry)); + /* dentry must be unhashed in lookup, no need to worry about */ + DBG_BUGON(!d_unhashed(dentry)); + + trace_erofs_lookup(dir, dentry, flags); + + /* file name exceeds fs limit */ + if (unlikely(dentry->d_name.len > EROFS_NAME_LEN)) + return ERR_PTR(-ENAMETOOLONG); + + /* false uninitialized warnings on gcc 4.8.x */ + err = erofs_namei(dir, &dentry->d_name, &nid, 
&d_type); + + if (err == -ENOENT) { + /* negative dentry */ + inode = NULL; + } else if (unlikely(err)) { + inode = ERR_PTR(err); + } else { + debugln("%s, %s (nid %llu) found, d_type %u", __func__, + dentry->d_name.name, nid, d_type); + inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR); + } + return d_splice_alias(inode, dentry); +} + +const struct inode_operations erofs_dir_iops = { + .lookup = erofs_lookup, + .getattr = erofs_getattr, +#ifdef CONFIG_EROFS_FS_XATTR + .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, +}; + diff --git a/fs/erofs/super.c b/fs/erofs/super.c new file mode 100644 index 000000000000..6d3a9bcb8daa --- /dev/null +++ b/fs/erofs/super.c @@ -0,0 +1,669 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include +#include +#include +#include +#include +#include "xattr.h" + +#define CREATE_TRACE_POINTS +#include + +static struct kmem_cache *erofs_inode_cachep __read_mostly; + +static void init_once(void *ptr) +{ + struct erofs_vnode *vi = ptr; + + inode_init_once(&vi->vfs_inode); +} + +static int __init erofs_init_inode_cache(void) +{ + erofs_inode_cachep = kmem_cache_create("erofs_inode", + sizeof(struct erofs_vnode), 0, + SLAB_RECLAIM_ACCOUNT, + init_once); + + return erofs_inode_cachep ? 0 : -ENOMEM; +} + +static void erofs_exit_inode_cache(void) +{ + kmem_cache_destroy(erofs_inode_cachep); +} + +static struct inode *alloc_inode(struct super_block *sb) +{ + struct erofs_vnode *vi = + kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); + + if (!vi) + return NULL; + + /* zero out everything except vfs_inode */ + memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode)); + return &vi->vfs_inode; +} + +static void free_inode(struct inode *inode) +{ + struct erofs_vnode *vi = EROFS_V(inode); + + /* be careful RCU symlink path (see ext4_inode_info->i_data)! 
*/ + if (is_inode_fast_symlink(inode)) + kfree(inode->i_link); + + kfree(vi->xattr_shared_xattrs); + + kmem_cache_free(erofs_inode_cachep, vi); +} + +static bool check_layout_compatibility(struct super_block *sb, + struct erofs_super_block *layout) +{ + const unsigned int requirements = le32_to_cpu(layout->requirements); + + EROFS_SB(sb)->requirements = requirements; + + /* check if current kernel meets all mandatory requirements */ + if (requirements & (~EROFS_ALL_REQUIREMENTS)) { + errln("unidentified requirements %x, please upgrade kernel version", + requirements & ~EROFS_ALL_REQUIREMENTS); + return false; + } + return true; +} + +static int superblock_read(struct super_block *sb) +{ + struct erofs_sb_info *sbi; + struct buffer_head *bh; + struct erofs_super_block *layout; + unsigned int blkszbits; + int ret; + + bh = sb_bread(sb, 0); + + if (!bh) { + errln("cannot read erofs superblock"); + return -EIO; + } + + sbi = EROFS_SB(sb); + layout = (struct erofs_super_block *)((u8 *)bh->b_data + + EROFS_SUPER_OFFSET); + + ret = -EINVAL; + if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) { + errln("cannot find valid erofs superblock"); + goto out; + } + + blkszbits = layout->blkszbits; + /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ + if (unlikely(blkszbits != LOG_BLOCK_SIZE)) { + errln("blksize %u isn't supported on this platform", + 1 << blkszbits); + goto out; + } + + if (!check_layout_compatibility(sb, layout)) + goto out; + + sbi->blocks = le32_to_cpu(layout->blocks); + sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); +#ifdef CONFIG_EROFS_FS_XATTR + sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr); +#endif + sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1; + sbi->root_nid = le16_to_cpu(layout->root_nid); + sbi->inos = le64_to_cpu(layout->inos); + + sbi->build_time = le64_to_cpu(layout->build_time); + sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec); + + memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid)); + + ret = strscpy(sbi->volume_name, layout->volume_name, + sizeof(layout->volume_name)); + if (ret < 0) { /* -E2BIG */ + errln("bad volume name without NIL terminator"); + ret = -EFSCORRUPTED; + goto out; + } + ret = 0; +out: + brelse(bh); + return ret; +} + +#ifdef CONFIG_EROFS_FAULT_INJECTION +const char *erofs_fault_name[FAULT_MAX] = { + [FAULT_KMALLOC] = "kmalloc", + [FAULT_READ_IO] = "read IO error", +}; + +static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, + unsigned int rate) +{ + struct erofs_fault_info *ffi = &sbi->fault_info; + + if (rate) { + atomic_set(&ffi->inject_ops, 0); + ffi->inject_rate = rate; + ffi->inject_type = (1 << FAULT_MAX) - 1; + } else { + memset(ffi, 0, sizeof(struct erofs_fault_info)); + } + + set_opt(sbi, FAULT_INJECTION); +} + +static int erofs_build_fault_attr(struct erofs_sb_info *sbi, + substring_t *args) +{ + int rate = 0; + + if (args->from && match_int(args, &rate)) + return -EINVAL; + + __erofs_build_fault_attr(sbi, rate); + return 0; +} + +static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) +{ + return sbi->fault_info.inject_rate; +} +#else +static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, + unsigned int rate) +{ +} + +static int erofs_build_fault_attr(struct erofs_sb_info *sbi, + substring_t *args) +{ + infoln("fault_injection options not supported"); + return 0; +} + +static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) +{ + return 0; +} +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +static int erofs_build_cache_strategy(struct 
erofs_sb_info *sbi, + substring_t *args) +{ + const char *cs = match_strdup(args); + int err = 0; + + if (!cs) { + errln("Not enough memory to store cache strategy"); + return -ENOMEM; + } + + if (!strcmp(cs, "disabled")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; + } else if (!strcmp(cs, "readahead")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; + } else if (!strcmp(cs, "readaround")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + } else { + errln("Unrecognized cache strategy \"%s\"", cs); + err = -EINVAL; + } + kfree(cs); + return err; +} +#else +static int erofs_build_cache_strategy(struct erofs_sb_info *sbi, + substring_t *args) +{ + infoln("EROFS compression is disabled, so cache strategy is ignored"); + return 0; +} +#endif + +/* set up default EROFS parameters */ +static void default_options(struct erofs_sb_info *sbi) +{ +#ifdef CONFIG_EROFS_FS_ZIP + sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + sbi->max_sync_decompress_pages = 3; +#endif +#ifdef CONFIG_EROFS_FS_XATTR + set_opt(sbi, XATTR_USER); +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + set_opt(sbi, POSIX_ACL); +#endif +} + +enum { + Opt_user_xattr, + Opt_nouser_xattr, + Opt_acl, + Opt_noacl, + Opt_fault_injection, + Opt_cache_strategy, + Opt_err +}; + +static match_table_t erofs_tokens = { + {Opt_user_xattr, "user_xattr"}, + {Opt_nouser_xattr, "nouser_xattr"}, + {Opt_acl, "acl"}, + {Opt_noacl, "noacl"}, + {Opt_fault_injection, "fault_injection=%u"}, + {Opt_cache_strategy, "cache_strategy=%s"}, + {Opt_err, NULL} +}; + +static int parse_options(struct super_block *sb, char *options) +{ + substring_t args[MAX_OPT_ARGS]; + char *p; + int err; + + if (!options) + return 0; + + while ((p = strsep(&options, ","))) { + int token; + + if (!*p) + continue; + + args[0].to = args[0].from = NULL; + token = match_token(p, erofs_tokens, args); + + switch (token) { +#ifdef CONFIG_EROFS_FS_XATTR + case Opt_user_xattr: + set_opt(EROFS_SB(sb), XATTR_USER); + break; + case Opt_nouser_xattr: + clear_opt(EROFS_SB(sb), XATTR_USER); + break; +#else + case Opt_user_xattr: + infoln("user_xattr options not supported"); + break; + case Opt_nouser_xattr: + infoln("nouser_xattr options not supported"); + break; +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + case Opt_acl: + set_opt(EROFS_SB(sb), POSIX_ACL); + break; + case Opt_noacl: + clear_opt(EROFS_SB(sb), POSIX_ACL); + break; +#else + case Opt_acl: + infoln("acl options not supported"); + break; + case Opt_noacl: + infoln("noacl options not supported"); + break; +#endif + case Opt_fault_injection: + err = erofs_build_fault_attr(EROFS_SB(sb), args); + if (err) + return err; + break; + case Opt_cache_strategy: + err = erofs_build_cache_strategy(EROFS_SB(sb), args); + if (err) + return err; + break; + default: + errln("Unrecognized mount option \"%s\" or missing value", p); + return -EINVAL; + } + } + return 0; +} + +#ifdef CONFIG_EROFS_FS_ZIP +static const struct address_space_operations managed_cache_aops; + +static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) +{ + int ret = 1; /* 0 - busy */ + struct address_space *const mapping = page->mapping; + + DBG_BUGON(!PageLocked(page)); + DBG_BUGON(mapping->a_ops != &managed_cache_aops); + + if (PagePrivate(page)) + ret = erofs_try_to_free_cached_page(mapping, page); + + return ret; +} + +static void managed_cache_invalidatepage(struct page *page, + unsigned int offset, + unsigned int length) +{ + const unsigned int stop = length + offset; + + DBG_BUGON(!PageLocked(page)); + + /* Check for potential overflow in 
debug mode */ + DBG_BUGON(stop > PAGE_SIZE || stop < length); + + if (offset == 0 && stop == PAGE_SIZE) + while (!managed_cache_releasepage(page, GFP_NOFS)) + cond_resched(); +} + +static const struct address_space_operations managed_cache_aops = { + .releasepage = managed_cache_releasepage, + .invalidatepage = managed_cache_invalidatepage, +}; + +static int erofs_init_managed_cache(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + struct inode *const inode = new_inode(sb); + + if (unlikely(!inode)) + return -ENOMEM; + + set_nlink(inode, 1); + inode->i_size = OFFSET_MAX; + + inode->i_mapping->a_ops = &managed_cache_aops; + mapping_set_gfp_mask(inode->i_mapping, + GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); + sbi->managed_cache = inode; + return 0; +} +#else +static int erofs_init_managed_cache(struct super_block *sb) { return 0; } +#endif + +static int erofs_fill_super(struct super_block *sb, void *data, int silent) +{ + struct inode *inode; + struct erofs_sb_info *sbi; + int err; + + infoln("fill_super, device -> %s", sb->s_id); + infoln("options -> %s", (char *)data); + + sb->s_magic = EROFS_SUPER_MAGIC; + + if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) { + errln("failed to set erofs blksize"); + return -EINVAL; + } + + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + if (unlikely(!sbi)) + return -ENOMEM; + + sb->s_fs_info = sbi; + err = superblock_read(sb); + if (err) + return err; + + sb->s_flags |= SB_RDONLY | SB_NOATIME; + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_time_gran = 1; + + sb->s_op = &erofs_sops; + +#ifdef CONFIG_EROFS_FS_XATTR + sb->s_xattr = erofs_xattr_handlers; +#endif + /* set erofs default mount options */ + default_options(sbi); + + err = parse_options(sb, data); + if (unlikely(err)) + return err; + + if (!silent) + infoln("root inode @ nid %llu", ROOT_NID(sbi)); + + if (test_opt(sbi, POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + +#ifdef CONFIG_EROFS_FS_ZIP + INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); +#endif + + /* get the root inode */ + inode = erofs_iget(sb, ROOT_NID(sbi), true); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + if (unlikely(!S_ISDIR(inode->i_mode))) { + errln("rootino(nid %llu) is not a directory(i_mode %o)", + ROOT_NID(sbi), inode->i_mode); + iput(inode); + return -EINVAL; + } + + sb->s_root = d_make_root(inode); + if (unlikely(!sb->s_root)) + return -ENOMEM; + + erofs_shrinker_register(sb); + /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ + err = erofs_init_managed_cache(sb); + if (unlikely(err)) + return err; + + if (!silent) + infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data); + return 0; +} + +static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); +} + +/* + * could be triggered after deactivate_locked_super() + * is called, thus including umount and failed to initialize. 
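+ * (in particular, when fill_super fails before sb->s_fs_info is set
+ * up, sbi below can still be NULL, hence the explicit check.)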
+ */
+static void erofs_kill_sb(struct super_block *sb)
+{
+	struct erofs_sb_info *sbi;
+
+	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
+	infoln("unmounting for %s", sb->s_id);
+
+	kill_block_super(sb);
+
+	sbi = EROFS_SB(sb);
+	if (!sbi)
+		return;
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+}
+
+/* called when ->s_root is non-NULL */
+static void erofs_put_super(struct super_block *sb)
+{
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+	DBG_BUGON(!sbi);
+
+	erofs_shrinker_unregister(sb);
+#ifdef CONFIG_EROFS_FS_ZIP
+	iput(sbi->managed_cache);
+	sbi->managed_cache = NULL;
+#endif
+}
+
+static struct file_system_type erofs_fs_type = {
+	.owner          = THIS_MODULE,
+	.name           = "erofs",
+	.mount          = erofs_mount,
+	.kill_sb        = erofs_kill_sb,
+	.fs_flags       = FS_REQUIRES_DEV,
+};
+MODULE_ALIAS_FS("erofs");
+
+static int __init erofs_module_init(void)
+{
+	int err;
+
+	erofs_check_ondisk_layout_definitions();
+	infoln("initializing erofs " EROFS_VERSION);
+
+	err = erofs_init_inode_cache();
+	if (err)
+		goto icache_err;
+
+	err = erofs_init_shrinker();
+	if (err)
+		goto shrinker_err;
+
+	err = z_erofs_init_zip_subsystem();
+	if (err)
+		goto zip_err;
+
+	err = register_filesystem(&erofs_fs_type);
+	if (err)
+		goto fs_err;
+
+	infoln("successfully initialized erofs");
+	return 0;
+
+fs_err:
+	z_erofs_exit_zip_subsystem();
+zip_err:
+	erofs_exit_shrinker();
+shrinker_err:
+	erofs_exit_inode_cache();
+icache_err:
+	return err;
+}
+
+static void __exit erofs_module_exit(void)
+{
+	unregister_filesystem(&erofs_fs_type);
+	z_erofs_exit_zip_subsystem();
+	erofs_exit_shrinker();
+	erofs_exit_inode_cache();
+	infoln("successfully finalized erofs");
+}
+
+/* get filesystem statistics */
+static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct super_block *sb = dentry->d_sb;
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+	buf->f_type = sb->s_magic;
+	buf->f_bsize = EROFS_BLKSIZ;
+	buf->f_blocks = sbi->blocks;
+	buf->f_bfree = buf->f_bavail = 0;
+
+	buf->f_files = ULLONG_MAX;
+	buf->f_ffree = ULLONG_MAX - sbi->inos;
+
+	buf->f_namelen = EROFS_NAME_LEN;
+
+	buf->f_fsid.val[0] = (u32)id;
+	buf->f_fsid.val[1] = (u32)(id >> 32);
+	return 0;
+}
+
+static int erofs_show_options(struct seq_file *seq, struct dentry *root)
+{
+	struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
+
+#ifdef CONFIG_EROFS_FS_XATTR
+	if (test_opt(sbi, XATTR_USER))
+		seq_puts(seq, ",user_xattr");
+	else
+		seq_puts(seq, ",nouser_xattr");
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+	if (test_opt(sbi, POSIX_ACL))
+		seq_puts(seq, ",acl");
+	else
+		seq_puts(seq, ",noacl");
+#endif
+	if (test_opt(sbi, FAULT_INJECTION))
+		seq_printf(seq, ",fault_injection=%u",
+			   erofs_get_fault_rate(sbi));
+#ifdef CONFIG_EROFS_FS_ZIP
+	if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) {
+		seq_puts(seq, ",cache_strategy=disabled");
+	} else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) {
+		seq_puts(seq, ",cache_strategy=readahead");
+	} else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
+		seq_puts(seq, ",cache_strategy=readaround");
+	} else {
+		seq_puts(seq, ",cache_strategy=(unknown)");
+		DBG_BUGON(1);
+	}
+#endif
+	return 0;
+}
+
+static int erofs_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	unsigned int org_mnt_opt = sbi->mount_opt;
+	unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
+	int err;
+
+	DBG_BUGON(!sb_rdonly(sb));
+	err = parse_options(sb, data);
+	if (err)
+		goto out;
+
+	if (test_opt(sbi,
POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + + *flags |= SB_RDONLY; + return 0; +out: + __erofs_build_fault_attr(sbi, org_inject_rate); + sbi->mount_opt = org_mnt_opt; + + return err; +} + +const struct super_operations erofs_sops = { + .put_super = erofs_put_super, + .alloc_inode = alloc_inode, + .free_inode = free_inode, + .statfs = erofs_statfs, + .show_options = erofs_show_options, + .remount_fs = erofs_remount, +}; + +module_init(erofs_module_init); +module_exit(erofs_module_exit); + +MODULE_DESCRIPTION("Enhanced ROM File System"); +MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); +MODULE_LICENSE("GPL"); + diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h new file mode 100644 index 000000000000..a72897c86744 --- /dev/null +++ b/fs/erofs/tagptr.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * A tagged pointer implementation + * + * Copyright (C) 2018 Gao Xiang + */ +#ifndef __EROFS_FS_TAGPTR_H +#define __EROFS_FS_TAGPTR_H + +#include +#include + +/* + * the name of tagged pointer types are tagptr{1, 2, 3...}_t + * avoid directly using the internal structs __tagptr{1, 2, 3...} + */ +#define __MAKE_TAGPTR(n) \ +typedef struct __tagptr##n { \ + uintptr_t v; \ +} tagptr##n##_t; + +__MAKE_TAGPTR(1) +__MAKE_TAGPTR(2) +__MAKE_TAGPTR(3) +__MAKE_TAGPTR(4) + +#undef __MAKE_TAGPTR + +extern void __compiletime_error("bad tagptr tags") + __bad_tagptr_tags(void); + +extern void __compiletime_error("bad tagptr type") + __bad_tagptr_type(void); + +/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */ +#define __tagptr_mask_1(ptr, n) \ + __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \ + (1UL << (n)) - 1 : + +#define __tagptr_mask(ptr) (\ + __tagptr_mask_1(ptr, 1) ( \ + __tagptr_mask_1(ptr, 2) ( \ + __tagptr_mask_1(ptr, 3) ( \ + __tagptr_mask_1(ptr, 4) ( \ + __bad_tagptr_type(), 0))))) + +/* generate a tagged pointer from a raw value */ +#define tagptr_init(type, val) \ + ((typeof(type)){ .v = (uintptr_t)(val) }) + +/* + * directly cast a tagged pointer to the native pointer type, which + * could be used for backward compatibility of existing code. 
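+ *
+ * For example (illustrative only, assuming 'ptr' is at least a
+ * 2-byte-aligned pointer so the low bit is free for the tag):
+ *	tagptr1_t t = tagptr_fold(tagptr1_t, ptr, 1);
+ *	void *p = tagptr_unfold_ptr(t);          <- 'ptr' with the tag bit cleared
+ *	uintptr_t tag = tagptr_unfold_tags(t);   <- yields 1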
+ */
+#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
+
+/* encode tagged pointers */
+#define tagptr_fold(type, ptr, _tags) ({ \
+	const typeof(_tags) tags = (_tags); \
+	if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
+		__bad_tagptr_tags(); \
+tagptr_init(type, (uintptr_t)(ptr) | tags); })
+
+/* decode tagged pointers */
+#define tagptr_unfold_ptr(tptr) \
+	((void *)((tptr).v & ~__tagptr_mask(tptr)))
+
+#define tagptr_unfold_tags(tptr) \
+	((tptr).v & __tagptr_mask(tptr))
+
+/* operations for the tagged pointer */
+#define tagptr_eq(_tptr1, _tptr2) ({ \
+	typeof(_tptr1) tptr1 = (_tptr1); \
+	typeof(_tptr2) tptr2 = (_tptr2); \
+	(void)(&tptr1 == &tptr2); \
+(tptr1).v == (tptr2).v; })
+
+/* lock-free CAS operation */
+#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
+	typeof(_ptptr) ptptr = (_ptptr); \
+	typeof(_o) o = (_o); \
+	typeof(_n) n = (_n); \
+	(void)(&o == &n); \
+	(void)(&o == ptptr); \
+tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
+
+/* wrap WRITE_ONCE if atomic update is needed */
+#define tagptr_replace_tags(_ptptr, tags) ({ \
+	typeof(_ptptr) ptptr = (_ptptr); \
+	*ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
+*ptptr; })
+
+#define tagptr_set_tags(_ptptr, _tags) ({ \
+	typeof(_ptptr) ptptr = (_ptptr); \
+	const typeof(_tags) tags = (_tags); \
+	if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+		__bad_tagptr_tags(); \
+	ptptr->v |= tags; \
+*ptptr; })
+
+#define tagptr_clear_tags(_ptptr, _tags) ({ \
+	typeof(_ptptr) ptptr = (_ptptr); \
+	const typeof(_tags) tags = (_tags); \
+	if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+		__bad_tagptr_tags(); \
+	ptptr->v &= ~tags; \
+*ptptr; })
+
+#endif	/* __EROFS_FS_TAGPTR_H */
+
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
new file mode 100644
index 000000000000..1dd041aa0f5a
--- /dev/null
+++ b/fs/erofs/utils.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "internal.h"
+#include <linux/pagevec.h>
+
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+{
+	struct page *page;
+
+	if (!list_empty(pool)) {
+		page = lru_to_page(pool);
+		DBG_BUGON(page_ref_count(page) != 1);
+		list_del(&page->lru);
+	} else {
+		page = alloc_pages(gfp | (nofail ?
__GFP_NOFAIL : 0), 0);
+	}
+	return page;
+}
+
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+static struct {
+	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
+} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
+
+void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+	preempt_disable();
+	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
+}
+#endif
+
+#ifdef CONFIG_EROFS_FS_ZIP
+/* global shrink count (for all mounted EROFS instances) */
+static atomic_long_t erofs_global_shrink_cnt;
+
+#define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)
+
+static int erofs_workgroup_get(struct erofs_workgroup *grp)
+{
+	int o;
+
+repeat:
+	o = erofs_wait_on_workgroup_freezed(grp);
+	if (unlikely(o <= 0))
+		return -1;
+
+	if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
+		goto repeat;
+
+	/* undo the shrink-count increase done in erofs_workgroup_put */
+	if (unlikely(o == 1))
+		atomic_long_dec(&erofs_global_shrink_cnt);
+	return 0;
+}
+
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+					     pgoff_t index, bool *tag)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	struct erofs_workgroup *grp;
+
+repeat:
+	rcu_read_lock();
+	grp = radix_tree_lookup(&sbi->workstn_tree, index);
+	if (grp) {
+		*tag = xa_pointer_tag(grp);
+		grp = xa_untag_pointer(grp);
+
+		if (erofs_workgroup_get(grp)) {
+			/* prefer to relax rcu read side */
+			rcu_read_unlock();
+			goto repeat;
+		}
+
+		DBG_BUGON(index != grp->index);
+	}
+	rcu_read_unlock();
+	return grp;
+}
+
+int erofs_register_workgroup(struct super_block *sb,
+			     struct erofs_workgroup *grp,
+			     bool tag)
+{
+	struct erofs_sb_info *sbi;
+	int err;
+
+	/* grp shouldn't be broken or used before */
+	if (unlikely(atomic_read(&grp->refcount) != 1)) {
+		DBG_BUGON(1);
+		return -EINVAL;
+	}
+
+	err = radix_tree_preload(GFP_NOFS);
+	if (err)
+		return err;
+
+	sbi = EROFS_SB(sb);
+	xa_lock(&sbi->workstn_tree);
+
+	grp = xa_tag_pointer(grp, tag);
+
+	/*
+	 * Bump up the reference count before making this workgroup
+	 * visible to other users in order to avoid a potential UAF
+	 * without being serialized by workstn_lock.
+	 */
+	__erofs_workgroup_get(grp);
+
+	err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
+	if (unlikely(err))
+		/*
+		 * it's safe to decrease here since the workgroup isn't
+		 * visible and its refcount >= 2 (so it cannot be frozen).
+		 */
+		__erofs_workgroup_put(grp);
+
+	xa_unlock(&sbi->workstn_tree);
+	radix_tree_preload_end();
+	return err;
+}
+
+static void __erofs_workgroup_free(struct erofs_workgroup *grp)
+{
+	atomic_long_dec(&erofs_global_shrink_cnt);
+	erofs_workgroup_free_rcu(grp);
+}
+
+int erofs_workgroup_put(struct erofs_workgroup *grp)
+{
+	int count = atomic_dec_return(&grp->refcount);
+
+	if (count == 1)
+		atomic_long_inc(&erofs_global_shrink_cnt);
+	else if (!count)
+		__erofs_workgroup_free(grp);
+	return count;
+}
+
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
+{
+	erofs_workgroup_unfreeze(grp, 0);
+	__erofs_workgroup_free(grp);
+}
+
+static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+					   struct erofs_workgroup *grp,
+					   bool cleanup)
+{
+	/*
+	 * If managed cache is on, the refcount of workgroups
+	 * themselves could be < 0 (frozen). In other words,
+	 * there is no guarantee that all refcounts are > 0.
+	 */
+	if (!erofs_workgroup_try_to_freeze(grp, 1))
+		return false;
+
+	/*
+	 * Note that all cached pages should be detached
+	 * before the workgroup is deleted from the radix tree.
Otherwise some + * cached pages could be still attached to the orphan + * old workgroup when the new one is available in the tree. + */ + if (erofs_try_to_free_all_cached_pages(sbi, grp)) { + erofs_workgroup_unfreeze(grp, 1); + return false; + } + + /* + * It's impossible to fail after the workgroup is freezed, + * however in order to avoid some race conditions, add a + * DBG_BUGON to observe this in advance. + */ + DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, + grp->index)) != grp); + + /* + * If managed cache is on, last refcount should indicate + * the related workstation. + */ + erofs_workgroup_unfreeze_final(grp); + return true; +} + +static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, + unsigned long nr_shrink, + bool cleanup) +{ + pgoff_t first_index = 0; + void *batch[PAGEVEC_SIZE]; + unsigned int freed = 0; + + int i, found; +repeat: + xa_lock(&sbi->workstn_tree); + + found = radix_tree_gang_lookup(&sbi->workstn_tree, + batch, first_index, PAGEVEC_SIZE); + + for (i = 0; i < found; ++i) { + struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); + + first_index = grp->index + 1; + + /* try to shrink each valid workgroup */ + if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) + continue; + + ++freed; + if (unlikely(!--nr_shrink)) + break; + } + xa_unlock(&sbi->workstn_tree); + + if (i && nr_shrink) + goto repeat; + return freed; +} + +/* protected by 'erofs_sb_list_lock' */ +static unsigned int shrinker_run_no; + +/* protects the mounted 'erofs_sb_list' */ +static DEFINE_SPINLOCK(erofs_sb_list_lock); +static LIST_HEAD(erofs_sb_list); + +void erofs_shrinker_register(struct super_block *sb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + + mutex_init(&sbi->umount_mutex); + + spin_lock(&erofs_sb_list_lock); + list_add(&sbi->list, &erofs_sb_list); + spin_unlock(&erofs_sb_list_lock); +} + +void erofs_shrinker_unregister(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + + mutex_lock(&sbi->umount_mutex); + erofs_shrink_workstation(sbi, ~0UL, true); + + spin_lock(&erofs_sb_list_lock); + list_del(&sbi->list); + spin_unlock(&erofs_sb_list_lock); + mutex_unlock(&sbi->umount_mutex); +} + +static unsigned long erofs_shrink_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + return atomic_long_read(&erofs_global_shrink_cnt); +} + +static unsigned long erofs_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + struct erofs_sb_info *sbi; + struct list_head *p; + + unsigned long nr = sc->nr_to_scan; + unsigned int run_no; + unsigned long freed = 0; + + spin_lock(&erofs_sb_list_lock); + do { + run_no = ++shrinker_run_no; + } while (run_no == 0); + + /* Iterate over all mounted superblocks and try to shrink them */ + p = erofs_sb_list.next; + while (p != &erofs_sb_list) { + sbi = list_entry(p, struct erofs_sb_info, list); + + /* + * We move the ones we do to the end of the list, so we stop + * when we see one we have already done. + */ + if (sbi->shrinker_run_no == run_no) + break; + + if (!mutex_trylock(&sbi->umount_mutex)) { + p = p->next; + continue; + } + + spin_unlock(&erofs_sb_list_lock); + sbi->shrinker_run_no = run_no; + + freed += erofs_shrink_workstation(sbi, nr, false); + + spin_lock(&erofs_sb_list_lock); + /* Get the next list element before we move this one */ + p = p->next; + + /* + * Move this one to the end of the list to provide some + * fairness. 
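+ * (superblocks whose umount_mutex could not be taken are simply
+ * skipped above via mutex_trylock rather than stalling the scan.)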
+ */ + list_move_tail(&sbi->list, &erofs_sb_list); + mutex_unlock(&sbi->umount_mutex); + + if (freed >= nr) + break; + } + spin_unlock(&erofs_sb_list_lock); + return freed; +} + +static struct shrinker erofs_shrinker_info = { + .scan_objects = erofs_shrink_scan, + .count_objects = erofs_shrink_count, + .seeks = DEFAULT_SEEKS, +}; + +int __init erofs_init_shrinker(void) +{ + return register_shrinker(&erofs_shrinker_info); +} + +void erofs_exit_shrinker(void) +{ + unregister_shrinker(&erofs_shrinker_info); +} +#endif /* !CONFIG_EROFS_FS_ZIP */ + diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c new file mode 100644 index 000000000000..a8286998a079 --- /dev/null +++ b/fs/erofs/xattr.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include +#include "xattr.h" + +struct xattr_iter { + struct super_block *sb; + struct page *page; + void *kaddr; + + erofs_blk_t blkaddr; + unsigned int ofs; +}; + +static inline void xattr_iter_end(struct xattr_iter *it, bool atomic) +{ + /* the only user of kunmap() is 'init_inode_xattrs' */ + if (unlikely(!atomic)) + kunmap(it->page); + else + kunmap_atomic(it->kaddr); + + unlock_page(it->page); + put_page(it->page); +} + +static inline void xattr_iter_end_final(struct xattr_iter *it) +{ + if (!it->page) + return; + + xattr_iter_end(it, true); +} + +static int init_inode_xattrs(struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct xattr_iter it; + unsigned int i; + struct erofs_xattr_ibody_header *ih; + struct super_block *sb; + struct erofs_sb_info *sbi; + bool atomic_map; + int ret = 0; + + /* the most case is that xattrs of this inode are initialized. */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + return 0; + + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) + return -ERESTARTSYS; + + /* someone has initialized xattrs for us? */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + goto out_unlock; + + /* + * bypass all xattr operations if ->xattr_isize is not greater than + * sizeof(struct erofs_xattr_ibody_header), in detail: + * 1) it is not enough to contain erofs_xattr_ibody_header then + * ->xattr_isize should be 0 (it means no xattr); + * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk + * undefined right now (maybe use later with some new sb feature). 
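+ * In short: 0 means this inode has no xattrs (-ENOATTR below), a size
+ * equal to the bare header is rejected as unsupported, and any other
+ * size smaller than the header indicates on-disk corruption.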
+ */ + if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { + errln("xattr_isize %d of nid %llu is not supported yet", + vi->xattr_isize, vi->nid); + ret = -EOPNOTSUPP; + goto out_unlock; + } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { + if (unlikely(vi->xattr_isize)) { + errln("bogus xattr ibody @ nid %llu", vi->nid); + DBG_BUGON(1); + ret = -EFSCORRUPTED; + goto out_unlock; /* xattr ondisk layout error */ + } + ret = -ENOATTR; + goto out_unlock; + } + + sb = inode->i_sb; + sbi = EROFS_SB(sb); + it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); + it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); + + it.page = erofs_get_inline_page(inode, it.blkaddr); + if (IS_ERR(it.page)) { + ret = PTR_ERR(it.page); + goto out_unlock; + } + + /* read in shared xattr array (non-atomic, see kmalloc below) */ + it.kaddr = kmap(it.page); + atomic_map = false; + + ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); + + vi->xattr_shared_count = ih->h_shared_count; + vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, + sizeof(uint), GFP_KERNEL); + if (!vi->xattr_shared_xattrs) { + xattr_iter_end(&it, atomic_map); + ret = -ENOMEM; + goto out_unlock; + } + + /* let's skip ibody header */ + it.ofs += sizeof(struct erofs_xattr_ibody_header); + + for (i = 0; i < vi->xattr_shared_count; ++i) { + if (unlikely(it.ofs >= EROFS_BLKSIZ)) { + /* cannot be unaligned */ + DBG_BUGON(it.ofs != EROFS_BLKSIZ); + xattr_iter_end(&it, atomic_map); + + it.page = erofs_get_meta_page(sb, ++it.blkaddr, + S_ISDIR(inode->i_mode)); + if (IS_ERR(it.page)) { + kfree(vi->xattr_shared_xattrs); + vi->xattr_shared_xattrs = NULL; + ret = PTR_ERR(it.page); + goto out_unlock; + } + + it.kaddr = kmap_atomic(it.page); + atomic_map = true; + it.ofs = 0; + } + vi->xattr_shared_xattrs[i] = + le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs)); + it.ofs += sizeof(__le32); + } + xattr_iter_end(&it, atomic_map); + + set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); + +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); + return ret; +} + +/* + * the general idea for these return values is + * if 0 is returned, go on processing the current xattr; + * 1 (> 0) is returned, skip this round to process the next xattr; + * -err (< 0) is returned, an error (maybe ENOXATTR) occurred + * and need to be handled + */ +struct xattr_iter_handlers { + int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry); + int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); + int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); + void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); +}; + +static inline int xattr_iter_fixup(struct xattr_iter *it) +{ + if (it->ofs < EROFS_BLKSIZ) + return 0; + + xattr_iter_end(it, true); + + it->blkaddr += erofs_blknr(it->ofs); + + it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); + if (IS_ERR(it->page)) { + int err = PTR_ERR(it->page); + + it->page = NULL; + return err; + } + + it->kaddr = kmap_atomic(it->page); + it->ofs = erofs_blkoff(it->ofs); + return 0; +} + +static int inline_xattr_iter_begin(struct xattr_iter *it, + struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb); + unsigned int xattr_header_sz, inline_xattr_ofs; + + xattr_header_sz = inlinexattr_header_size(inode); + if (unlikely(xattr_header_sz >= vi->xattr_isize)) { + DBG_BUGON(xattr_header_sz > 
vi->xattr_isize); + return -ENOATTR; + } + + inline_xattr_ofs = vi->inode_isize + xattr_header_sz; + + it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs); + it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); + + it->page = erofs_get_inline_page(inode, it->blkaddr); + if (IS_ERR(it->page)) + return PTR_ERR(it->page); + + it->kaddr = kmap_atomic(it->page); + return vi->xattr_isize - xattr_header_sz; +} + +/* + * Regardless of success or failure, `xattr_foreach' will end up with + * `ofs' pointing to the next xattr item rather than an arbitrary position. + */ +static int xattr_foreach(struct xattr_iter *it, + const struct xattr_iter_handlers *op, + unsigned int *tlimit) +{ + struct erofs_xattr_entry entry; + unsigned int value_sz, processed, slice; + int err; + + /* 0. fixup blkaddr, ofs, ipage */ + err = xattr_iter_fixup(it); + if (err) + return err; + + /* + * 1. read xattr entry to the memory, + * since we do EROFS_XATTR_ALIGN + * therefore entry should be in the page + */ + entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs); + if (tlimit) { + unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry); + + /* xattr on-disk corruption: xattr entry beyond xattr_isize */ + if (unlikely(*tlimit < entry_sz)) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } + *tlimit -= entry_sz; + } + + it->ofs += sizeof(struct erofs_xattr_entry); + value_sz = le16_to_cpu(entry.e_value_size); + + /* handle entry */ + err = op->entry(it, &entry); + if (err) { + it->ofs += entry.e_name_len + value_sz; + goto out; + } + + /* 2. handle xattr name (ofs will finally be at the end of name) */ + processed = 0; + + while (processed < entry.e_name_len) { + if (it->ofs >= EROFS_BLKSIZ) { + DBG_BUGON(it->ofs > EROFS_BLKSIZ); + + err = xattr_iter_fixup(it); + if (err) + goto out; + it->ofs = 0; + } + + slice = min_t(unsigned int, PAGE_SIZE - it->ofs, + entry.e_name_len - processed); + + /* handle name */ + err = op->name(it, processed, it->kaddr + it->ofs, slice); + if (err) { + it->ofs += entry.e_name_len - processed + value_sz; + goto out; + } + + it->ofs += slice; + processed += slice; + } + + /* 3. handle xattr value */ + processed = 0; + + if (op->alloc_buffer) { + err = op->alloc_buffer(it, value_sz); + if (err) { + it->ofs += value_sz; + goto out; + } + } + + while (processed < value_sz) { + if (it->ofs >= EROFS_BLKSIZ) { + DBG_BUGON(it->ofs > EROFS_BLKSIZ); + + err = xattr_iter_fixup(it); + if (err) + goto out; + it->ofs = 0; + } + + slice = min_t(unsigned int, PAGE_SIZE - it->ofs, + value_sz - processed); + op->value(it, processed, it->kaddr + it->ofs, slice); + it->ofs += slice; + processed += slice; + } + +out: + /* xattrs should be 4-byte aligned (on-disk constraint) */ + it->ofs = EROFS_XATTR_ALIGN(it->ofs); + return err < 0 ? err : 0; +} + +struct getxattr_iter { + struct xattr_iter it; + + char *buffer; + int buffer_size, index; + struct qstr name; +}; + +static int xattr_entrymatch(struct xattr_iter *_it, + struct erofs_xattr_entry *entry) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + return (it->index != entry->e_name_index || + it->name.len != entry->e_name_len) ? -ENOATTR : 0; +} + +static int xattr_namematch(struct xattr_iter *_it, + unsigned int processed, char *buf, unsigned int len) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + return memcmp(buf, it->name.name + processed, len) ? 
-ENOATTR : 0; +} + +static int xattr_checkbuffer(struct xattr_iter *_it, + unsigned int value_sz) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + int err = it->buffer_size < value_sz ? -ERANGE : 0; + + it->buffer_size = value_sz; + return !it->buffer ? 1 : err; +} + +static void xattr_copyvalue(struct xattr_iter *_it, + unsigned int processed, + char *buf, unsigned int len) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + memcpy(it->buffer + processed, buf, len); +} + +static const struct xattr_iter_handlers find_xattr_handlers = { + .entry = xattr_entrymatch, + .name = xattr_namematch, + .alloc_buffer = xattr_checkbuffer, + .value = xattr_copyvalue +}; + +static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) +{ + int ret; + unsigned int remaining; + + ret = inline_xattr_iter_begin(&it->it, inode); + if (ret < 0) + return ret; + + remaining = ret; + while (remaining) { + ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); + if (ret != -ENOATTR) + break; + } + xattr_iter_end_final(&it->it); + + return ret ? ret : it->buffer_size; +} + +static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + struct erofs_sb_info *const sbi = EROFS_SB(sb); + unsigned int i; + int ret = -ENOATTR; + + for (i = 0; i < vi->xattr_shared_count; ++i) { + erofs_blk_t blkaddr = + xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); + + it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); + + if (!i || blkaddr != it->it.blkaddr) { + if (i) + xattr_iter_end(&it->it, true); + + it->it.page = erofs_get_meta_page(sb, blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + + it->it.kaddr = kmap_atomic(it->it.page); + it->it.blkaddr = blkaddr; + } + + ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); + if (ret != -ENOATTR) + break; + } + if (vi->xattr_shared_count) + xattr_iter_end_final(&it->it); + + return ret ? 
ret : it->buffer_size; +} + +static bool erofs_xattr_user_list(struct dentry *dentry) +{ + return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); +} + +static bool erofs_xattr_trusted_list(struct dentry *dentry) +{ + return capable(CAP_SYS_ADMIN); +} + +int erofs_getxattr(struct inode *inode, int index, + const char *name, + void *buffer, size_t buffer_size) +{ + int ret; + struct getxattr_iter it; + + if (unlikely(!name)) + return -EINVAL; + + ret = init_inode_xattrs(inode); + if (ret) + return ret; + + it.index = index; + + it.name.len = strlen(name); + if (it.name.len > EROFS_NAME_LEN) + return -ERANGE; + it.name.name = name; + + it.buffer = buffer; + it.buffer_size = buffer_size; + + it.it.sb = inode->i_sb; + ret = inline_getxattr(inode, &it); + if (ret == -ENOATTR) + ret = shared_getxattr(inode, &it); + return ret; +} + +static int erofs_xattr_generic_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + struct erofs_sb_info *const sbi = EROFS_I_SB(inode); + + switch (handler->flags) { + case EROFS_XATTR_INDEX_USER: + if (!test_opt(sbi, XATTR_USER)) + return -EOPNOTSUPP; + break; + case EROFS_XATTR_INDEX_TRUSTED: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + break; + case EROFS_XATTR_INDEX_SECURITY: + break; + default: + return -EINVAL; + } + + return erofs_getxattr(inode, handler->flags, name, buffer, size); +} + +const struct xattr_handler erofs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .flags = EROFS_XATTR_INDEX_USER, + .list = erofs_xattr_user_list, + .get = erofs_xattr_generic_get, +}; + +const struct xattr_handler erofs_xattr_trusted_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .flags = EROFS_XATTR_INDEX_TRUSTED, + .list = erofs_xattr_trusted_list, + .get = erofs_xattr_generic_get, +}; + +#ifdef CONFIG_EROFS_FS_SECURITY +const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .flags = EROFS_XATTR_INDEX_SECURITY, + .get = erofs_xattr_generic_get, +}; +#endif + +const struct xattr_handler *erofs_xattr_handlers[] = { + &erofs_xattr_user_handler, +#ifdef CONFIG_EROFS_FS_POSIX_ACL + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, +#endif + &erofs_xattr_trusted_handler, +#ifdef CONFIG_EROFS_FS_SECURITY + &erofs_xattr_security_handler, +#endif + NULL, +}; + +struct listxattr_iter { + struct xattr_iter it; + + struct dentry *dentry; + char *buffer; + int buffer_size, buffer_ofs; +}; + +static int xattr_entrylist(struct xattr_iter *_it, + struct erofs_xattr_entry *entry) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + unsigned int prefix_len; + const char *prefix; + + const struct xattr_handler *h = + erofs_xattr_handler(entry->e_name_index); + + if (!h || (h->list && !h->list(it->dentry))) + return 1; + + prefix = xattr_prefix(h); + prefix_len = strlen(prefix); + + if (!it->buffer) { + it->buffer_ofs += prefix_len + entry->e_name_len + 1; + return 1; + } + + if (it->buffer_ofs + prefix_len + + entry->e_name_len + 1 > it->buffer_size) + return -ERANGE; + + memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len); + it->buffer_ofs += prefix_len; + return 0; +} + +static int xattr_namelist(struct xattr_iter *_it, + unsigned int processed, char *buf, unsigned int len) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + + memcpy(it->buffer + it->buffer_ofs, buf, len); + it->buffer_ofs += len; + return 0; +} + +static int xattr_skipvalue(struct 
xattr_iter *_it, + unsigned int value_sz) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + + it->buffer[it->buffer_ofs++] = '\0'; + return 1; +} + +static const struct xattr_iter_handlers list_xattr_handlers = { + .entry = xattr_entrylist, + .name = xattr_namelist, + .alloc_buffer = xattr_skipvalue, + .value = NULL +}; + +static int inline_listxattr(struct listxattr_iter *it) +{ + int ret; + unsigned int remaining; + + ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry)); + if (ret < 0) + return ret; + + remaining = ret; + while (remaining) { + ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining); + if (ret) + break; + } + xattr_iter_end_final(&it->it); + return ret ? ret : it->buffer_ofs; +} + +static int shared_listxattr(struct listxattr_iter *it) +{ + struct inode *const inode = d_inode(it->dentry); + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + struct erofs_sb_info *const sbi = EROFS_SB(sb); + unsigned int i; + int ret = 0; + + for (i = 0; i < vi->xattr_shared_count; ++i) { + erofs_blk_t blkaddr = + xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); + + it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); + if (!i || blkaddr != it->it.blkaddr) { + if (i) + xattr_iter_end(&it->it, true); + + it->it.page = erofs_get_meta_page(sb, blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + + it->it.kaddr = kmap_atomic(it->it.page); + it->it.blkaddr = blkaddr; + } + + ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL); + if (ret) + break; + } + if (vi->xattr_shared_count) + xattr_iter_end_final(&it->it); + + return ret ? ret : it->buffer_ofs; +} + +ssize_t erofs_listxattr(struct dentry *dentry, + char *buffer, size_t buffer_size) +{ + int ret; + struct listxattr_iter it; + + ret = init_inode_xattrs(d_inode(dentry)); + if (ret) + return ret; + + it.dentry = dentry; + it.buffer = buffer; + it.buffer_size = buffer_size; + it.buffer_ofs = 0; + + it.it.sb = dentry->d_sb; + + ret = inline_listxattr(&it); + if (ret < 0 && ret != -ENOATTR) + return ret; + return shared_listxattr(&it); +} + +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + int prefix, rc; + char *value = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + + rc = erofs_getxattr(inode, prefix, "", NULL, 0); + if (rc > 0) { + value = kmalloc(rc, GFP_KERNEL); + if (!value) + return ERR_PTR(-ENOMEM); + rc = erofs_getxattr(inode, prefix, "", value, rc); + } + + if (rc == -ENOATTR) + acl = NULL; + else if (rc < 0) + acl = ERR_PTR(rc); + else + acl = posix_acl_from_xattr(&init_user_ns, value, rc); + kfree(value); + return acl; +} +#endif + diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h new file mode 100644 index 000000000000..c5ca47d814dd --- /dev/null +++ b/fs/erofs/xattr.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_XATTR_H +#define __EROFS_XATTR_H + +#include "internal.h" +#include +#include + +/* Attribute not found */ +#define ENOATTR ENODATA + +static inline unsigned int inlinexattr_header_size(struct inode *inode) +{ + return sizeof(struct erofs_xattr_ibody_header) + + sizeof(u32) * EROFS_V(inode)->xattr_shared_count; +} + +static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi, + unsigned int xattr_id) +{ +#ifdef CONFIG_EROFS_FS_XATTR + return sbi->xattr_blkaddr + + xattr_id * sizeof(__u32) / EROFS_BLKSIZ; +#else + return 0; +#endif +} + +static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi, + unsigned int xattr_id) +{ + return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ; +} + +#ifdef CONFIG_EROFS_FS_XATTR +extern const struct xattr_handler erofs_xattr_user_handler; +extern const struct xattr_handler erofs_xattr_trusted_handler; +#ifdef CONFIG_EROFS_FS_SECURITY +extern const struct xattr_handler erofs_xattr_security_handler; +#endif + +static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx) +{ +static const struct xattr_handler *xattr_handler_map[] = { + [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, +#ifdef CONFIG_EROFS_FS_POSIX_ACL + [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler, + [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = + &posix_acl_default_xattr_handler, +#endif + [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler, +#ifdef CONFIG_EROFS_FS_SECURITY + [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler, +#endif +}; + + return idx && idx < ARRAY_SIZE(xattr_handler_map) ? + xattr_handler_map[idx] : NULL; +} + +extern const struct xattr_handler *erofs_xattr_handlers[]; + +int erofs_getxattr(struct inode *, int, const char *, void *, size_t); +ssize_t erofs_listxattr(struct dentry *, char *, size_t); +#else +static inline int erofs_getxattr(struct inode *inode, int index, + const char *name, void *buffer, + size_t buffer_size) +{ + return -EOPNOTSUPP; +} + +static inline ssize_t erofs_listxattr(struct dentry *dentry, + char *buffer, size_t buffer_size) +{ + return -EOPNOTSUPP; +} +#endif /* !CONFIG_EROFS_FS_XATTR */ + +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type); +#else +#define erofs_get_acl (NULL) +#endif + +#endif + diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c new file mode 100644 index 000000000000..b32ad585237c --- /dev/null +++ b/fs/erofs/zdata.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "zdata.h" +#include "compress.h" +#include + +#include + +/* + * a compressed_pages[] placeholder in order to avoid + * being filled with file pages for in-place decompression. 
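+ * (DELAYEDALLOC in preload_compressed_pages() below parks this marker
+ * in a slot so the slot can no longer be claimed for in-place I/O.)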
+ */
+#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
+
+/* how to allocate cached pages for a pcluster */
+enum z_erofs_cache_alloctype {
+	DONTALLOC,	/* don't allocate any cached pages */
+	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
+};
+
+/*
+ * tagged pointer with 1-bit tag for all compressed pages
+ * tag 0 - the page is just found with an extra page reference
+ */
+typedef tagptr1_t compressed_page_t;
+
+#define tag_compressed_page_justfound(page) \
+	tagptr_fold(compressed_page_t, page, 1)
+
+static struct workqueue_struct *z_erofs_workqueue __read_mostly;
+static struct kmem_cache *pcluster_cachep __read_mostly;
+
+void z_erofs_exit_zip_subsystem(void)
+{
+	destroy_workqueue(z_erofs_workqueue);
+	kmem_cache_destroy(pcluster_cachep);
+}
+
+static inline int init_unzip_workqueue(void)
+{
+	const unsigned int onlinecpus = num_possible_cpus();
+	const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
+
+	/*
+	 * no need to spawn too many threads: limiting the thread count
+	 * reduces scheduling overhead; perhaps per-CPU threads would be
+	 * even better?
+	 */
+	z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
+					    onlinecpus + onlinecpus / 4);
+	return z_erofs_workqueue ? 0 : -ENOMEM;
+}
+
+static void init_once(void *ptr)
+{
+	struct z_erofs_pcluster *pcl = ptr;
+	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
+	unsigned int i;
+
+	mutex_init(&cl->lock);
+	cl->nr_pages = 0;
+	cl->vcnt = 0;
+	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
+		pcl->compressed_pages[i] = NULL;
+}
+
+static void init_always(struct z_erofs_pcluster *pcl)
+{
+	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
+
+	atomic_set(&pcl->obj.refcount, 1);
+
+	DBG_BUGON(cl->nr_pages);
+	DBG_BUGON(cl->vcnt);
+}
+
+int __init z_erofs_init_zip_subsystem(void)
+{
+	pcluster_cachep = kmem_cache_create("erofs_compress",
+					    Z_EROFS_WORKGROUP_SIZE, 0,
+					    SLAB_RECLAIM_ACCOUNT, init_once);
+	if (pcluster_cachep) {
+		if (!init_unzip_workqueue())
+			return 0;
+
+		kmem_cache_destroy(pcluster_cachep);
+	}
+	return -ENOMEM;
+}
+
+enum z_erofs_collectmode {
+	COLLECT_SECONDARY,
+	COLLECT_PRIMARY,
+	/*
+	 * The current collection was the tail of an existing chain, and in
+	 * addition the previously processed chained collections have all
+	 * been decided to be hooked up to it.
+ * A new chain will be created for the remaining collections which are + * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, + * the next collection cannot reuse the whole page safely in + * the following scenario: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (belongs to the next cl) | (belongs to the current cl) | + * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| + */ + COLLECT_PRIMARY_HOOKED, + COLLECT_PRIMARY_FOLLOWED_NOINPLACE, + /* + * The current collection has been linked with the owned chain, and + * could also be linked with the remaining collections, which means + * if the processing page is the tail page of the collection, thus + * the current collection can safely use the whole page (since + * the previous collection is under control) for in-place I/O, as + * illustrated below: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (of the current cl) | (of the previous collection) | + * | PRIMARY_FOLLOWED or | | + * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| + * + * [ (*) the above page can be used as inplace I/O. ] + */ + COLLECT_PRIMARY_FOLLOWED, +}; + +struct z_erofs_collector { + struct z_erofs_pagevec_ctor vector; + + struct z_erofs_pcluster *pcl, *tailpcl; + struct z_erofs_collection *cl; + struct page **compressedpages; + z_erofs_next_pcluster_t owned_head; + + enum z_erofs_collectmode mode; +}; + +struct z_erofs_decompress_frontend { + struct inode *const inode; + + struct z_erofs_collector clt; + struct erofs_map_blocks map; + + /* used for applying cache strategy on the fly */ + bool backmost; + erofs_off_t headoffset; +}; + +#define COLLECTOR_INIT() { \ + .owned_head = Z_EROFS_PCLUSTER_TAIL, \ + .mode = COLLECT_PRIMARY_FOLLOWED } + +#define DECOMPRESS_FRONTEND_INIT(__i) { \ + .inode = __i, .clt = COLLECTOR_INIT(), \ + .backmost = true, } + +static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; +static DEFINE_MUTEX(z_pagemap_global_lock); + +static void preload_compressed_pages(struct z_erofs_collector *clt, + struct address_space *mc, + enum z_erofs_cache_alloctype type, + struct list_head *pagepool) +{ + const struct z_erofs_pcluster *pcl = clt->pcl; + const unsigned int clusterpages = BIT(pcl->clusterbits); + struct page **pages = clt->compressedpages; + pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); + bool standalone = true; + + if (clt->mode < COLLECT_PRIMARY_FOLLOWED) + return; + + for (; pages < pcl->compressed_pages + clusterpages; ++pages) { + struct page *page; + compressed_page_t t; + + /* the compressed page was loaded before */ + if (READ_ONCE(*pages)) + continue; + + page = find_get_page(mc, index); + + if (page) { + t = tag_compressed_page_justfound(page); + } else if (type == DELAYEDALLOC) { + t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); + } else { /* DONTALLOC */ + if (standalone) + clt->compressedpages = pages; + standalone = false; + continue; + } + + if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) + continue; + + if (page) + put_page(page); + } + + if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ + clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; +} + +/* called by erofs_shrinker to get rid of all compressed_pages */ +int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp) +{ + struct z_erofs_pcluster *const pcl = + container_of(grp, 
struct z_erofs_pcluster, obj); + struct address_space *const mapping = MNGD_MAPPING(sbi); + const unsigned int clusterpages = BIT(pcl->clusterbits); + int i; + + /* + * refcount of workgroup is now freezed as 1, + * therefore no need to worry about available decompression users. + */ + for (i = 0; i < clusterpages; ++i) { + struct page *page = pcl->compressed_pages[i]; + + if (!page) + continue; + + /* block other users from reclaiming or migrating the page */ + if (!trylock_page(page)) + return -EBUSY; + + if (unlikely(page->mapping != mapping)) + continue; + + /* barrier is implied in the following 'unlock_page' */ + WRITE_ONCE(pcl->compressed_pages[i], NULL); + set_page_private(page, 0); + ClearPagePrivate(page); + + unlock_page(page); + put_page(page); + } + return 0; +} + +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page) +{ + struct z_erofs_pcluster *const pcl = (void *)page_private(page); + const unsigned int clusterpages = BIT(pcl->clusterbits); + int ret = 0; /* 0 - busy */ + + if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { + unsigned int i; + + for (i = 0; i < clusterpages; ++i) { + if (pcl->compressed_pages[i] == page) { + WRITE_ONCE(pcl->compressed_pages[i], NULL); + ret = 1; + break; + } + } + erofs_workgroup_unfreeze(&pcl->obj, 1); + + if (ret) { + ClearPagePrivate(page); + put_page(page); + } + } + return ret; +} + +/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */ +static inline bool try_inplace_io(struct z_erofs_collector *clt, + struct page *page) +{ + struct z_erofs_pcluster *const pcl = clt->pcl; + const unsigned int clusterpages = BIT(pcl->clusterbits); + + while (clt->compressedpages < pcl->compressed_pages + clusterpages) { + if (!cmpxchg(clt->compressedpages++, NULL, page)) + return true; + } + return false; +} + +/* callers must be with collection lock held */ +static int z_erofs_attach_page(struct z_erofs_collector *clt, + struct page *page, + enum z_erofs_page_type type) +{ + int ret; + bool occupied; + + /* give priority for inplaceio */ + if (clt->mode >= COLLECT_PRIMARY && + type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && + try_inplace_io(clt, page)) + return 0; + + ret = z_erofs_pagevec_enqueue(&clt->vector, + page, type, &occupied); + clt->cl->vcnt += (unsigned int)ret; + + return ret ? 0 : -EAGAIN; +} + +static enum z_erofs_collectmode +try_to_claim_pcluster(struct z_erofs_pcluster *pcl, + z_erofs_next_pcluster_t *owned_head) +{ + /* let's claim these following types of pclusters */ +retry: + if (pcl->next == Z_EROFS_PCLUSTER_NIL) { + /* type 1, nil pcluster */ + if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, + *owned_head) != Z_EROFS_PCLUSTER_NIL) + goto retry; + + *owned_head = &pcl->next; + /* lucky, I am the followee :) */ + return COLLECT_PRIMARY_FOLLOWED; + } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) { + /* + * type 2, link to the end of a existing open chain, + * be careful that its submission itself is governed + * by the original owned chain. 
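
The retry/cmpxchg dance in try_to_claim_pcluster() above (the Z_EROFS_PCLUSTER_NIL branch) is a standard lock-free list insertion. Here is a stand-alone sketch with C11 atomics standing in for the kernel's cmpxchg() — hypothetical types, and without the TAIL handling the real code also needs:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { _Atomic(struct node *) next; };

    /* claim an unchained node (next == NULL) as the head of our chain */
    static int claim_node(struct node *n, struct node *owned_head)
    {
        struct node *expected = NULL;

        return atomic_compare_exchange_strong(&n->next, &expected,
                                              owned_head);
    }

A failed CAS means another writer linked the node first, which is why the driver loops back and re-examines pcl->next instead of giving up.
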
+ */ + if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, + *owned_head) != Z_EROFS_PCLUSTER_TAIL) + goto retry; + *owned_head = Z_EROFS_PCLUSTER_TAIL; + return COLLECT_PRIMARY_HOOKED; + } + return COLLECT_PRIMARY; /* :( better luck next time */ +} + +static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct erofs_workgroup *grp; + struct z_erofs_pcluster *pcl; + struct z_erofs_collection *cl; + unsigned int length; + bool tag; + + grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); + if (!grp) + return NULL; + + pcl = container_of(grp, struct z_erofs_pcluster, obj); + if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + + cl = z_erofs_primarycollection(pcl); + if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + + length = READ_ONCE(pcl->length); + if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) { + if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + } else { + unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; + + if (map->m_flags & EROFS_MAP_FULL_MAPPED) + llen |= Z_EROFS_PCLUSTER_FULL_LENGTH; + + while (llen > length && + length != cmpxchg_relaxed(&pcl->length, length, llen)) { + cpu_relax(); + length = READ_ONCE(pcl->length); + } + } + mutex_lock(&cl->lock); + /* used to check tail merging loop due to corrupted images */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = pcl; + clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); + /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = NULL; + clt->pcl = pcl; + clt->cl = cl; + return cl; +} + +static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct z_erofs_pcluster *pcl; + struct z_erofs_collection *cl; + int err; + + /* no available workgroup, let's allocate one */ + pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); + if (unlikely(!pcl)) + return ERR_PTR(-ENOMEM); + + init_always(pcl); + pcl->obj.index = map->m_pa >> PAGE_SHIFT; + + pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | + (map->m_flags & EROFS_MAP_FULL_MAPPED ? + Z_EROFS_PCLUSTER_FULL_LENGTH : 0); + + if (map->m_flags & EROFS_MAP_ZIPPED) + pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; + else + pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; + + pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0]; + pcl->clusterbits -= PAGE_SHIFT; + + /* new pclusters should be claimed as type 1, primary and followed */ + pcl->next = clt->owned_head; + clt->mode = COLLECT_PRIMARY_FOLLOWED; + + cl = z_erofs_primarycollection(pcl); + cl->pageofs = map->m_la & ~PAGE_MASK; + + /* + * lock all primary followed works before visible to others + * and mutex_trylock *never* fails for a new pcluster. 
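
One detail worth calling out in cllookup() above: pcl->length is only ever raised, so its cmpxchg_relaxed() loop implements an "atomic max". A generic sketch of that idiom, again with C11 atomics in place of the kernel primitive (illustrative, not driver code):

    #include <stdatomic.h>

    /* raise *v to at least 'new'; concurrent callers never lower it */
    static void atomic_set_max(_Atomic unsigned int *v, unsigned int new)
    {
        unsigned int cur = atomic_load_explicit(v, memory_order_relaxed);

        while (cur < new &&
               !atomic_compare_exchange_weak_explicit(v, &cur, new,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
            ;   /* 'cur' is reloaded by the failed CAS; just retry */
    }
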
+ */ + mutex_trylock(&cl->lock); + + err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0); + if (err) { + mutex_unlock(&cl->lock); + kmem_cache_free(pcluster_cachep, pcl); + return ERR_PTR(-EAGAIN); + } + /* used to check tail merging loop due to corrupted images */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = pcl; + clt->owned_head = &pcl->next; + clt->pcl = pcl; + clt->cl = cl; + return cl; +} + +static int z_erofs_collector_begin(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct z_erofs_collection *cl; + + DBG_BUGON(clt->cl); + + /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */ + DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL); + DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + + if (!PAGE_ALIGNED(map->m_pa)) { + DBG_BUGON(1); + return -EINVAL; + } + +repeat: + cl = cllookup(clt, inode, map); + if (!cl) { + cl = clregister(clt, inode, map); + + if (unlikely(cl == ERR_PTR(-EAGAIN))) + goto repeat; + } + + if (IS_ERR(cl)) + return PTR_ERR(cl); + + z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS, + cl->pagevec, cl->vcnt); + + clt->compressedpages = clt->pcl->compressed_pages; + if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ + clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES; + return 0; +} + +/* + * keep in mind that no referenced pclusters will be freed + * only after a RCU grace period. + */ +static void z_erofs_rcu_callback(struct rcu_head *head) +{ + struct z_erofs_collection *const cl = + container_of(head, struct z_erofs_collection, rcu); + + kmem_cache_free(pcluster_cachep, + container_of(cl, struct z_erofs_pcluster, + primary_collection)); +} + +void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) +{ + struct z_erofs_pcluster *const pcl = + container_of(grp, struct z_erofs_pcluster, obj); + struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl); + + call_rcu(&cl->rcu, z_erofs_rcu_callback); +} + +static void z_erofs_collection_put(struct z_erofs_collection *cl) +{ + struct z_erofs_pcluster *const pcl = + container_of(cl, struct z_erofs_pcluster, primary_collection); + + erofs_workgroup_put(&pcl->obj); +} + +static bool z_erofs_collector_end(struct z_erofs_collector *clt) +{ + struct z_erofs_collection *cl = clt->cl; + + if (!cl) + return false; + + z_erofs_pagevec_ctor_exit(&clt->vector, false); + mutex_unlock(&cl->lock); + + /* + * if all pending pages are added, don't hold its reference + * any longer if the pcluster isn't hosted by ourselves. 
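
z_erofs_rcu_callback() and erofs_workgroup_free_rcu() above are the textbook RCU deferred-free pattern: the object is reclaimed only after every reader that might still hold a pointer obtained under rcu_read_lock() has finished. A minimal sketch of the pattern, with a hypothetical struct obj:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
        struct rcu_head rcu;
        /* ... payload ... */
    };

    static void obj_free_rcu(struct rcu_head *head)
    {
        /* runs after a grace period; no reader can still see 'obj' */
        kfree(container_of(head, struct obj, rcu));
    }

    static void obj_release(struct obj *o)
    {
        call_rcu(&o->rcu, obj_free_rcu);
    }

The only erofs-specific twist is that the rcu_head lives inside the primary collection, so the callback has to container_of() its way back out to the enclosing pcluster before freeing.
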
+ */ + if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) + z_erofs_collection_put(cl); + + clt->cl = NULL; + return true; +} + +static inline struct page *__stagingpage_alloc(struct list_head *pagepool, + gfp_t gfp) +{ + struct page *page = erofs_allocpage(pagepool, gfp, true); + + page->mapping = Z_EROFS_MAPPING_STAGING; + return page; +} + +static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, + unsigned int cachestrategy, + erofs_off_t la) +{ + if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) + return false; + + if (fe->backmost) + return true; + + return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && + la < fe->headoffset; +} + +static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, + struct page *page, + struct list_head *pagepool) +{ + struct inode *const inode = fe->inode; + struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); + struct erofs_map_blocks *const map = &fe->map; + struct z_erofs_collector *const clt = &fe->clt; + const loff_t offset = page_offset(page); + bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); + + enum z_erofs_cache_alloctype cache_strategy; + enum z_erofs_page_type page_type; + unsigned int cur, end, spiltted, index; + int err = 0; + + /* register locked file pages as online pages in pack */ + z_erofs_onlinepage_init(page); + + spiltted = 0; + end = PAGE_SIZE; +repeat: + cur = end - 1; + + /* lucky, within the range of the current map_blocks */ + if (offset + cur >= map->m_la && + offset + cur < map->m_la + map->m_llen) { + /* didn't get a valid collection previously (very rare) */ + if (!clt->cl) + goto restart_now; + goto hitted; + } + + /* go ahead the next map_blocks */ + debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); + + if (z_erofs_collector_end(clt)) + fe->backmost = false; + + map->m_la = offset + cur; + map->m_llen = 0; + err = z_erofs_map_blocks_iter(inode, map, 0); + if (unlikely(err)) + goto err_out; + +restart_now: + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) + goto hitted; + + err = z_erofs_collector_begin(clt, inode, map); + if (unlikely(err)) + goto err_out; + + /* preload all compressed pages (maybe downgrade role if necessary) */ + if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) + cache_strategy = DELAYEDALLOC; + else + cache_strategy = DONTALLOC; + + preload_compressed_pages(clt, MNGD_MAPPING(sbi), + cache_strategy, pagepool); + + tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); +hitted: + cur = end - min_t(unsigned int, offset + end - map->m_la, end); + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { + zero_user_segment(page, cur, end); + goto next_part; + } + + /* let's derive page type */ + page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD : + (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : + (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); + + if (cur) + tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); + +retry: + err = z_erofs_attach_page(clt, page, page_type); + /* should allocate an additional staging page for pagevec */ + if (err == -EAGAIN) { + struct page *const newpage = + __stagingpage_alloc(pagepool, GFP_NOFS); + + err = z_erofs_attach_page(clt, newpage, + Z_EROFS_PAGE_TYPE_EXCLUSIVE); + if (likely(!err)) + goto retry; + } + + if (unlikely(err)) + goto err_out; + + index = page->index - (map->m_la >> PAGE_SHIFT); + + z_erofs_onlinepage_fixup(page, index, true); + + /* bump up the number of spiltted parts of a page */ + ++spiltted; + /* also update nr_pages */ + clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1); +next_part: + /* can be used for verification */ + map->m_llen = offset + cur - map->m_la; + + end = cur; + if (end > 0) + goto repeat; + +out: + z_erofs_onlinepage_endio(page); + + debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", + __func__, page, spiltted, map->m_llen); + return err; + + /* if some error occurred while processing this page */ +err_out: + SetPageError(page); + goto out; +} + +static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) +{ + tagptr1_t t = tagptr_init(tagptr1_t, ptr); + struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); + bool background = tagptr_unfold_tags(t); + + if (!background) { + unsigned long flags; + + spin_lock_irqsave(&io->u.wait.lock, flags); + if (!atomic_add_return(bios, &io->pending_bios)) + wake_up_locked(&io->u.wait); + spin_unlock_irqrestore(&io->u.wait.lock, flags); + return; + } + + if (!atomic_add_return(bios, &io->pending_bios)) + queue_work(z_erofs_workqueue, &io->u.work); +} + +static inline void z_erofs_vle_read_endio(struct bio *bio) +{ + struct erofs_sb_info *sbi = NULL; + blk_status_t err = bio->bi_status; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + bool cachemngd = false; + + DBG_BUGON(PageUptodate(page)); + DBG_BUGON(!page->mapping); + + if (unlikely(!sbi && !z_erofs_page_is_staging(page))) { + sbi = EROFS_SB(page->mapping->host->i_sb); + + if (time_to_inject(sbi, FAULT_READ_IO)) { + erofs_show_injection_info(FAULT_READ_IO); + err = BLK_STS_IOERR; + } + } + + /* sbi should already be gotten if the page is managed */ + if (sbi) + cachemngd = erofs_page_is_managed(sbi, page); + + if (unlikely(err)) + SetPageError(page); + else if (cachemngd) + SetPageUptodate(page); + + if (cachemngd) + unlock_page(page); + } + + z_erofs_vle_unzip_kickoff(bio->bi_private, -1); + bio_put(bio); +} + +static int z_erofs_decompress_pcluster(struct super_block *sb, + struct z_erofs_pcluster *pcl, + struct list_head *pagepool) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + const unsigned int clusterpages = BIT(pcl->clusterbits); + struct z_erofs_pagevec_ctor ctor; + unsigned int i, outputsize, llen, nr_pages; + struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; + struct page **pages, **compressed_pages, *page; + + enum z_erofs_page_type page_type; + bool overlapped, partial; + struct z_erofs_collection *cl; + int err; + + might_sleep(); + cl = z_erofs_primarycollection(pcl); + DBG_BUGON(!READ_ONCE(cl->nr_pages)); + + mutex_lock(&cl->lock); + nr_pages = cl->nr_pages; + + if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) { + pages = pages_onstack; + } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES && + mutex_trylock(&z_pagemap_global_lock)) { + pages = 
z_pagemap_global; + } else { + gfp_t gfp_flags = GFP_KERNEL; + + if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES) + gfp_flags |= __GFP_NOFAIL; + + pages = kvmalloc_array(nr_pages, sizeof(struct page *), + gfp_flags); + + /* fallback to global pagemap for the lowmem scenario */ + if (unlikely(!pages)) { + mutex_lock(&z_pagemap_global_lock); + pages = z_pagemap_global; + } + } + + for (i = 0; i < nr_pages; ++i) + pages[i] = NULL; + + err = 0; + z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS, + cl->pagevec, 0); + + for (i = 0; i < cl->vcnt; ++i) { + unsigned int pagenr; + + page = z_erofs_pagevec_dequeue(&ctor, &page_type); + + /* all pages in pagevec ought to be valid */ + DBG_BUGON(!page); + DBG_BUGON(!page->mapping); + + if (z_erofs_put_stagingpage(pagepool, page)) + continue; + + if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD) + pagenr = 0; + else + pagenr = z_erofs_onlinepage_index(page); + + DBG_BUGON(pagenr >= nr_pages); + + /* + * currently EROFS doesn't support multiref(dedup), + * so here erroring out one multiref page. + */ + if (unlikely(pages[pagenr])) { + DBG_BUGON(1); + SetPageError(pages[pagenr]); + z_erofs_onlinepage_endio(pages[pagenr]); + err = -EFSCORRUPTED; + } + pages[pagenr] = page; + } + z_erofs_pagevec_ctor_exit(&ctor, true); + + overlapped = false; + compressed_pages = pcl->compressed_pages; + + for (i = 0; i < clusterpages; ++i) { + unsigned int pagenr; + + page = compressed_pages[i]; + + /* all compressed pages ought to be valid */ + DBG_BUGON(!page); + DBG_BUGON(!page->mapping); + + if (!z_erofs_page_is_staging(page)) { + if (erofs_page_is_managed(sbi, page)) { + if (unlikely(!PageUptodate(page))) + err = -EIO; + continue; + } + + /* + * only if non-head page can be selected + * for inplace decompression + */ + pagenr = z_erofs_onlinepage_index(page); + + DBG_BUGON(pagenr >= nr_pages); + if (unlikely(pages[pagenr])) { + DBG_BUGON(1); + SetPageError(pages[pagenr]); + z_erofs_onlinepage_endio(pages[pagenr]); + err = -EFSCORRUPTED; + } + pages[pagenr] = page; + + overlapped = true; + } + + /* PG_error needs checking for inplaced and staging pages */ + if (unlikely(PageError(page))) { + DBG_BUGON(PageUptodate(page)); + err = -EIO; + } + } + + if (unlikely(err)) + goto out; + + llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; + if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) { + outputsize = llen; + partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); + } else { + outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs; + partial = true; + } + + err = z_erofs_decompress(&(struct z_erofs_decompress_req) { + .sb = sb, + .in = compressed_pages, + .out = pages, + .pageofs_out = cl->pageofs, + .inputsize = PAGE_SIZE, + .outputsize = outputsize, + .alg = pcl->algorithmformat, + .inplace_io = overlapped, + .partial_decoding = partial + }, pagepool); + +out: + /* must handle all compressed pages before endding pages */ + for (i = 0; i < clusterpages; ++i) { + page = compressed_pages[i]; + + if (erofs_page_is_managed(sbi, page)) + continue; + + /* recycle all individual staging pages */ + (void)z_erofs_put_stagingpage(pagepool, page); + + WRITE_ONCE(compressed_pages[i], NULL); + } + + for (i = 0; i < nr_pages; ++i) { + page = pages[i]; + if (!page) + continue; + + DBG_BUGON(!page->mapping); + + /* recycle all individual staging pages */ + if (z_erofs_put_stagingpage(pagepool, page)) + continue; + + if (unlikely(err < 0)) + SetPageError(page); + + z_erofs_onlinepage_endio(page); + } + + if (pages == z_pagemap_global) + mutex_unlock(&z_pagemap_global_lock); + else if 
(unlikely(pages != pages_onstack)) + kvfree(pages); + + cl->nr_pages = 0; + cl->vcnt = 0; + + /* all cl locks MUST be taken before the following line */ + WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); + + /* all cl locks SHOULD be released right now */ + mutex_unlock(&cl->lock); + + z_erofs_collection_put(cl); + return err; +} + +static void z_erofs_vle_unzip_all(struct super_block *sb, + struct z_erofs_unzip_io *io, + struct list_head *pagepool) +{ + z_erofs_next_pcluster_t owned = io->head; + + while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { + struct z_erofs_pcluster *pcl; + + /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ + DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); + + /* no possible that 'owned' equals NULL */ + DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); + + pcl = container_of(owned, struct z_erofs_pcluster, next); + owned = READ_ONCE(pcl->next); + + z_erofs_decompress_pcluster(sb, pcl, pagepool); + } +} + +static void z_erofs_vle_unzip_wq(struct work_struct *work) +{ + struct z_erofs_unzip_io_sb *iosb = + container_of(work, struct z_erofs_unzip_io_sb, io.u.work); + LIST_HEAD(pagepool); + + DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool); + + put_pages_list(&pagepool); + kvfree(iosb); +} + +static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, + unsigned int nr, + struct list_head *pagepool, + struct address_space *mc, + gfp_t gfp) +{ + /* determined at compile time to avoid too many #ifdefs */ + const bool nocache = __builtin_constant_p(mc) ? !mc : false; + const pgoff_t index = pcl->obj.index; + bool tocache = false; + + struct address_space *mapping; + struct page *oldpage, *page; + + compressed_page_t t; + int justfound; + +repeat: + page = READ_ONCE(pcl->compressed_pages[nr]); + oldpage = page; + + if (!page) + goto out_allocpage; + + /* + * the cached page has not been allocated and + * an placeholder is out there, prepare it now. + */ + if (!nocache && page == PAGE_UNALLOCATED) { + tocache = true; + goto out_allocpage; + } + + /* process the target tagged pointer */ + t = tagptr_init(compressed_page_t, page); + justfound = tagptr_unfold_tags(t); + page = tagptr_unfold_ptr(t); + + mapping = READ_ONCE(page->mapping); + + /* + * if managed cache is disabled, it's no way to + * get such a cached-like page. + */ + if (nocache) { + /* if managed cache is disabled, it is impossible `justfound' */ + DBG_BUGON(justfound); + + /* and it should be locked, not uptodate, and not truncated */ + DBG_BUGON(!PageLocked(page)); + DBG_BUGON(PageUptodate(page)); + DBG_BUGON(!mapping); + goto out; + } + + /* + * unmanaged (file) pages are all locked solidly, + * therefore it is impossible for `mapping' to be NULL. + */ + if (mapping && mapping != mc) + /* ought to be unmanaged pages */ + goto out; + + lock_page(page); + + /* only true if page reclaim goes wrong, should never happen */ + DBG_BUGON(justfound && PagePrivate(page)); + + /* the page is still in manage cache */ + if (page->mapping == mc) { + WRITE_ONCE(pcl->compressed_pages[nr], page); + + ClearPageError(page); + if (!PagePrivate(page)) { + /* + * impossible to be !PagePrivate(page) for + * the current restriction as well if + * the page is already in compressed_pages[]. 
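
Note how z_erofs_vle_unzip_all() above loads pcl->next before calling z_erofs_decompress_pcluster(): the latter resets the link and drops the collection reference, so the pcluster must not be touched afterwards. The traversal shape in isolation (illustrative types; the driver additionally uses READ_ONCE() since the link is written concurrently):

    struct pcl { struct pcl *next; };

    /* process() may free or recycle the node, so fetch the link first */
    static void consume_chain(struct pcl *head, void (*process)(struct pcl *))
    {
        while (head) {
            struct pcl *next = head->next;

            process(head);
            head = next;
        }
    }
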
+ */ + DBG_BUGON(!justfound); + + justfound = 0; + set_page_private(page, (unsigned long)pcl); + SetPagePrivate(page); + } + + /* no need to submit io if it is already up-to-date */ + if (PageUptodate(page)) { + unlock_page(page); + page = NULL; + } + goto out; + } + + /* + * the managed page has been truncated, it's unsafe to + * reuse this one, let's allocate a new cache-managed page. + */ + DBG_BUGON(page->mapping); + DBG_BUGON(!justfound); + + tocache = true; + unlock_page(page); + put_page(page); +out_allocpage: + page = __stagingpage_alloc(pagepool, gfp); + if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { + list_add(&page->lru, pagepool); + cpu_relax(); + goto repeat; + } + if (nocache || !tocache) + goto out; + if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { + page->mapping = Z_EROFS_MAPPING_STAGING; + goto out; + } + + set_page_private(page, (unsigned long)pcl); + SetPagePrivate(page); +out: /* the only exit (for tracing and debugging) */ + return page; +} + +static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, + struct z_erofs_unzip_io *io, + bool foreground) +{ + struct z_erofs_unzip_io_sb *iosb; + + if (foreground) { + /* waitqueue available for foreground io */ + DBG_BUGON(!io); + + init_waitqueue_head(&io->u.wait); + atomic_set(&io->pending_bios, 0); + goto out; + } + + iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); + DBG_BUGON(!iosb); + + /* initialize fields in the allocated descriptor */ + io = &iosb->io; + iosb->sb = sb; + INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); +out: + io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + return io; +} + +/* define decompression jobqueue types */ +enum { + JQ_BYPASS, + JQ_SUBMIT, + NR_JOBQUEUES, +}; + +static void *jobqueueset_init(struct super_block *sb, + z_erofs_next_pcluster_t qtail[], + struct z_erofs_unzip_io *q[], + struct z_erofs_unzip_io *fgq, + bool forcefg) +{ + /* + * if managed cache is enabled, bypass jobqueue is needed, + * no need to read from device for all pclusters in this queue. + */ + q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); + qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; + + q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); + qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; + + return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); +} + +static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, + z_erofs_next_pcluster_t qtail[], + z_erofs_next_pcluster_t owned_head) +{ + z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; + z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; + + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + if (owned_head == Z_EROFS_PCLUSTER_TAIL) + owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + + WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); + + WRITE_ONCE(*submit_qtail, owned_head); + WRITE_ONCE(*bypass_qtail, &pcl->next); + + qtail[JQ_BYPASS] = &pcl->next; +} + +static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], + unsigned int nr_bios, + bool force_fg) +{ + /* + * although background is preferred, no one is pending for submission. + * don't issue workqueue for decompression but drop it directly instead. 
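
The pending_bios accounting that connects z_erofs_vle_unzip_kickoff() with the submission path below is easy to misread: the counter starts at zero, each completed bio subtracts one, and the submitter adds the total bio count once, after the last submission — so the value may go transiently negative. Whichever call lands the counter back on exactly zero performs the wakeup. Condensed to its core with C11 atomics (hypothetical wakeup hook):

    #include <stdatomic.h>

    static _Atomic int pending;

    /* completions pass delta == -1; the submitter passes +nr_bios once */
    static void kickoff(int delta, void (*wakeup)(void))
    {
        if (atomic_fetch_add(&pending, delta) + delta == 0)
            wakeup();
    }
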
+ */ + if (force_fg || nr_bios) + return false; + + kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io)); + return true; +} + +static bool z_erofs_vle_submit_all(struct super_block *sb, + z_erofs_next_pcluster_t owned_head, + struct list_head *pagepool, + struct z_erofs_unzip_io *fgq, + bool force_fg) +{ + struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); + z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; + struct z_erofs_unzip_io *q[NR_JOBQUEUES]; + struct bio *bio; + void *bi_private; + /* since bio will be NULL, no need to initialize last_index */ + pgoff_t uninitialized_var(last_index); + bool force_submit = false; + unsigned int nr_bios; + + if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL)) + return false; + + force_submit = false; + bio = NULL; + nr_bios = 0; + bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); + + /* by default, all need io submission */ + q[JQ_SUBMIT]->head = owned_head; + + do { + struct z_erofs_pcluster *pcl; + unsigned int clusterpages; + pgoff_t first_index; + struct page *page; + unsigned int i = 0, bypass = 0; + int err; + + /* no possible 'owned_head' equals the following */ + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); + + pcl = container_of(owned_head, struct z_erofs_pcluster, next); + + clusterpages = BIT(pcl->clusterbits); + + /* close the main owned chain at first */ + owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, + Z_EROFS_PCLUSTER_TAIL_CLOSED); + + first_index = pcl->obj.index; + force_submit |= (first_index != last_index + 1); + +repeat: + page = pickup_page_for_submission(pcl, i, pagepool, + MNGD_MAPPING(sbi), + GFP_NOFS); + if (!page) { + force_submit = true; + ++bypass; + goto skippage; + } + + if (bio && force_submit) { +submit_bio_retry: + __submit_bio(bio, REQ_OP_READ, 0); + bio = NULL; + } + + if (!bio) { + bio = erofs_grab_bio(sb, first_index + i, + BIO_MAX_PAGES, bi_private, + z_erofs_vle_read_endio, true); + ++nr_bios; + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + if (err < PAGE_SIZE) + goto submit_bio_retry; + + force_submit = false; + last_index = first_index + i; +skippage: + if (++i < clusterpages) + goto repeat; + + if (bypass < clusterpages) + qtail[JQ_SUBMIT] = &pcl->next; + else + move_to_bypass_jobqueue(pcl, qtail, owned_head); + } while (owned_head != Z_EROFS_PCLUSTER_TAIL); + + if (bio) + __submit_bio(bio, REQ_OP_READ, 0); + + if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) + return true; + + z_erofs_vle_unzip_kickoff(bi_private, nr_bios); + return true; +} + +static void z_erofs_submit_and_unzip(struct super_block *sb, + struct z_erofs_collector *clt, + struct list_head *pagepool, + bool force_fg) +{ + struct z_erofs_unzip_io io[NR_JOBQUEUES]; + + if (!z_erofs_vle_submit_all(sb, clt->owned_head, + pagepool, io, force_fg)) + return; + + /* decompress no I/O pclusters immediately */ + z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); + + if (!force_fg) + return; + + /* wait until all bios are completed */ + wait_event(io[JQ_SUBMIT].u.wait, + !atomic_read(&io[JQ_SUBMIT].pending_bios)); + + /* let's synchronous decompression */ + z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); +} + +static int z_erofs_vle_normalaccess_readpage(struct file *file, + struct page *page) +{ + struct inode *const inode = page->mapping->host; + struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + int err; + LIST_HEAD(pagepool); + + trace_erofs_readpage(page, false); + + f.headoffset = (erofs_off_t)page->index << 
PAGE_SHIFT; + + err = z_erofs_do_read_page(&f, page, &pagepool); + (void)z_erofs_collector_end(&f.clt); + + /* if some compressed cluster ready, need submit them anyway */ + z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true); + + if (err) + errln("%s, failed to read, err [%d]", __func__, err); + + if (f.map.mpage) + put_page(f.map.mpage); + + /* clean up the remaining free pages */ + put_pages_list(&pagepool); + return err; +} + +static bool should_decompress_synchronously(struct erofs_sb_info *sbi, + unsigned int nr) +{ + return nr <= sbi->max_sync_decompress_pages; +} + +static int z_erofs_vle_normalaccess_readpages(struct file *filp, + struct address_space *mapping, + struct list_head *pages, + unsigned int nr_pages) +{ + struct inode *const inode = mapping->host; + struct erofs_sb_info *const sbi = EROFS_I_SB(inode); + + bool sync = should_decompress_synchronously(sbi, nr_pages); + struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); + struct page *head = NULL; + LIST_HEAD(pagepool); + + trace_erofs_readpages(mapping->host, lru_to_page(pages), + nr_pages, false); + + f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; + + for (; nr_pages; --nr_pages) { + struct page *page = lru_to_page(pages); + + prefetchw(&page->flags); + list_del(&page->lru); + + /* + * A pure asynchronous readahead is indicated if + * a PG_readahead marked page is hitted at first. + * Let's also do asynchronous decompression for this case. + */ + sync &= !(PageReadahead(page) && !head); + + if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { + list_add(&page->lru, &pagepool); + continue; + } + + set_page_private(page, (unsigned long)head); + head = page; + } + + while (head) { + struct page *page = head; + int err; + + /* traversal in reverse order */ + head = (void *)page_private(page); + + err = z_erofs_do_read_page(&f, page, &pagepool); + if (err) { + struct erofs_vnode *vi = EROFS_V(inode); + + errln("%s, readahead error at page %lu of nid %llu", + __func__, page->index, vi->nid); + } + put_page(page); + } + + (void)z_erofs_collector_end(&f.clt); + + z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync); + + if (f.map.mpage) + put_page(f.map.mpage); + + /* clean up the remaining free pages */ + put_pages_list(&pagepool); + return 0; +} + +const struct address_space_operations z_erofs_vle_normalaccess_aops = { + .readpage = z_erofs_vle_normalaccess_readpage, + .readpages = z_erofs_vle_normalaccess_readpages, +}; + diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h new file mode 100644 index 000000000000..4fc547bc01f9 --- /dev/null +++ b/fs/erofs/zdata.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_ZDATA_H +#define __EROFS_FS_ZDATA_H + +#include "internal.h" +#include "zpvec.h" + +#define Z_EROFS_NR_INLINE_PAGEVECS 3 + +/* + * Structure fields follow one of the following exclusion rules. + * + * I: Modifiable by initialization/destruction paths and read-only + * for everyone else; + * + * L: Field should be protected by pageset lock; + * + * A: Field should be accessed / updated in atomic for parallelized code. 
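
A small trick in z_erofs_vle_normalaccess_readpages() above deserves a note: the batch is threaded into a LIFO list through each page's private field and then consumed in reverse, which avoids any extra allocation for the work list. A stand-alone sketch, with a simplified stand-in for struct page:

    struct page { unsigned long private; };

    static struct page *push(struct page *head, struct page *page)
    {
        page->private = (unsigned long)head;
        return page;
    }

    static struct page *pop(struct page **head)
    {
        struct page *page = *head;

        if (page)
            *head = (struct page *)page->private;
        return page;
    }
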
+ */ +struct z_erofs_collection { + struct mutex lock; + + /* I: page offset of start position of decompression */ + unsigned short pageofs; + + /* L: maximum relative page index in pagevec[] */ + unsigned short nr_pages; + + /* L: total number of pages in pagevec[] */ + unsigned int vcnt; + + union { + /* L: inline a certain number of pagevecs for bootstrap */ + erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS]; + + /* I: can be used to free the pcluster by RCU. */ + struct rcu_head rcu; + }; +}; + +#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001 +#define Z_EROFS_PCLUSTER_LENGTH_BIT 1 + +/* + * let's leave a type here in case of introducing + * another tagged pointer later. + */ +typedef void *z_erofs_next_pcluster_t; + +struct z_erofs_pcluster { + struct erofs_workgroup obj; + struct z_erofs_collection primary_collection; + + /* A: point to next chained pcluster or TAILs */ + z_erofs_next_pcluster_t next; + + /* A: compressed pages (including multi-usage pages) */ + struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES]; + + /* A: lower limit of decompressed length and if full length or not */ + unsigned int length; + + /* I: compression algorithm format */ + unsigned char algorithmformat; + /* I: bit shift of physical cluster size */ + unsigned char clusterbits; +}; + +#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection) + +/* let's avoid the valid 32-bit kernel addresses */ + +/* the chained workgroup has't submitted io (still open) */ +#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE) +/* the chained workgroup has already submitted io */ +#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD) + +#define Z_EROFS_PCLUSTER_NIL (NULL) + +#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) + +struct z_erofs_unzip_io { + atomic_t pending_bios; + z_erofs_next_pcluster_t head; + + union { + wait_queue_head_t wait; + struct work_struct work; + } u; +}; + +struct z_erofs_unzip_io_sb { + struct z_erofs_unzip_io io; + struct super_block *sb; +}; + +#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) +static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, + struct page *page) +{ + return page->mapping == MNGD_MAPPING(sbi); +} + +#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2 +#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1) +#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS) + +/* + * waiters (aka. 
ongoing_packs): # to unlock the page + * sub-index: 0 - for partial page, >= 1 full page sub-index + */ +typedef atomic_t z_erofs_onlinepage_t; + +/* type punning */ +union z_erofs_onlinepage_converter { + z_erofs_onlinepage_t *o; + unsigned long *v; +}; + +static inline unsigned int z_erofs_onlinepage_index(struct page *page) +{ + union z_erofs_onlinepage_converter u; + + DBG_BUGON(!PagePrivate(page)); + u.v = &page_private(page); + + return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; +} + +static inline void z_erofs_onlinepage_init(struct page *page) +{ + union { + z_erofs_onlinepage_t o; + unsigned long v; + /* keep from being unlocked in advance */ + } u = { .o = ATOMIC_INIT(1) }; + + set_page_private(page, u.v); + smp_wmb(); + SetPagePrivate(page); +} + +static inline void z_erofs_onlinepage_fixup(struct page *page, + uintptr_t index, bool down) +{ + unsigned long *p, o, v, id; +repeat: + p = &page_private(page); + o = READ_ONCE(*p); + + id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; + if (id) { + if (!index) + return; + + DBG_BUGON(id != index); + } + + v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | + ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); + if (cmpxchg(p, o, v) != o) + goto repeat; +} + +static inline void z_erofs_onlinepage_endio(struct page *page) +{ + union z_erofs_onlinepage_converter u; + unsigned int v; + + DBG_BUGON(!PagePrivate(page)); + u.v = &page_private(page); + + v = atomic_dec_return(u.o); + if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) { + ClearPagePrivate(page); + if (!PageError(page)) + SetPageUptodate(page); + unlock_page(page); + } + debugln("%s, page %p value %x", __func__, page, atomic_read(u.o)); +} + +#define Z_EROFS_VMAP_ONSTACK_PAGES \ + min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U) +#define Z_EROFS_VMAP_GLOBAL_PAGES 2048 + +#endif + diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c new file mode 100644 index 000000000000..4dc9cec01297 --- /dev/null +++ b/fs/erofs/zmap.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018-2019 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" +#include +#include + +int z_erofs_fill_inode(struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + + if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { + vi->z_advise = 0; + vi->z_algorithmtype[0] = 0; + vi->z_algorithmtype[1] = 0; + vi->z_logical_clusterbits = LOG_BLOCK_SIZE; + vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits; + vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits; + set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); + } + + inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops; + return 0; +} + +static int fill_inode_lazy(struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + int err; + erofs_off_t pos; + struct page *page; + void *kaddr; + struct z_erofs_map_header *h; + + if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) + return 0; + + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE)) + return -ERESTARTSYS; + + err = 0; + if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) + goto out_unlock; + + DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY); + + pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + + vi->xattr_isize, 8); + page = erofs_get_meta_page(sb, erofs_blknr(pos), false); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto out_unlock; + } + + kaddr = kmap_atomic(page); + + h = kaddr + erofs_blkoff(pos); + vi->z_advise = le16_to_cpu(h->h_advise); + vi->z_algorithmtype[0] = h->h_algorithmtype & 15; + vi->z_algorithmtype[1] = h->h_algorithmtype >> 4; + + if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) { + errln("unknown compression format %u for nid %llu, please upgrade kernel", + vi->z_algorithmtype[0], vi->nid); + err = -EOPNOTSUPP; + goto unmap_done; + } + + vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); + vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits + + ((h->h_clusterbits >> 3) & 3); + + if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) { + errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel", + vi->z_physical_clusterbits[0], vi->nid); + err = -EOPNOTSUPP; + goto unmap_done; + } + + vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + + ((h->h_clusterbits >> 5) & 7); + set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); +unmap_done: + kunmap_atomic(kaddr); + unlock_page(page); + put_page(page); +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags); + return err; +} + +struct z_erofs_maprecorder { + struct inode *inode; + struct erofs_map_blocks *map; + void *kaddr; + + unsigned long lcn; + /* compression extent information gathered */ + u8 type; + u16 clusterofs; + u16 delta[2]; + erofs_blk_t pblk; +}; + +static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, + erofs_blk_t eblk) +{ + struct super_block *const sb = m->inode->i_sb; + struct erofs_map_blocks *const map = m->map; + struct page *mpage = map->mpage; + + if (mpage) { + if (mpage->index == eblk) { + if (!m->kaddr) + m->kaddr = kmap_atomic(mpage); + return 0; + } + + if (m->kaddr) { + kunmap_atomic(m->kaddr); + m->kaddr = NULL; + } + put_page(mpage); + } + + mpage = erofs_get_meta_page(sb, eblk, false); + if (IS_ERR(mpage)) { + map->mpage = NULL; + return PTR_ERR(mpage); + } + m->kaddr = kmap_atomic(mpage); + unlock_page(mpage); + map->mpage = mpage; + return 0; +} + +static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned long lcn) +{ + struct inode *const inode = 
m->inode; + struct erofs_vnode *const vi = EROFS_V(inode); + const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid); + const erofs_off_t pos = + Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize + + vi->xattr_isize) + + lcn * sizeof(struct z_erofs_vle_decompressed_index); + struct z_erofs_vle_decompressed_index *di; + unsigned int advise, type; + int err; + + err = z_erofs_reload_indexes(m, erofs_blknr(pos)); + if (err) + return err; + + m->lcn = lcn; + di = m->kaddr + erofs_blkoff(pos); + + advise = le16_to_cpu(di->di_advise); + type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & + ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); + switch (type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + m->clusterofs = 1 << vi->z_logical_clusterbits; + m->delta[0] = le16_to_cpu(di->di_u.delta[0]); + m->delta[1] = le16_to_cpu(di->di_u.delta[1]); + break; + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + m->clusterofs = le16_to_cpu(di->di_clusterofs); + m->pblk = le32_to_cpu(di->di_u.blkaddr); + break; + default: + DBG_BUGON(1); + return -EOPNOTSUPP; + } + m->type = type; + return 0; +} + +static unsigned int decode_compactedbits(unsigned int lobits, + unsigned int lomask, + u8 *in, unsigned int pos, u8 *type) +{ + const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); + const unsigned int lo = v & lomask; + + *type = (v >> lobits) & 3; + return lo; +} + +static int unpack_compacted_index(struct z_erofs_maprecorder *m, + unsigned int amortizedshift, + unsigned int eofs) +{ + struct erofs_vnode *const vi = EROFS_V(m->inode); + const unsigned int lclusterbits = vi->z_logical_clusterbits; + const unsigned int lomask = (1 << lclusterbits) - 1; + unsigned int vcnt, base, lo, encodebits, nblk; + int i; + u8 *in, type; + + if (1 << amortizedshift == 4) + vcnt = 2; + else if (1 << amortizedshift == 2 && lclusterbits == 12) + vcnt = 16; + else + return -EOPNOTSUPP; + + encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; + base = round_down(eofs, vcnt << amortizedshift); + in = m->kaddr + base; + + i = (eofs - base) >> amortizedshift; + + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + m->type = type; + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { + m->clusterofs = 1 << lclusterbits; + if (i + 1 != vcnt) { + m->delta[0] = lo; + return 0; + } + /* + * since the last lcluster in the pack is special, + * of which lo saves delta[1] rather than delta[0]. + * Hence, get delta[0] by the previous lcluster indirectly. 
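
decode_compactedbits() above performs an unaligned little-endian 32-bit load covering the requested bit offset, shifts the straddled bits into place, and splits the result into the low value plus a 2-bit type. A portable equivalent — assuming a little-endian host, which get_unaligned_le32() abstracts away in the kernel:

    #include <stdint.h>
    #include <string.h>

    static unsigned int decode_bits(const uint8_t *in, unsigned int pos,
                                    unsigned int lobits, uint8_t *type)
    {
        uint32_t v;

        memcpy(&v, in + pos / 8, sizeof(v));  /* unaligned LE32 load */
        v >>= pos & 7;
        *type = (v >> lobits) & 3;
        return v & ((1u << lobits) - 1);
    }
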
+ */ + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * (i - 1), &type); + if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + lo = 0; + m->delta[0] = lo + 1; + return 0; + } + m->clusterofs = lo; + m->delta[0] = 0; + /* figout out blkaddr (pblk) for HEAD lclusters */ + nblk = 1; + while (i > 0) { + --i; + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + i -= lo; + + if (i >= 0) + ++nblk; + } + in += (vcnt << amortizedshift) - sizeof(__le32); + m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; + return 0; +} + +static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned long lcn) +{ + struct inode *const inode = m->inode; + struct erofs_vnode *const vi = EROFS_V(inode); + const unsigned int lclusterbits = vi->z_logical_clusterbits; + const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + + vi->inode_isize + vi->xattr_isize, 8) + + sizeof(struct z_erofs_map_header); + const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); + unsigned int compacted_4b_initial, compacted_2b; + unsigned int amortizedshift; + erofs_off_t pos; + int err; + + if (lclusterbits != 12) + return -EOPNOTSUPP; + + if (lcn >= totalidx) + return -EINVAL; + + m->lcn = lcn; + /* used to align to 32-byte (compacted_2b) alignment */ + compacted_4b_initial = (32 - ebase % 32) / 4; + if (compacted_4b_initial == 32 / 4) + compacted_4b_initial = 0; + + if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) + compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); + else + compacted_2b = 0; + + pos = ebase; + if (lcn < compacted_4b_initial) { + amortizedshift = 2; + goto out; + } + pos += compacted_4b_initial * 4; + lcn -= compacted_4b_initial; + + if (lcn < compacted_2b) { + amortizedshift = 1; + goto out; + } + pos += compacted_2b * 2; + lcn -= compacted_2b; + amortizedshift = 2; +out: + pos += lcn * (1 << amortizedshift); + err = z_erofs_reload_indexes(m, erofs_blknr(pos)); + if (err) + return err; + return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); +} + +static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned int lcn) +{ + const unsigned int datamode = EROFS_V(m->inode)->datamode; + + if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) + return vle_legacy_load_cluster_from_disk(m, lcn); + + if (datamode == EROFS_INODE_FLAT_COMPRESSION) + return compacted_load_cluster_from_disk(m, lcn); + + return -EINVAL; +} + +static int vle_extent_lookback(struct z_erofs_maprecorder *m, + unsigned int lookback_distance) +{ + struct erofs_vnode *const vi = EROFS_V(m->inode); + struct erofs_map_blocks *const map = m->map; + const unsigned int lclusterbits = vi->z_logical_clusterbits; + unsigned long lcn = m->lcn; + int err; + + if (lcn < lookback_distance) { + errln("bogus lookback distance @ nid %llu", vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + /* load extent head logical cluster if needed */ + lcn -= lookback_distance; + err = vle_load_cluster_from_disk(m, lcn); + if (err) + return err; + + switch (m->type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + if (unlikely(!m->delta[0])) { + errln("invalid lookback distance 0 at nid %llu", + vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + return vle_extent_lookback(m, m->delta[0]); + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + map->m_flags &= ~EROFS_MAP_ZIPPED; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + map->m_la = (lcn << lclusterbits) | m->clusterofs; + break; + default: + 
errln("unknown type %u at lcn %lu of nid %llu", + m->type, lcn, vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + return 0; +} + +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct z_erofs_maprecorder m = { + .inode = inode, + .map = map, + }; + int err = 0; + unsigned int lclusterbits, endoff; + unsigned long long ofs, end; + + trace_z_erofs_map_blocks_iter_enter(inode, map, flags); + + /* when trying to read beyond EOF, leave it unmapped */ + if (unlikely(map->m_la >= inode->i_size)) { + map->m_llen = map->m_la + 1 - inode->i_size; + map->m_la = inode->i_size; + map->m_flags = 0; + goto out; + } + + err = fill_inode_lazy(inode); + if (err) + goto out; + + lclusterbits = vi->z_logical_clusterbits; + ofs = map->m_la; + m.lcn = ofs >> lclusterbits; + endoff = ofs & ((1 << lclusterbits) - 1); + + err = vle_load_cluster_from_disk(&m, m.lcn); + if (err) + goto unmap_out; + + map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ + end = (m.lcn + 1ULL) << lclusterbits; + + switch (m.type) { + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + if (endoff >= m.clusterofs) + map->m_flags &= ~EROFS_MAP_ZIPPED; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + if (endoff >= m.clusterofs) { + map->m_la = (m.lcn << lclusterbits) | m.clusterofs; + break; + } + /* m.lcn should be >= 1 if endoff < m.clusterofs */ + if (unlikely(!m.lcn)) { + errln("invalid logical cluster 0 at nid %llu", + vi->nid); + err = -EFSCORRUPTED; + goto unmap_out; + } + end = (m.lcn << lclusterbits) | m.clusterofs; + map->m_flags |= EROFS_MAP_FULL_MAPPED; + m.delta[0] = 1; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + /* get the correspoinding first chunk */ + err = vle_extent_lookback(&m, m.delta[0]); + if (unlikely(err)) + goto unmap_out; + break; + default: + errln("unknown type %u at offset %llu of nid %llu", + m.type, ofs, vi->nid); + err = -EOPNOTSUPP; + goto unmap_out; + } + + map->m_llen = end - map->m_la; + map->m_plen = 1 << lclusterbits; + map->m_pa = blknr_to_addr(m.pblk); + map->m_flags |= EROFS_MAP_MAPPED; + +unmap_out: + if (m.kaddr) + kunmap_atomic(m.kaddr); + +out: + debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o", + __func__, map->m_la, map->m_pa, + map->m_llen, map->m_plen, map->m_flags); + + trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); + + /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */ + DBG_BUGON(err < 0 && err != -ENOMEM); + return err; +} + diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h new file mode 100644 index 000000000000..bd3cee16491c --- /dev/null +++ b/fs/erofs/zpvec.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_ZPVEC_H +#define __EROFS_FS_ZPVEC_H + +#include "tagptr.h" + +/* page type in pagevec for decompress subsystem */ +enum z_erofs_page_type { + /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */ + Z_EROFS_PAGE_TYPE_EXCLUSIVE, + + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED, + + Z_EROFS_VLE_PAGE_TYPE_HEAD, + Z_EROFS_VLE_PAGE_TYPE_MAX +}; + +extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0") + __bad_page_type_exclusive(void); + +/* pagevec tagged pointer */ +typedef tagptr2_t erofs_vtptr_t; + +/* pagevec collector */ +struct z_erofs_pagevec_ctor { + struct page *curr, *next; + erofs_vtptr_t *pages; + + unsigned int nr, index; +}; + +static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, + bool atomic) +{ + if (!ctor->curr) + return; + + if (atomic) + kunmap_atomic(ctor->pages); + else + kunmap(ctor->curr); +} + +static inline struct page * +z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor, + unsigned int nr) +{ + unsigned int index; + + /* keep away from occupied pages */ + if (ctor->next) + return ctor->next; + + for (index = 0; index < nr; ++index) { + const erofs_vtptr_t t = ctor->pages[index]; + const unsigned int tags = tagptr_unfold_tags(t); + + if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE) + return tagptr_unfold_ptr(t); + } + DBG_BUGON(nr >= ctor->nr); + return NULL; +} + +static inline void +z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, + bool atomic) +{ + struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); + + z_erofs_pagevec_ctor_exit(ctor, atomic); + + ctor->curr = next; + ctor->next = NULL; + ctor->pages = atomic ? + kmap_atomic(ctor->curr) : kmap(ctor->curr); + + ctor->nr = PAGE_SIZE / sizeof(struct page *); + ctor->index = 0; +} + +static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor, + unsigned int nr, + erofs_vtptr_t *pages, + unsigned int i) +{ + ctor->nr = nr; + ctor->curr = ctor->next = NULL; + ctor->pages = pages; + + if (i >= nr) { + i -= nr; + z_erofs_pagevec_ctor_pagedown(ctor, false); + while (i > ctor->nr) { + i -= ctor->nr; + z_erofs_pagevec_ctor_pagedown(ctor, false); + } + } + ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i); + ctor->index = i; +} + +static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, + struct page *page, + enum z_erofs_page_type type, + bool *occupied) +{ + *occupied = false; + if (unlikely(!ctor->next && type)) + if (ctor->index + 1 == ctor->nr) + return false; + + if (unlikely(ctor->index >= ctor->nr)) + z_erofs_pagevec_ctor_pagedown(ctor, false); + + /* exclusive page type must be 0 */ + if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL) + __bad_page_type_exclusive(); + + /* should remind that collector->next never equal to 1, 2 */ + if (type == (uintptr_t)ctor->next) { + ctor->next = page; + *occupied = true; + } + ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); + return true; +} + +static inline struct page * +z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor, + enum z_erofs_page_type *type) +{ + erofs_vtptr_t t; + + if (unlikely(ctor->index >= ctor->nr)) { + DBG_BUGON(!ctor->next); + z_erofs_pagevec_ctor_pagedown(ctor, true); + } + + t = ctor->pages[ctor->index]; + + *type = tagptr_unfold_tags(t); + + /* should remind that collector->next never equal to 1, 2 */ + if (*type == (uintptr_t)ctor->next) + ctor->next = tagptr_unfold_ptr(t); + + ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); + 
return tagptr_unfold_ptr(t); +} +#endif + diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h new file mode 100644 index 000000000000..bfb2da9c4eee --- /dev/null +++ b/include/trace/events/erofs.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM erofs + +#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EROFS_H + +#include + +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_nid(entry) show_dev(entry->dev), entry->nid + +#define show_file_type(type) \ + __print_symbolic(type, \ + { 0, "FILE" }, \ + { 1, "DIR" }) + +#define show_map_flags(flags) __print_flags(flags, "|", \ + { EROFS_GET_BLOCKS_RAW, "RAW" }) + +#define show_mflags(flags) __print_flags(flags, "", \ + { EROFS_MAP_MAPPED, "M" }, \ + { EROFS_MAP_META, "I" }, \ + { EROFS_MAP_ZIPPED, "Z" }) + +TRACE_EVENT(erofs_lookup, + + TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), + + TP_ARGS(dir, dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(const char *, name ) + __field(unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->nid = EROFS_V(dir)->nid; + __entry->name = dentry->d_name.name; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", + show_dev_nid(__entry), + __entry->name, + __entry->flags) +); + +TRACE_EVENT(erofs_fill_inode, + TP_PROTO(struct inode *inode, int isdir), + TP_ARGS(inode, isdir), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(erofs_blk_t, blkaddr ) + __field(unsigned int, ofs ) + __field(int, isdir ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->isdir = isdir; + ), + + TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", + show_dev_nid(__entry), + __entry->blkaddr, __entry->ofs, + __entry->isdir) +); + +TRACE_EVENT(erofs_readpage, + + TP_PROTO(struct page *page, bool raw), + + TP_ARGS(page, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(int, dir ) + __field(pgoff_t, index ) + __field(int, uptodate) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->nid = EROFS_V(page->mapping->host)->nid; + __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->index = page->index; + __entry->uptodate = PageUptodate(page); + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " + "raw = %d", + show_dev_nid(__entry), + show_file_type(__entry->dir), + (unsigned long)__entry->index, + __entry->uptodate, + __entry->raw) +); + +TRACE_EVENT(erofs_readpages, + + TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, + bool raw), + + TP_ARGS(inode, page, nrpage, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(pgoff_t, start ) + __field(unsigned int, nrpage ) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->start = page->index; + __entry->nrpage = nrpage; + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", + show_dev_nid(__entry), + (unsigned long)__entry->start, + 
__entry->nrpage, + __entry->raw) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( erofs_off_t, la ) + __field( u64, llen ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->la = map->m_la; + __entry->llen = map->m_llen; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", + show_dev_nid(__entry), + __entry->la, __entry->llen, + __entry->flags ? show_map_flags(__entry->flags) : "NULL") +); + +DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags), + + TP_ARGS(inode, map, flags) +); + +DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( unsigned int, flags ) + __field( erofs_off_t, la ) + __field( erofs_off_t, pa ) + __field( u64, llen ) + __field( u64, plen ) + __field( unsigned int, mflags ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->flags = flags; + __entry->la = map->m_la; + __entry->pa = map->m_pa; + __entry->llen = map->m_llen; + __entry->plen = map->m_plen; + __entry->mflags = map->m_flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), nid = %llu, flags %s " + "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", + show_dev_nid(__entry), + __entry->flags ? 
show_map_flags(__entry->flags) : "NULL", + __entry->la, __entry->pa, __entry->llen, __entry->plen, + show_mflags(__entry->mflags), __entry->ret) +); + +DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags, int ret), + + TP_ARGS(inode, map, flags, ret) +); + +DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret) +); + +TRACE_EVENT(erofs_destroy_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + ), + + TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) +); + +#endif /* _TRACE_EROFS_H */ + + /* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 1274c692e59c..903cc2d2750b 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -19,6 +19,7 @@ #define SQUASHFS_MAGIC 0x73717368 #define ECRYPTFS_SUPER_MAGIC 0xf15f #define EFS_SUPER_MAGIC 0x414A53 +#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EXT2_SUPER_MAGIC 0xEF53 #define EXT3_SUPER_MAGIC 0xEF53 #define XENFS_SUPER_MAGIC 0xabba1974 -- cgit v1.2.3-59-g8ed1b From 75338cc8eb29c90323eb7d54ec09d25e86625515 Mon Sep 17 00:00:00 2001 From: Beniamin Bia Date: Wed, 21 Aug 2019 17:16:54 +0300 Subject: MAINTAINERS: Add Beniamin Bia for AD7606 driver Add Beniamin Bia as a maintainer for the AD7606 driver. Signed-off-by: Beniamin Bia Signed-off-by: Jonathan Cameron --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ac3a5cd6a682..92c2603b71b7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -903,6 +903,7 @@ F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml ANALOG DEVICES INC AD7606 DRIVER M: Stefan Popa +M: Beniamin Bia L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported -- cgit v1.2.3-59-g8ed1b From 416f882c3b404938ba26622469a956c4cd9f7558 Mon Sep 17 00:00:00 2001 From: Beniamin Bia Date: Wed, 21 Aug 2019 17:16:55 +0300 Subject: dt-bindings: iio: adc: Migrate AD7606 documentation to yaml The documentation for the ad7606 was migrated to YAML.
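As a purely illustrative aside (not part of this patch), a consumer driver typically claims the GPIOs named by such a binding through the gpiod API, deriving each con_id from the property name minus its "-gpios" suffix. The struct and helper names below are hypothetical, and this is a hedged sketch rather than code from the ad7606 driver:

/* Hedged sketch: requesting the GPIOs this binding describes.
 * ad7606_gpios and ad7606_request_gpios() are made-up names. */
#include <linux/device.h>
#include <linux/gpio/consumer.h>

struct ad7606_gpios {
	struct gpio_desc *convst;	/* adi,conversion-start-gpios */
	struct gpio_desc *reset;	/* reset-gpios (optional) */
};

static int ad7606_request_gpios(struct device *dev, struct ad7606_gpios *g)
{
	/* "adi,conversion-start-gpios" yields con_id "adi,conversion-start" */
	g->convst = devm_gpiod_get(dev, "adi,conversion-start", GPIOD_OUT_LOW);
	if (IS_ERR(g->convst))
		return PTR_ERR(g->convst);

	/* Per the binding, RESET (if present) is asserted during probe. */
	g->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(g->reset))
		return PTR_ERR(g->reset);

	return 0;
}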
Signed-off-by: Beniamin Bia Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../devicetree/bindings/iio/adc/adi,ad7606.txt | 66 ---------- .../devicetree/bindings/iio/adc/adi,ad7606.yaml | 136 +++++++++++++++++++++ MAINTAINERS | 2 +- 3 files changed, 137 insertions(+), 67 deletions(-) delete mode 100644 Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt create mode 100644 Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt deleted file mode 100644 index d8652460198e..000000000000 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt +++ /dev/null @@ -1,66 +0,0 @@ -Analog Devices AD7606 Simultaneous Sampling ADC - -Required properties for the AD7606: - -- compatible: Must be one of - * "adi,ad7605-4" - * "adi,ad7606-8" - * "adi,ad7606-6" - * "adi,ad7606-4" - * "adi,ad7616" -- reg: SPI chip select number for the device -- spi-max-frequency: Max SPI frequency to use - see: Documentation/devicetree/bindings/spi/spi-bus.txt -- spi-cpha: See Documentation/devicetree/bindings/spi/spi-bus.txt -- avcc-supply: phandle to the Avcc power supply -- interrupts: IRQ line for the ADC - see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt -- adi,conversion-start-gpios: must be the device tree identifier of the CONVST pin. - This logic input is used to initiate conversions on the analog - input channels. As the line is active high, it should be marked - GPIO_ACTIVE_HIGH. - -Optional properties: - -- reset-gpios: must be the device tree identifier of the RESET pin. If specified, - it will be asserted during driver probe. As the line is active high, - it should be marked GPIO_ACTIVE_HIGH. -- standby-gpios: must be the device tree identifier of the STBY pin. This pin is used - to place the AD7606 into one of two power-down modes, Standby mode or - Shutdown mode. As the line is active low, it should be marked - GPIO_ACTIVE_LOW. -- adi,first-data-gpios: must be the device tree identifier of the FRSTDATA pin. - The FRSTDATA output indicates when the first channel, V1, is - being read back on either the parallel, byte or serial interface. - As the line is active high, it should be marked GPIO_ACTIVE_HIGH. -- adi,range-gpios: must be the device tree identifier of the RANGE pin. The polarity on - this pin determines the input range of the analog input channels. If - this pin is tied to a logic high, the analog input range is ±10V for - all channels. If this pin is tied to a logic low, the analog input range - is ±5V for all channels. As the line is active high, it should be marked - GPIO_ACTIVE_HIGH. -- adi,oversampling-ratio-gpios: must be the device tree identifier of the over-sampling - mode pins. As the line is active high, it should be marked - GPIO_ACTIVE_HIGH. 
- -Example: - - adc@0 { - compatible = "adi,ad7606-8"; - reg = <0>; - spi-max-frequency = <1000000>; - spi-cpol; - - avcc-supply = <&adc_vref>; - - interrupts = <25 IRQ_TYPE_EDGE_FALLING>; - interrupt-parent = <&gpio>; - - adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>; - adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>; - adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH - &gpio 23 GPIO_ACTIVE_HIGH - &gpio 26 GPIO_ACTIVE_HIGH>; - standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>; - }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml new file mode 100644 index 000000000000..3ecdfa780ae3 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml @@ -0,0 +1,136 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/adi,ad7606.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices AD7606 Simultaneous Sampling ADC + +maintainers: + - Beniamin Bia + - Stefan Popa + +description: | + Analog Devices AD7606 Simultaneous Sampling ADC + https://www.analog.com/media/en/technical-documentation/data-sheets/ad7606_7606-6_7606-4.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/AD7616.pdf + +properties: + compatible: + enum: + - adi,ad7605-4 + - adi,ad7606-8 + - adi,ad7606-6 + - adi,ad7606-4 + - adi,ad7616 + + reg: + maxItems: 1 + + spi-cpha: true + + avcc-supply: + description: + Phandle to the Avcc power supply + maxItems: 1 + + interrupts: + maxItems: 1 + + adi,conversion-start-gpios: + description: + Must be the device tree identifier of the CONVST pin. + This logic input is used to initiate conversions on the analog + input channels. As the line is active high, it should be marked + GPIO_ACTIVE_HIGH. + maxItems: 1 + + reset-gpios: + description: + Must be the device tree identifier of the RESET pin. If specified, + it will be asserted during driver probe. As the line is active high, + it should be marked GPIO_ACTIVE_HIGH. + maxItems: 1 + + standby-gpios: + description: + Must be the device tree identifier of the STBY pin. This pin is used + to place the AD7606 into one of two power-down modes, Standby mode or + Shutdown mode. As the line is active low, it should be marked + GPIO_ACTIVE_LOW. + maxItems: 1 + + adi,first-data-gpios: + description: + Must be the device tree identifier of the FRSTDATA pin. + The FRSTDATA output indicates when the first channel, V1, is + being read back on either the parallel, byte or serial interface. + As the line is active high, it should be marked GPIO_ACTIVE_HIGH. + maxItems: 1 + + adi,range-gpios: + description: + Must be the device tree identifier of the RANGE pin. The polarity on + this pin determines the input range of the analog input channels. If + this pin is tied to a logic high, the analog input range is ±10V for + all channels. If this pin is tied to a logic low, the analog input range + is ±5V for all channels. As the line is active high, it should be marked + GPIO_ACTIVE_HIGH. + maxItems: 1 + + adi,oversampling-ratio-gpios: + description: + Must be the device tree identifier of the over-sampling + mode pins. As the line is active high, it should be marked + GPIO_ACTIVE_HIGH. + maxItems: 1 + + adi,sw-mode: + description: + Software mode of operation, so far available only for ad7616. + It is enabled when all three oversampling mode pins are connected to + high level. 
The device is configured by the corresponding registers. If the + adi,oversampling-ratio-gpios property is defined, then the driver will set the + oversampling gpios to high. Otherwise, it is assumed that the pins are hardwired + to VDD. + type: boolean + +required: + - compatible + - reg + - spi-cpha + - avcc-supply + - interrupts + - adi,conversion-start-gpios + +examples: + - | + #include + #include + spi0 { + #address-cells = <1>; + #size-cells = <0>; + + adc@0 { + compatible = "adi,ad7606-8"; + reg = <0>; + spi-max-frequency = <1000000>; + spi-cpol; + spi-cpha; + + avcc-supply = <&adc_vref>; + + interrupts = <25 IRQ_TYPE_EDGE_FALLING>; + interrupt-parent = <&gpio>; + + adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>; + adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>; + adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH + &gpio 23 GPIO_ACTIVE_HIGH + &gpio 26 GPIO_ACTIVE_HIGH>; + standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>; + adi,sw-mode; + }; + }; +... diff --git a/MAINTAINERS b/MAINTAINERS index 92c2603b71b7..f0c03740b9fb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -908,7 +908,7 @@ L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/adc/ad7606.c -F: Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt +F: Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml ANALOG DEVICES INC AD7768-1 DRIVER M: Stefan Popa -- cgit v1.2.3-59-g8ed1b From 8465def499c70d041a234087eff380108da7e830 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 25 Aug 2019 07:54:28 +0200 Subject: staging: greybus: move the greybus core to drivers/greybus The Greybus core code has been stable for a long time, and has been shipping for many years in millions of phones. With the advent of a recent Google Summer of Code project, and a number of new devices in the works from various companies, it is time to get the core greybus code out of staging as it really is going to be with us for a while. 
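For readers new to Greybus, the sketch below shows roughly what a bundle driver built on the now-first-class core looks like. It is an illustration only, not code from this series: the class value 0x42 and every "example" identifier are made up, while greybus_driver, GREYBUS_DEVICE_CLASS() and module_greybus_driver() come from the Greybus headers being moved here.

/* Minimal illustrative Greybus bundle driver; all names are hypothetical. */
#include <linux/module.h>
#include <linux/greybus.h>

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	/* A real driver would create and enable connections for the
	 * bundle's CPorts here. */
	dev_info(&bundle->dev, "example bundle bound (class 0x%02x)\n",
		 bundle->class);
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
	/* Tear down whatever example_probe() set up. */
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(0x42) },	/* hypothetical class */
	{ }
};
MODULE_DEVICE_TABLE(greybus, example_id_table);

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);

MODULE_LICENSE("GPL");

The greybus_match_device()/greybus_match_id() logic at the bottom of this patch's core.c is what binds such a driver to a discovered bundle.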
Cc: Johan Hovold Cc: linux-kernel@vger.kernel.org Cc: greybus-dev@lists.linaro.org Acked-by: Viresh Kumar Acked-by: Alex Elder Link: https://lore.kernel.org/r/20190825055429.18547-9-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 3 + drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/greybus/Kconfig | 16 + drivers/greybus/Makefile | 19 + drivers/greybus/bundle.c | 252 ++++++ drivers/greybus/connection.c | 942 +++++++++++++++++++++ drivers/greybus/control.c | 584 +++++++++++++ drivers/greybus/core.c | 349 ++++++++ drivers/greybus/debugfs.c | 29 + drivers/greybus/greybus_trace.h | 502 +++++++++++ drivers/greybus/hd.c | 256 ++++++ drivers/greybus/interface.c | 1263 ++++++++++++++++++++++++++++ drivers/greybus/manifest.c | 533 ++++++++++++ drivers/greybus/module.c | 236 ++++++ drivers/greybus/operation.c | 1264 ++++++++++++++++++++++++++++ drivers/greybus/svc.c | 1397 +++++++++++++++++++++++++++++++ drivers/greybus/svc_watchdog.c | 197 +++++ drivers/staging/greybus/Kconfig | 16 - drivers/staging/greybus/Makefile | 17 - drivers/staging/greybus/bundle.c | 252 ------ drivers/staging/greybus/connection.c | 942 --------------------- drivers/staging/greybus/control.c | 584 ------------- drivers/staging/greybus/core.c | 349 -------- drivers/staging/greybus/debugfs.c | 29 - drivers/staging/greybus/es2.c | 2 +- drivers/staging/greybus/greybus_trace.h | 502 ----------- drivers/staging/greybus/hd.c | 256 ------ drivers/staging/greybus/interface.c | 1263 ---------------------------- drivers/staging/greybus/manifest.c | 533 ------------ drivers/staging/greybus/module.c | 236 ------ drivers/staging/greybus/operation.c | 1264 ---------------------------- drivers/staging/greybus/svc.c | 1397 ------------------------------- drivers/staging/greybus/svc_watchdog.c | 197 ----- 34 files changed, 7846 insertions(+), 7838 deletions(-) create mode 100644 drivers/greybus/Kconfig create mode 100644 drivers/greybus/Makefile create mode 100644 drivers/greybus/bundle.c create mode 100644 drivers/greybus/connection.c create mode 100644 drivers/greybus/control.c create mode 100644 drivers/greybus/core.c create mode 100644 drivers/greybus/debugfs.c create mode 100644 drivers/greybus/greybus_trace.h create mode 100644 drivers/greybus/hd.c create mode 100644 drivers/greybus/interface.c create mode 100644 drivers/greybus/manifest.c create mode 100644 drivers/greybus/module.c create mode 100644 drivers/greybus/operation.c create mode 100644 drivers/greybus/svc.c create mode 100644 drivers/greybus/svc_watchdog.c delete mode 100644 drivers/staging/greybus/bundle.c delete mode 100644 drivers/staging/greybus/connection.c delete mode 100644 drivers/staging/greybus/control.c delete mode 100644 drivers/staging/greybus/core.c delete mode 100644 drivers/staging/greybus/debugfs.c delete mode 100644 drivers/staging/greybus/greybus_trace.h delete mode 100644 drivers/staging/greybus/hd.c delete mode 100644 drivers/staging/greybus/interface.c delete mode 100644 drivers/staging/greybus/manifest.c delete mode 100644 drivers/staging/greybus/module.c delete mode 100644 drivers/staging/greybus/operation.c delete mode 100644 drivers/staging/greybus/svc.c delete mode 100644 drivers/staging/greybus/svc_watchdog.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0f38cba2c581..e3242687cd19 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7003,6 +7003,9 @@ M: Alex Elder M: Greg Kroah-Hartman S: Maintained F: drivers/staging/greybus/ +F: drivers/greybus/ +F: include/linux/greybus.h +F: 
include/linux/greybus/ L: greybus-dev@lists.linaro.org (moderated for non-subscribers) GREYBUS UART PROTOCOLS DRIVERS diff --git a/drivers/Kconfig b/drivers/Kconfig index 61cf4ea2c229..7dce76ae7369 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -146,6 +146,8 @@ source "drivers/hv/Kconfig" source "drivers/xen/Kconfig" +source "drivers/greybus/Kconfig" + source "drivers/staging/Kconfig" source "drivers/platform/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 6d37564e783c..73df8e5a2fce 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -148,6 +148,7 @@ obj-$(CONFIG_BCMA) += bcma/ obj-$(CONFIG_VHOST_RING) += vhost/ obj-$(CONFIG_VHOST) += vhost/ obj-$(CONFIG_VLYNQ) += vlynq/ +obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_STAGING) += staging/ obj-y += platform/ diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig new file mode 100644 index 000000000000..158d8893114c --- /dev/null +++ b/drivers/greybus/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig GREYBUS + tristate "Greybus support" + depends on SYSFS + ---help--- + This option enables the Greybus driver core. Greybus is a + hardware protocol that was designed to provide Unipro with a + sane application layer. It was originally designed for the + ARA project, a modular phone system, but has shown up in other + phones, and can be tunneled over other busses in order to + control hardware devices. + + Say Y here to enable support for these types of drivers. + + To compile this code as a module, choose M here: the module + will be called greybus.ko diff --git a/drivers/greybus/Makefile b/drivers/greybus/Makefile new file mode 100644 index 000000000000..03b22616ec7d --- /dev/null +++ b/drivers/greybus/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Greybus core +greybus-y := core.o \ + debugfs.o \ + hd.o \ + manifest.o \ + module.o \ + interface.o \ + bundle.o \ + connection.o \ + control.o \ + svc.o \ + svc_watchdog.o \ + operation.o + +obj-$(CONFIG_GREYBUS) += greybus.o + +# needed for trace events ccflags-y += -I$(src) diff --git a/drivers/greybus/bundle.c b/drivers/greybus/bundle.c new file mode 100644 index 000000000000..84660729538b --- /dev/null +++ b/drivers/greybus/bundle.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus bundles + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd.
+ */ + +#include +#include "greybus_trace.h" + +static ssize_t bundle_class_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + + return sprintf(buf, "0x%02x\n", bundle->class); +} +static DEVICE_ATTR_RO(bundle_class); + +static ssize_t bundle_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + + return sprintf(buf, "%u\n", bundle->id); +} +static DEVICE_ATTR_RO(bundle_id); + +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + + if (!bundle->state) + return sprintf(buf, "\n"); + + return sprintf(buf, "%s\n", bundle->state); +} + +static ssize_t state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + + kfree(bundle->state); + bundle->state = kstrdup(buf, GFP_KERNEL); + if (!bundle->state) + return -ENOMEM; + + /* Tell userspace that the file contents changed */ + sysfs_notify(&bundle->dev.kobj, NULL, "state"); + + return size; +} +static DEVICE_ATTR_RW(state); + +static struct attribute *bundle_attrs[] = { + &dev_attr_bundle_class.attr, + &dev_attr_bundle_id.attr, + &dev_attr_state.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(bundle); + +static struct gb_bundle *gb_bundle_find(struct gb_interface *intf, + u8 bundle_id) +{ + struct gb_bundle *bundle; + + list_for_each_entry(bundle, &intf->bundles, links) { + if (bundle->id == bundle_id) + return bundle; + } + + return NULL; +} + +static void gb_bundle_release(struct device *dev) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + + trace_gb_bundle_release(bundle); + + kfree(bundle->state); + kfree(bundle->cport_desc); + kfree(bundle); +} + +#ifdef CONFIG_PM +static void gb_bundle_disable_all_connections(struct gb_bundle *bundle) +{ + struct gb_connection *connection; + + list_for_each_entry(connection, &bundle->connections, bundle_links) + gb_connection_disable(connection); +} + +static void gb_bundle_enable_all_connections(struct gb_bundle *bundle) +{ + struct gb_connection *connection; + + list_for_each_entry(connection, &bundle->connections, bundle_links) + gb_connection_enable(connection); +} + +static int gb_bundle_suspend(struct device *dev) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + const struct dev_pm_ops *pm = dev->driver->pm; + int ret; + + if (pm && pm->runtime_suspend) { + ret = pm->runtime_suspend(&bundle->dev); + if (ret) + return ret; + } else { + gb_bundle_disable_all_connections(bundle); + } + + ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id); + if (ret) { + if (pm && pm->runtime_resume) + ret = pm->runtime_resume(dev); + else + gb_bundle_enable_all_connections(bundle); + + return ret; + } + + return 0; +} + +static int gb_bundle_resume(struct device *dev) +{ + struct gb_bundle *bundle = to_gb_bundle(dev); + const struct dev_pm_ops *pm = dev->driver->pm; + int ret; + + ret = gb_control_bundle_resume(bundle->intf->control, bundle->id); + if (ret) + return ret; + + if (pm && pm->runtime_resume) { + ret = pm->runtime_resume(dev); + if (ret) + return ret; + } else { + gb_bundle_enable_all_connections(bundle); + } + + return 0; +} + +static int gb_bundle_idle(struct device *dev) +{ + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + + return 0; +} +#endif + +static const struct dev_pm_ops gb_bundle_pm_ops = { + SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, 
gb_bundle_idle) +}; + +struct device_type greybus_bundle_type = { + .name = "greybus_bundle", + .release = gb_bundle_release, + .pm = &gb_bundle_pm_ops, +}; + +/* + * Create a gb_bundle structure to represent a discovered + * bundle. Returns a pointer to the new bundle or a null + * pointer if a failure occurs due to memory exhaustion. + */ +struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, + u8 class) +{ + struct gb_bundle *bundle; + + if (bundle_id == BUNDLE_ID_NONE) { + dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id); + return NULL; + } + + /* + * Reject any attempt to reuse a bundle id. We initialize + * these serially, so there's no need to worry about keeping + * the interface bundle list locked here. + */ + if (gb_bundle_find(intf, bundle_id)) { + dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id); + return NULL; + } + + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); + if (!bundle) + return NULL; + + bundle->intf = intf; + bundle->id = bundle_id; + bundle->class = class; + INIT_LIST_HEAD(&bundle->connections); + + bundle->dev.parent = &intf->dev; + bundle->dev.bus = &greybus_bus_type; + bundle->dev.type = &greybus_bundle_type; + bundle->dev.groups = bundle_groups; + bundle->dev.dma_mask = intf->dev.dma_mask; + device_initialize(&bundle->dev); + dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id); + + list_add(&bundle->links, &intf->bundles); + + trace_gb_bundle_create(bundle); + + return bundle; +} + +int gb_bundle_add(struct gb_bundle *bundle) +{ + int ret; + + ret = device_add(&bundle->dev); + if (ret) { + dev_err(&bundle->dev, "failed to register bundle: %d\n", ret); + return ret; + } + + trace_gb_bundle_add(bundle); + + return 0; +} + +/* + * Tear down a previously set up bundle. + */ +void gb_bundle_destroy(struct gb_bundle *bundle) +{ + trace_gb_bundle_destroy(bundle); + + if (device_is_registered(&bundle->dev)) + device_del(&bundle->dev); + + list_del(&bundle->links); + + put_device(&bundle->dev); +} diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c new file mode 100644 index 000000000000..fc8f57f97ce6 --- /dev/null +++ b/drivers/greybus/connection.c @@ -0,0 +1,942 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus connections + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#include +#include + +#include "greybus_trace.h" + +#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000 + +static void gb_connection_kref_release(struct kref *kref); + +static DEFINE_SPINLOCK(gb_connections_lock); +static DEFINE_MUTEX(gb_connection_mutex); + +/* Caller holds gb_connection_mutex. */ +static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id) +{ + struct gb_host_device *hd = intf->hd; + struct gb_connection *connection; + + list_for_each_entry(connection, &hd->connections, hd_links) { + if (connection->intf == intf && + connection->intf_cport_id == cport_id) + return true; + } + + return false; +} + +static void gb_connection_get(struct gb_connection *connection) +{ + kref_get(&connection->kref); + + trace_gb_connection_get(connection); +} + +static void gb_connection_put(struct gb_connection *connection) +{ + trace_gb_connection_put(connection); + + kref_put(&connection->kref, gb_connection_kref_release); +} + +/* + * Returns a reference-counted pointer to the connection if found. 
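+ * The caller is responsible for dropping that reference with
+ * gb_connection_put() once it is done with the connection.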
+ */ +static struct gb_connection * +gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id) +{ + struct gb_connection *connection; + unsigned long flags; + + spin_lock_irqsave(&gb_connections_lock, flags); + list_for_each_entry(connection, &hd->connections, hd_links) + if (connection->hd_cport_id == cport_id) { + gb_connection_get(connection); + goto found; + } + connection = NULL; +found: + spin_unlock_irqrestore(&gb_connections_lock, flags); + + return connection; +} + +/* + * Callback from the host driver to let us know that data has been + * received on the bundle. + */ +void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, + u8 *data, size_t length) +{ + struct gb_connection *connection; + + trace_gb_hd_in(hd); + + connection = gb_connection_hd_find(hd, cport_id); + if (!connection) { + dev_err(&hd->dev, + "nonexistent connection (%zu bytes dropped)\n", length); + return; + } + gb_connection_recv(connection, data, length); + gb_connection_put(connection); +} +EXPORT_SYMBOL_GPL(greybus_data_rcvd); + +static void gb_connection_kref_release(struct kref *kref) +{ + struct gb_connection *connection; + + connection = container_of(kref, struct gb_connection, kref); + + trace_gb_connection_release(connection); + + kfree(connection); +} + +static void gb_connection_init_name(struct gb_connection *connection) +{ + u16 hd_cport_id = connection->hd_cport_id; + u16 cport_id = 0; + u8 intf_id = 0; + + if (connection->intf) { + intf_id = connection->intf->interface_id; + cport_id = connection->intf_cport_id; + } + + snprintf(connection->name, sizeof(connection->name), + "%u/%u:%u", hd_cport_id, intf_id, cport_id); +} + +/* + * _gb_connection_create() - create a Greybus connection + * @hd: host device of the connection + * @hd_cport_id: host-device cport id, or -1 for dynamic allocation + * @intf: remote interface, or NULL for static connections + * @bundle: remote-interface bundle (may be NULL) + * @cport_id: remote-interface cport id, or 0 for static connections + * @handler: request handler (may be NULL) + * @flags: connection flags + * + * Create a Greybus connection, representing the bidirectional link + * between a CPort on a (local) Greybus host device and a CPort on + * another Greybus interface. + * + * A connection also maintains the state of operations sent over the + * connection. + * + * Serialised against concurrent create and destroy using the + * gb_connection_mutex. + * + * Return: A pointer to the new connection if successful, or an ERR_PTR + * otherwise. 
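+ *
+ * Drivers and core code normally go through the gb_connection_create*()
+ * wrappers below rather than calling this helper directly.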
+ */ +static struct gb_connection * +_gb_connection_create(struct gb_host_device *hd, int hd_cport_id, + struct gb_interface *intf, + struct gb_bundle *bundle, int cport_id, + gb_request_handler_t handler, + unsigned long flags) +{ + struct gb_connection *connection; + int ret; + + mutex_lock(&gb_connection_mutex); + + if (intf && gb_connection_cport_in_use(intf, cport_id)) { + dev_err(&intf->dev, "cport %u already in use\n", cport_id); + ret = -EBUSY; + goto err_unlock; + } + + ret = gb_hd_cport_allocate(hd, hd_cport_id, flags); + if (ret < 0) { + dev_err(&hd->dev, "failed to allocate cport: %d\n", ret); + goto err_unlock; + } + hd_cport_id = ret; + + connection = kzalloc(sizeof(*connection), GFP_KERNEL); + if (!connection) { + ret = -ENOMEM; + goto err_hd_cport_release; + } + + connection->hd_cport_id = hd_cport_id; + connection->intf_cport_id = cport_id; + connection->hd = hd; + connection->intf = intf; + connection->bundle = bundle; + connection->handler = handler; + connection->flags = flags; + if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES)) + connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL; + connection->state = GB_CONNECTION_STATE_DISABLED; + + atomic_set(&connection->op_cycle, 0); + mutex_init(&connection->mutex); + spin_lock_init(&connection->lock); + INIT_LIST_HEAD(&connection->operations); + + connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1, + dev_name(&hd->dev), hd_cport_id); + if (!connection->wq) { + ret = -ENOMEM; + goto err_free_connection; + } + + kref_init(&connection->kref); + + gb_connection_init_name(connection); + + spin_lock_irq(&gb_connections_lock); + list_add(&connection->hd_links, &hd->connections); + + if (bundle) + list_add(&connection->bundle_links, &bundle->connections); + else + INIT_LIST_HEAD(&connection->bundle_links); + + spin_unlock_irq(&gb_connections_lock); + + mutex_unlock(&gb_connection_mutex); + + trace_gb_connection_create(connection); + + return connection; + +err_free_connection: + kfree(connection); +err_hd_cport_release: + gb_hd_cport_release(hd, hd_cport_id); +err_unlock: + mutex_unlock(&gb_connection_mutex); + + return ERR_PTR(ret); +} + +struct gb_connection * +gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id, + gb_request_handler_t handler) +{ + return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler, + GB_CONNECTION_FLAG_HIGH_PRIO); +} + +struct gb_connection * +gb_connection_create_control(struct gb_interface *intf) +{ + return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, + GB_CONNECTION_FLAG_CONTROL | + GB_CONNECTION_FLAG_HIGH_PRIO); +} + +struct gb_connection * +gb_connection_create(struct gb_bundle *bundle, u16 cport_id, + gb_request_handler_t handler) +{ + struct gb_interface *intf = bundle->intf; + + return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, + handler, 0); +} +EXPORT_SYMBOL_GPL(gb_connection_create); + +struct gb_connection * +gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id, + gb_request_handler_t handler, + unsigned long flags) +{ + struct gb_interface *intf = bundle->intf; + + if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK)) + flags &= ~GB_CONNECTION_FLAG_CORE_MASK; + + return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, + handler, flags); +} +EXPORT_SYMBOL_GPL(gb_connection_create_flags); + +struct gb_connection * +gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id, + unsigned long flags) +{ + flags |= GB_CONNECTION_FLAG_OFFLOADED; + + return 
gb_connection_create_flags(bundle, cport_id, NULL, flags); +} +EXPORT_SYMBOL_GPL(gb_connection_create_offloaded); + +static int gb_connection_hd_cport_enable(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->cport_enable) + return 0; + + ret = hd->driver->cport_enable(hd, connection->hd_cport_id, + connection->flags); + if (ret) { + dev_err(&hd->dev, "%s: failed to enable host cport: %d\n", + connection->name, ret); + return ret; + } + + return 0; +} + +static void gb_connection_hd_cport_disable(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->cport_disable) + return; + + ret = hd->driver->cport_disable(hd, connection->hd_cport_id); + if (ret) { + dev_err(&hd->dev, "%s: failed to disable host cport: %d\n", + connection->name, ret); + } +} + +static int gb_connection_hd_cport_connected(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->cport_connected) + return 0; + + ret = hd->driver->cport_connected(hd, connection->hd_cport_id); + if (ret) { + dev_err(&hd->dev, "%s: failed to set connected state: %d\n", + connection->name, ret); + return ret; + } + + return 0; +} + +static int gb_connection_hd_cport_flush(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->cport_flush) + return 0; + + ret = hd->driver->cport_flush(hd, connection->hd_cport_id); + if (ret) { + dev_err(&hd->dev, "%s: failed to flush host cport: %d\n", + connection->name, ret); + return ret; + } + + return 0; +} + +static int gb_connection_hd_cport_quiesce(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + size_t peer_space; + int ret; + + if (!hd->driver->cport_quiesce) + return 0; + + peer_space = sizeof(struct gb_operation_msg_hdr) + + sizeof(struct gb_cport_shutdown_request); + + if (connection->mode_switch) + peer_space += sizeof(struct gb_operation_msg_hdr); + + if (!hd->driver->cport_quiesce) + return 0; + + ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id, + peer_space, + GB_CONNECTION_CPORT_QUIESCE_TIMEOUT); + if (ret) { + dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n", + connection->name, ret); + return ret; + } + + return 0; +} + +static int gb_connection_hd_cport_clear(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->cport_clear) + return 0; + + ret = hd->driver->cport_clear(hd, connection->hd_cport_id); + if (ret) { + dev_err(&hd->dev, "%s: failed to clear host cport: %d\n", + connection->name, ret); + return ret; + } + + return 0; +} + +/* + * Request the SVC to create a connection from AP's cport to interface's + * cport. + */ +static int +gb_connection_svc_connection_create(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + struct gb_interface *intf; + u8 cport_flags; + int ret; + + if (gb_connection_is_static(connection)) + return 0; + + intf = connection->intf; + + /* + * Enable either E2EFC or CSD, unless no flow control is requested. 
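+ * (The GB_SVC_CPORT_FLAG_*_N flags are active-low: setting one disables
+ * the corresponding UniPro feature on the CPort, so CSD stays enabled
+ * unless CSD_N is set and E2EFC must be requested explicitly.)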
+ */ + cport_flags = GB_SVC_CPORT_FLAG_CSV_N; + if (gb_connection_flow_control_disabled(connection)) { + cport_flags |= GB_SVC_CPORT_FLAG_CSD_N; + } else if (gb_connection_e2efc_enabled(connection)) { + cport_flags |= GB_SVC_CPORT_FLAG_CSD_N | + GB_SVC_CPORT_FLAG_E2EFC; + } + + ret = gb_svc_connection_create(hd->svc, + hd->svc->ap_intf_id, + connection->hd_cport_id, + intf->interface_id, + connection->intf_cport_id, + cport_flags); + if (ret) { + dev_err(&connection->hd->dev, + "%s: failed to create svc connection: %d\n", + connection->name, ret); + return ret; + } + + return 0; +} + +static void +gb_connection_svc_connection_destroy(struct gb_connection *connection) +{ + if (gb_connection_is_static(connection)) + return; + + gb_svc_connection_destroy(connection->hd->svc, + connection->hd->svc->ap_intf_id, + connection->hd_cport_id, + connection->intf->interface_id, + connection->intf_cport_id); +} + +/* Inform Interface about active CPorts */ +static int gb_connection_control_connected(struct gb_connection *connection) +{ + struct gb_control *control; + u16 cport_id = connection->intf_cport_id; + int ret; + + if (gb_connection_is_static(connection)) + return 0; + + if (gb_connection_is_control(connection)) + return 0; + + control = connection->intf->control; + + ret = gb_control_connected_operation(control, cport_id); + if (ret) { + dev_err(&connection->bundle->dev, + "failed to connect cport: %d\n", ret); + return ret; + } + + return 0; +} + +static void +gb_connection_control_disconnecting(struct gb_connection *connection) +{ + struct gb_control *control; + u16 cport_id = connection->intf_cport_id; + int ret; + + if (gb_connection_is_static(connection)) + return; + + control = connection->intf->control; + + ret = gb_control_disconnecting_operation(control, cport_id); + if (ret) { + dev_err(&connection->hd->dev, + "%s: failed to send disconnecting: %d\n", + connection->name, ret); + } +} + +static void +gb_connection_control_disconnected(struct gb_connection *connection) +{ + struct gb_control *control; + u16 cport_id = connection->intf_cport_id; + int ret; + + if (gb_connection_is_static(connection)) + return; + + control = connection->intf->control; + + if (gb_connection_is_control(connection)) { + if (connection->mode_switch) { + ret = gb_control_mode_switch_operation(control); + if (ret) { + /* + * Allow mode switch to time out waiting for + * mailbox event. 
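+ * Tear-down of the control connection is deferred while a
+ * mode switch is pending; see gb_connection_disable().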
+ */ + return; + } + } + + return; + } + + ret = gb_control_disconnected_operation(control, cport_id); + if (ret) { + dev_warn(&connection->bundle->dev, + "failed to disconnect cport: %d\n", ret); + } +} + +static int gb_connection_shutdown_operation(struct gb_connection *connection, + u8 phase) +{ + struct gb_cport_shutdown_request *req; + struct gb_operation *operation; + int ret; + + operation = gb_operation_create_core(connection, + GB_REQUEST_TYPE_CPORT_SHUTDOWN, + sizeof(*req), 0, 0, + GFP_KERNEL); + if (!operation) + return -ENOMEM; + + req = operation->request->payload; + req->phase = phase; + + ret = gb_operation_request_send_sync(operation); + + gb_operation_put(operation); + + return ret; +} + +static int gb_connection_cport_shutdown(struct gb_connection *connection, + u8 phase) +{ + struct gb_host_device *hd = connection->hd; + const struct gb_hd_driver *drv = hd->driver; + int ret; + + if (gb_connection_is_static(connection)) + return 0; + + if (gb_connection_is_offloaded(connection)) { + if (!drv->cport_shutdown) + return 0; + + ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase, + GB_OPERATION_TIMEOUT_DEFAULT); + } else { + ret = gb_connection_shutdown_operation(connection, phase); + } + + if (ret) { + dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n", + connection->name, phase, ret); + return ret; + } + + return 0; +} + +static int +gb_connection_cport_shutdown_phase_1(struct gb_connection *connection) +{ + return gb_connection_cport_shutdown(connection, 1); +} + +static int +gb_connection_cport_shutdown_phase_2(struct gb_connection *connection) +{ + return gb_connection_cport_shutdown(connection, 2); +} + +/* + * Cancel all active operations on a connection. + * + * Locking: Called with connection lock held and state set to DISABLED or + * DISCONNECTING. + */ +static void gb_connection_cancel_operations(struct gb_connection *connection, + int errno) + __must_hold(&connection->lock) +{ + struct gb_operation *operation; + + while (!list_empty(&connection->operations)) { + operation = list_last_entry(&connection->operations, + struct gb_operation, links); + gb_operation_get(operation); + spin_unlock_irq(&connection->lock); + + if (gb_operation_is_incoming(operation)) + gb_operation_cancel_incoming(operation, errno); + else + gb_operation_cancel(operation, errno); + + gb_operation_put(operation); + + spin_lock_irq(&connection->lock); + } +} + +/* + * Cancel all active incoming operations on a connection. + * + * Locking: Called with connection lock held and state set to ENABLED_TX. + */ +static void +gb_connection_flush_incoming_operations(struct gb_connection *connection, + int errno) + __must_hold(&connection->lock) +{ + struct gb_operation *operation; + bool incoming; + + while (!list_empty(&connection->operations)) { + incoming = false; + list_for_each_entry(operation, &connection->operations, + links) { + if (gb_operation_is_incoming(operation)) { + gb_operation_get(operation); + incoming = true; + break; + } + } + + if (!incoming) + break; + + spin_unlock_irq(&connection->lock); + + /* FIXME: flush, not cancel? */ + gb_operation_cancel_incoming(operation, errno); + gb_operation_put(operation); + + spin_lock_irq(&connection->lock); + } +} + +/* + * _gb_connection_enable() - enable a connection + * @connection: connection to enable + * @rx: whether to enable incoming requests + * + * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and + * ENABLED_TX->ENABLED state transitions. 
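+ * Incoming requests are only dispatched in the full ENABLED state, which
+ * requires both @rx and a registered request handler; otherwise the
+ * connection is left in (or moved to) ENABLED_TX.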
+ * + * Locking: Caller holds connection->mutex. + */ +static int _gb_connection_enable(struct gb_connection *connection, bool rx) +{ + int ret; + + /* Handle ENABLED_TX -> ENABLED transitions. */ + if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) { + if (!(connection->handler && rx)) + return 0; + + spin_lock_irq(&connection->lock); + connection->state = GB_CONNECTION_STATE_ENABLED; + spin_unlock_irq(&connection->lock); + + return 0; + } + + ret = gb_connection_hd_cport_enable(connection); + if (ret) + return ret; + + ret = gb_connection_svc_connection_create(connection); + if (ret) + goto err_hd_cport_clear; + + ret = gb_connection_hd_cport_connected(connection); + if (ret) + goto err_svc_connection_destroy; + + spin_lock_irq(&connection->lock); + if (connection->handler && rx) + connection->state = GB_CONNECTION_STATE_ENABLED; + else + connection->state = GB_CONNECTION_STATE_ENABLED_TX; + spin_unlock_irq(&connection->lock); + + ret = gb_connection_control_connected(connection); + if (ret) + goto err_control_disconnecting; + + return 0; + +err_control_disconnecting: + spin_lock_irq(&connection->lock); + connection->state = GB_CONNECTION_STATE_DISCONNECTING; + gb_connection_cancel_operations(connection, -ESHUTDOWN); + spin_unlock_irq(&connection->lock); + + /* Transmit queue should already be empty. */ + gb_connection_hd_cport_flush(connection); + + gb_connection_control_disconnecting(connection); + gb_connection_cport_shutdown_phase_1(connection); + gb_connection_hd_cport_quiesce(connection); + gb_connection_cport_shutdown_phase_2(connection); + gb_connection_control_disconnected(connection); + connection->state = GB_CONNECTION_STATE_DISABLED; +err_svc_connection_destroy: + gb_connection_svc_connection_destroy(connection); +err_hd_cport_clear: + gb_connection_hd_cport_clear(connection); + + gb_connection_hd_cport_disable(connection); + + return ret; +} + +int gb_connection_enable(struct gb_connection *connection) +{ + int ret = 0; + + mutex_lock(&connection->mutex); + + if (connection->state == GB_CONNECTION_STATE_ENABLED) + goto out_unlock; + + ret = _gb_connection_enable(connection, true); + if (!ret) + trace_gb_connection_enable(connection); + +out_unlock: + mutex_unlock(&connection->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(gb_connection_enable); + +int gb_connection_enable_tx(struct gb_connection *connection) +{ + int ret = 0; + + mutex_lock(&connection->mutex); + + if (connection->state == GB_CONNECTION_STATE_ENABLED) { + ret = -EINVAL; + goto out_unlock; + } + + if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) + goto out_unlock; + + ret = _gb_connection_enable(connection, false); + if (!ret) + trace_gb_connection_enable(connection); + +out_unlock: + mutex_unlock(&connection->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(gb_connection_enable_tx); + +void gb_connection_disable_rx(struct gb_connection *connection) +{ + mutex_lock(&connection->mutex); + + spin_lock_irq(&connection->lock); + if (connection->state != GB_CONNECTION_STATE_ENABLED) { + spin_unlock_irq(&connection->lock); + goto out_unlock; + } + connection->state = GB_CONNECTION_STATE_ENABLED_TX; + gb_connection_flush_incoming_operations(connection, -ESHUTDOWN); + spin_unlock_irq(&connection->lock); + + trace_gb_connection_disable(connection); + +out_unlock: + mutex_unlock(&connection->mutex); +} +EXPORT_SYMBOL_GPL(gb_connection_disable_rx); + +void gb_connection_mode_switch_prepare(struct gb_connection *connection) +{ + connection->mode_switch = true; +} + +void 
gb_connection_mode_switch_complete(struct gb_connection *connection) +{ + gb_connection_svc_connection_destroy(connection); + gb_connection_hd_cport_clear(connection); + + gb_connection_hd_cport_disable(connection); + + connection->mode_switch = false; +} + +void gb_connection_disable(struct gb_connection *connection) +{ + mutex_lock(&connection->mutex); + + if (connection->state == GB_CONNECTION_STATE_DISABLED) + goto out_unlock; + + trace_gb_connection_disable(connection); + + spin_lock_irq(&connection->lock); + connection->state = GB_CONNECTION_STATE_DISCONNECTING; + gb_connection_cancel_operations(connection, -ESHUTDOWN); + spin_unlock_irq(&connection->lock); + + gb_connection_hd_cport_flush(connection); + + gb_connection_control_disconnecting(connection); + gb_connection_cport_shutdown_phase_1(connection); + gb_connection_hd_cport_quiesce(connection); + gb_connection_cport_shutdown_phase_2(connection); + gb_connection_control_disconnected(connection); + + connection->state = GB_CONNECTION_STATE_DISABLED; + + /* control-connection tear down is deferred when mode switching */ + if (!connection->mode_switch) { + gb_connection_svc_connection_destroy(connection); + gb_connection_hd_cport_clear(connection); + + gb_connection_hd_cport_disable(connection); + } + +out_unlock: + mutex_unlock(&connection->mutex); +} +EXPORT_SYMBOL_GPL(gb_connection_disable); + +/* Disable a connection without communicating with the remote end. */ +void gb_connection_disable_forced(struct gb_connection *connection) +{ + mutex_lock(&connection->mutex); + + if (connection->state == GB_CONNECTION_STATE_DISABLED) + goto out_unlock; + + trace_gb_connection_disable(connection); + + spin_lock_irq(&connection->lock); + connection->state = GB_CONNECTION_STATE_DISABLED; + gb_connection_cancel_operations(connection, -ESHUTDOWN); + spin_unlock_irq(&connection->lock); + + gb_connection_hd_cport_flush(connection); + + gb_connection_svc_connection_destroy(connection); + gb_connection_hd_cport_clear(connection); + + gb_connection_hd_cport_disable(connection); +out_unlock: + mutex_unlock(&connection->mutex); +} +EXPORT_SYMBOL_GPL(gb_connection_disable_forced); + +/* Caller must have disabled the connection before destroying it. 
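+ * (If it was not, the WARN_ON below fires and the connection is disabled
+ * here before tear-down proceeds.)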
*/ +void gb_connection_destroy(struct gb_connection *connection) +{ + if (!connection) + return; + + if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED)) + gb_connection_disable(connection); + + mutex_lock(&gb_connection_mutex); + + spin_lock_irq(&gb_connections_lock); + list_del(&connection->bundle_links); + list_del(&connection->hd_links); + spin_unlock_irq(&gb_connections_lock); + + destroy_workqueue(connection->wq); + + gb_hd_cport_release(connection->hd, connection->hd_cport_id); + connection->hd_cport_id = CPORT_ID_BAD; + + mutex_unlock(&gb_connection_mutex); + + gb_connection_put(connection); +} +EXPORT_SYMBOL_GPL(gb_connection_destroy); + +void gb_connection_latency_tag_enable(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->latency_tag_enable) + return; + + ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id); + if (ret) { + dev_err(&connection->hd->dev, + "%s: failed to enable latency tag: %d\n", + connection->name, ret); + } +} +EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable); + +void gb_connection_latency_tag_disable(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + int ret; + + if (!hd->driver->latency_tag_disable) + return; + + ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id); + if (ret) { + dev_err(&connection->hd->dev, + "%s: failed to disable latency tag: %d\n", + connection->name, ret); + } +} +EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable); diff --git a/drivers/greybus/control.c b/drivers/greybus/control.c new file mode 100644 index 000000000000..359a25841973 --- /dev/null +++ b/drivers/greybus/control.c @@ -0,0 +1,584 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus CPort control protocol. + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. 
+ */ + +#include +#include +#include +#include + +/* Highest control-protocol version supported */ +#define GB_CONTROL_VERSION_MAJOR 0 +#define GB_CONTROL_VERSION_MINOR 1 + +static int gb_control_get_version(struct gb_control *control) +{ + struct gb_interface *intf = control->connection->intf; + struct gb_control_version_request request; + struct gb_control_version_response response; + int ret; + + request.major = GB_CONTROL_VERSION_MAJOR; + request.minor = GB_CONTROL_VERSION_MINOR; + + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_VERSION, + &request, sizeof(request), &response, + sizeof(response)); + if (ret) { + dev_err(&intf->dev, + "failed to get control-protocol version: %d\n", + ret); + return ret; + } + + if (response.major > request.major) { + dev_err(&intf->dev, + "unsupported major control-protocol version (%u > %u)\n", + response.major, request.major); + return -ENOTSUPP; + } + + control->protocol_major = response.major; + control->protocol_minor = response.minor; + + dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major, + response.minor); + + return 0; +} + +static int gb_control_get_bundle_version(struct gb_control *control, + struct gb_bundle *bundle) +{ + struct gb_interface *intf = control->connection->intf; + struct gb_control_bundle_version_request request; + struct gb_control_bundle_version_response response; + int ret; + + request.bundle_id = bundle->id; + + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_BUNDLE_VERSION, + &request, sizeof(request), + &response, sizeof(response)); + if (ret) { + dev_err(&intf->dev, + "failed to get bundle %u class version: %d\n", + bundle->id, ret); + return ret; + } + + bundle->class_major = response.major; + bundle->class_minor = response.minor; + + dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id, + response.major, response.minor); + + return 0; +} + +int gb_control_get_bundle_versions(struct gb_control *control) +{ + struct gb_interface *intf = control->connection->intf; + struct gb_bundle *bundle; + int ret; + + if (!control->has_bundle_version) + return 0; + + list_for_each_entry(bundle, &intf->bundles, links) { + ret = gb_control_get_bundle_version(control, bundle); + if (ret) + return ret; + } + + return 0; +} + +/* Get Manifest's size from the interface */ +int gb_control_get_manifest_size_operation(struct gb_interface *intf) +{ + struct gb_control_get_manifest_size_response response; + struct gb_connection *connection = intf->control->connection; + int ret; + + ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE, + NULL, 0, &response, sizeof(response)); + if (ret) { + dev_err(&connection->intf->dev, + "failed to get manifest size: %d\n", ret); + return ret; + } + + return le16_to_cpu(response.size); +} + +/* Reads Manifest from the interface */ +int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, + size_t size) +{ + struct gb_connection *connection = intf->control->connection; + + return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST, + NULL, 0, manifest, size); +} + +int gb_control_connected_operation(struct gb_control *control, u16 cport_id) +{ + struct gb_control_connected_request request; + + request.cport_id = cpu_to_le16(cport_id); + return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED, + &request, sizeof(request), NULL, 0); +} + +int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id) +{ + struct gb_control_disconnected_request request; + + request.cport_id = 
cpu_to_le16(cport_id); + return gb_operation_sync(control->connection, + GB_CONTROL_TYPE_DISCONNECTED, &request, + sizeof(request), NULL, 0); +} + +int gb_control_disconnecting_operation(struct gb_control *control, + u16 cport_id) +{ + struct gb_control_disconnecting_request *request; + struct gb_operation *operation; + int ret; + + operation = gb_operation_create_core(control->connection, + GB_CONTROL_TYPE_DISCONNECTING, + sizeof(*request), 0, 0, + GFP_KERNEL); + if (!operation) + return -ENOMEM; + + request = operation->request->payload; + request->cport_id = cpu_to_le16(cport_id); + + ret = gb_operation_request_send_sync(operation); + if (ret) { + dev_err(&control->dev, "failed to send disconnecting: %d\n", + ret); + } + + gb_operation_put(operation); + + return ret; +} + +int gb_control_mode_switch_operation(struct gb_control *control) +{ + struct gb_operation *operation; + int ret; + + operation = gb_operation_create_core(control->connection, + GB_CONTROL_TYPE_MODE_SWITCH, + 0, 0, + GB_OPERATION_FLAG_UNIDIRECTIONAL, + GFP_KERNEL); + if (!operation) + return -ENOMEM; + + ret = gb_operation_request_send_sync(operation); + if (ret) + dev_err(&control->dev, "failed to send mode switch: %d\n", ret); + + gb_operation_put(operation); + + return ret; +} + +static int gb_control_bundle_pm_status_map(u8 status) +{ + switch (status) { + case GB_CONTROL_BUNDLE_PM_INVAL: + return -EINVAL; + case GB_CONTROL_BUNDLE_PM_BUSY: + return -EBUSY; + case GB_CONTROL_BUNDLE_PM_NA: + return -ENOMSG; + case GB_CONTROL_BUNDLE_PM_FAIL: + default: + return -EREMOTEIO; + } +} + +int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id) +{ + struct gb_control_bundle_pm_request request; + struct gb_control_bundle_pm_response response; + int ret; + + request.bundle_id = bundle_id; + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request, + sizeof(request), &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, "failed to send bundle %u suspend: %d\n", + bundle_id, ret); + return ret; + } + + if (response.status != GB_CONTROL_BUNDLE_PM_OK) { + dev_err(&control->dev, "failed to suspend bundle %u: %d\n", + bundle_id, response.status); + return gb_control_bundle_pm_status_map(response.status); + } + + return 0; +} + +int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id) +{ + struct gb_control_bundle_pm_request request; + struct gb_control_bundle_pm_response response; + int ret; + + request.bundle_id = bundle_id; + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_BUNDLE_RESUME, &request, + sizeof(request), &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, "failed to send bundle %u resume: %d\n", + bundle_id, ret); + return ret; + } + + if (response.status != GB_CONTROL_BUNDLE_PM_OK) { + dev_err(&control->dev, "failed to resume bundle %u: %d\n", + bundle_id, response.status); + return gb_control_bundle_pm_status_map(response.status); + } + + return 0; +} + +int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id) +{ + struct gb_control_bundle_pm_request request; + struct gb_control_bundle_pm_response response; + int ret; + + request.bundle_id = bundle_id; + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request, + sizeof(request), &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, + "failed to send bundle %u deactivate: %d\n", bundle_id, + ret); + return ret; + } + + if (response.status != GB_CONTROL_BUNDLE_PM_OK) { + dev_err(&control->dev, 
"failed to deactivate bundle %u: %d\n", + bundle_id, response.status); + return gb_control_bundle_pm_status_map(response.status); + } + + return 0; +} + +int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id) +{ + struct gb_control_bundle_pm_request request; + struct gb_control_bundle_pm_response response; + int ret; + + if (!control->has_bundle_activate) + return 0; + + request.bundle_id = bundle_id; + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request, + sizeof(request), &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, + "failed to send bundle %u activate: %d\n", bundle_id, + ret); + return ret; + } + + if (response.status != GB_CONTROL_BUNDLE_PM_OK) { + dev_err(&control->dev, "failed to activate bundle %u: %d\n", + bundle_id, response.status); + return gb_control_bundle_pm_status_map(response.status); + } + + return 0; +} + +static int gb_control_interface_pm_status_map(u8 status) +{ + switch (status) { + case GB_CONTROL_INTF_PM_BUSY: + return -EBUSY; + case GB_CONTROL_INTF_PM_NA: + return -ENOMSG; + default: + return -EREMOTEIO; + } +} + +int gb_control_interface_suspend_prepare(struct gb_control *control) +{ + struct gb_control_intf_pm_response response; + int ret; + + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0, + &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, + "failed to send interface suspend prepare: %d\n", ret); + return ret; + } + + if (response.status != GB_CONTROL_INTF_PM_OK) { + dev_err(&control->dev, "interface error while preparing suspend: %d\n", + response.status); + return gb_control_interface_pm_status_map(response.status); + } + + return 0; +} + +int gb_control_interface_deactivate_prepare(struct gb_control *control) +{ + struct gb_control_intf_pm_response response; + int ret; + + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL, + 0, &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n", + ret); + return ret; + } + + if (response.status != GB_CONTROL_INTF_PM_OK) { + dev_err(&control->dev, "interface error while preparing deactivate: %d\n", + response.status); + return gb_control_interface_pm_status_map(response.status); + } + + return 0; +} + +int gb_control_interface_hibernate_abort(struct gb_control *control) +{ + struct gb_control_intf_pm_response response; + int ret; + + ret = gb_operation_sync(control->connection, + GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0, + &response, sizeof(response)); + if (ret) { + dev_err(&control->dev, + "failed to send interface aborting hibernate: %d\n", + ret); + return ret; + } + + if (response.status != GB_CONTROL_INTF_PM_OK) { + dev_err(&control->dev, "interface error while aborting hibernate: %d\n", + response.status); + return gb_control_interface_pm_status_map(response.status); + } + + return 0; +} + +static ssize_t vendor_string_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_control *control = to_gb_control(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string); +} +static DEVICE_ATTR_RO(vendor_string); + +static ssize_t product_string_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_control *control = to_gb_control(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string); +} +static DEVICE_ATTR_RO(product_string); + +static struct attribute *control_attrs[] = { + 
&dev_attr_vendor_string.attr, + &dev_attr_product_string.attr, + NULL, +}; +ATTRIBUTE_GROUPS(control); + +static void gb_control_release(struct device *dev) +{ + struct gb_control *control = to_gb_control(dev); + + gb_connection_destroy(control->connection); + + kfree(control->vendor_string); + kfree(control->product_string); + + kfree(control); +} + +struct device_type greybus_control_type = { + .name = "greybus_control", + .release = gb_control_release, +}; + +struct gb_control *gb_control_create(struct gb_interface *intf) +{ + struct gb_connection *connection; + struct gb_control *control; + + control = kzalloc(sizeof(*control), GFP_KERNEL); + if (!control) + return ERR_PTR(-ENOMEM); + + control->intf = intf; + + connection = gb_connection_create_control(intf); + if (IS_ERR(connection)) { + dev_err(&intf->dev, + "failed to create control connection: %ld\n", + PTR_ERR(connection)); + kfree(control); + return ERR_CAST(connection); + } + + control->connection = connection; + + control->dev.parent = &intf->dev; + control->dev.bus = &greybus_bus_type; + control->dev.type = &greybus_control_type; + control->dev.groups = control_groups; + control->dev.dma_mask = intf->dev.dma_mask; + device_initialize(&control->dev); + dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev)); + + gb_connection_set_data(control->connection, control); + + return control; +} + +int gb_control_enable(struct gb_control *control) +{ + int ret; + + dev_dbg(&control->connection->intf->dev, "%s\n", __func__); + + ret = gb_connection_enable_tx(control->connection); + if (ret) { + dev_err(&control->connection->intf->dev, + "failed to enable control connection: %d\n", + ret); + return ret; + } + + ret = gb_control_get_version(control); + if (ret) + goto err_disable_connection; + + if (control->protocol_major > 0 || control->protocol_minor > 1) + control->has_bundle_version = true; + + /* FIXME: use protocol version instead */ + if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE)) + control->has_bundle_activate = true; + + return 0; + +err_disable_connection: + gb_connection_disable(control->connection); + + return ret; +} + +void gb_control_disable(struct gb_control *control) +{ + dev_dbg(&control->connection->intf->dev, "%s\n", __func__); + + if (control->intf->disconnected) + gb_connection_disable_forced(control->connection); + else + gb_connection_disable(control->connection); +} + +int gb_control_suspend(struct gb_control *control) +{ + gb_connection_disable(control->connection); + + return 0; +} + +int gb_control_resume(struct gb_control *control) +{ + int ret; + + ret = gb_connection_enable_tx(control->connection); + if (ret) { + dev_err(&control->connection->intf->dev, + "failed to enable control connection: %d\n", ret); + return ret; + } + + return 0; +} + +int gb_control_add(struct gb_control *control) +{ + int ret; + + ret = device_add(&control->dev); + if (ret) { + dev_err(&control->dev, + "failed to register control device: %d\n", + ret); + return ret; + } + + return 0; +} + +void gb_control_del(struct gb_control *control) +{ + if (device_is_registered(&control->dev)) + device_del(&control->dev); +} + +struct gb_control *gb_control_get(struct gb_control *control) +{ + get_device(&control->dev); + + return control; +} + +void gb_control_put(struct gb_control *control) +{ + put_device(&control->dev); +} + +void gb_control_mode_switch_prepare(struct gb_control *control) +{ + gb_connection_mode_switch_prepare(control->connection); +} + +void gb_control_mode_switch_complete(struct gb_control 
*control) +{ + gb_connection_mode_switch_complete(control->connection); +} diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c new file mode 100644 index 000000000000..e546c6431877 --- /dev/null +++ b/drivers/greybus/core.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus "Core" + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define CREATE_TRACE_POINTS +#include +#include "greybus_trace.h" + +#define GB_BUNDLE_AUTOSUSPEND_MS 3000 + +/* Allow greybus to be disabled at boot if needed */ +static bool nogreybus; +#ifdef MODULE +module_param(nogreybus, bool, 0444); +#else +core_param(nogreybus, nogreybus, bool, 0444); +#endif +int greybus_disabled(void) +{ + return nogreybus; +} +EXPORT_SYMBOL_GPL(greybus_disabled); + +static bool greybus_match_one_id(struct gb_bundle *bundle, + const struct greybus_bundle_id *id) +{ + if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) && + (id->vendor != bundle->intf->vendor_id)) + return false; + + if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) && + (id->product != bundle->intf->product_id)) + return false; + + if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) && + (id->class != bundle->class)) + return false; + + return true; +} + +static const struct greybus_bundle_id * +greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id) +{ + if (!id) + return NULL; + + for (; id->vendor || id->product || id->class || id->driver_info; + id++) { + if (greybus_match_one_id(bundle, id)) + return id; + } + + return NULL; +} + +static int greybus_match_device(struct device *dev, struct device_driver *drv) +{ + struct greybus_driver *driver = to_greybus_driver(drv); + struct gb_bundle *bundle; + const struct greybus_bundle_id *id; + + if (!is_gb_bundle(dev)) + return 0; + + bundle = to_gb_bundle(dev); + + id = greybus_match_id(bundle, driver->id_table); + if (id) + return 1; + /* FIXME - Dynamic ids? 
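+	 * (Dynamic IDs would let userspace add vendor/product matches at run
+	 * time, the way the USB and PCI cores allow; Greybus has no such
+	 * mechanism yet.)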
*/ + return 0; +} + +static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct gb_host_device *hd; + struct gb_module *module = NULL; + struct gb_interface *intf = NULL; + struct gb_control *control = NULL; + struct gb_bundle *bundle = NULL; + struct gb_svc *svc = NULL; + + if (is_gb_host_device(dev)) { + hd = to_gb_host_device(dev); + } else if (is_gb_module(dev)) { + module = to_gb_module(dev); + hd = module->hd; + } else if (is_gb_interface(dev)) { + intf = to_gb_interface(dev); + module = intf->module; + hd = intf->hd; + } else if (is_gb_control(dev)) { + control = to_gb_control(dev); + intf = control->intf; + module = intf->module; + hd = intf->hd; + } else if (is_gb_bundle(dev)) { + bundle = to_gb_bundle(dev); + intf = bundle->intf; + module = intf->module; + hd = intf->hd; + } else if (is_gb_svc(dev)) { + svc = to_gb_svc(dev); + hd = svc->hd; + } else { + dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n"); + return -EINVAL; + } + + if (add_uevent_var(env, "BUS=%u", hd->bus_id)) + return -ENOMEM; + + if (module) { + if (add_uevent_var(env, "MODULE=%u", module->module_id)) + return -ENOMEM; + } + + if (intf) { + if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id)) + return -ENOMEM; + if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x", + intf->vendor_id, intf->product_id)) + return -ENOMEM; + } + + if (bundle) { + // FIXME + // add a uevent that can "load" a bundle type + // This is what we need to bind a driver to so use the info + // in gmod here as well + + if (add_uevent_var(env, "BUNDLE=%u", bundle->id)) + return -ENOMEM; + if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class)) + return -ENOMEM; + } + + return 0; +} + +static void greybus_shutdown(struct device *dev) +{ + if (is_gb_host_device(dev)) { + struct gb_host_device *hd; + + hd = to_gb_host_device(dev); + gb_hd_shutdown(hd); + } +} + +struct bus_type greybus_bus_type = { + .name = "greybus", + .match = greybus_match_device, + .uevent = greybus_uevent, + .shutdown = greybus_shutdown, +}; + +static int greybus_probe(struct device *dev) +{ + struct greybus_driver *driver = to_greybus_driver(dev->driver); + struct gb_bundle *bundle = to_gb_bundle(dev); + const struct greybus_bundle_id *id; + int retval; + + /* match id */ + id = greybus_match_id(bundle, driver->id_table); + if (!id) + return -ENODEV; + + retval = pm_runtime_get_sync(&bundle->intf->dev); + if (retval < 0) { + pm_runtime_put_noidle(&bundle->intf->dev); + return retval; + } + + retval = gb_control_bundle_activate(bundle->intf->control, bundle->id); + if (retval) { + pm_runtime_put(&bundle->intf->dev); + return retval; + } + + /* + * Unbound bundle devices are always deactivated. During probe, the + * Runtime PM is set to enabled and active and the usage count is + * incremented. If the driver supports runtime PM, it should call + * pm_runtime_put() in its probe routine and pm_runtime_get_sync() + * in remove routine. + */ + pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS); + pm_runtime_use_autosuspend(dev); + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + retval = driver->probe(bundle, id); + if (retval) { + /* + * Catch buggy drivers that fail to destroy their connections. 
+ */ + WARN_ON(!list_empty(&bundle->connections)); + + gb_control_bundle_deactivate(bundle->intf->control, bundle->id); + + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_put_noidle(dev); + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_put(&bundle->intf->dev); + + return retval; + } + + pm_runtime_put(&bundle->intf->dev); + + return 0; +} + +static int greybus_remove(struct device *dev) +{ + struct greybus_driver *driver = to_greybus_driver(dev->driver); + struct gb_bundle *bundle = to_gb_bundle(dev); + struct gb_connection *connection; + int retval; + + retval = pm_runtime_get_sync(dev); + if (retval < 0) + dev_err(dev, "failed to resume bundle: %d\n", retval); + + /* + * Disable (non-offloaded) connections early in case the interface is + * already gone to avoid unnecessary operation timeouts during + * driver disconnect. Otherwise, only disable incoming requests. + */ + list_for_each_entry(connection, &bundle->connections, bundle_links) { + if (gb_connection_is_offloaded(connection)) + continue; + + if (bundle->intf->disconnected) + gb_connection_disable_forced(connection); + else + gb_connection_disable_rx(connection); + } + + driver->disconnect(bundle); + + /* Catch buggy drivers that fail to destroy their connections. */ + WARN_ON(!list_empty(&bundle->connections)); + + if (!bundle->intf->disconnected) + gb_control_bundle_deactivate(bundle->intf->control, bundle->id); + + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_put_noidle(dev); + + return 0; +} + +int greybus_register_driver(struct greybus_driver *driver, struct module *owner, + const char *mod_name) +{ + int retval; + + if (greybus_disabled()) + return -ENODEV; + + driver->driver.bus = &greybus_bus_type; + driver->driver.name = driver->name; + driver->driver.probe = greybus_probe; + driver->driver.remove = greybus_remove; + driver->driver.owner = owner; + driver->driver.mod_name = mod_name; + + retval = driver_register(&driver->driver); + if (retval) + return retval; + + pr_info("registered new driver %s\n", driver->name); + return 0; +} +EXPORT_SYMBOL_GPL(greybus_register_driver); + +void greybus_deregister_driver(struct greybus_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(greybus_deregister_driver); + +static int __init gb_init(void) +{ + int retval; + + if (greybus_disabled()) + return -ENODEV; + + BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD); + + gb_debugfs_init(); + + retval = bus_register(&greybus_bus_type); + if (retval) { + pr_err("bus_register failed (%d)\n", retval); + goto error_bus; + } + + retval = gb_hd_init(); + if (retval) { + pr_err("gb_hd_init failed (%d)\n", retval); + goto error_hd; + } + + retval = gb_operation_init(); + if (retval) { + pr_err("gb_operation_init failed (%d)\n", retval); + goto error_operation; + } + return 0; /* Success */ + +error_operation: + gb_hd_exit(); +error_hd: + bus_unregister(&greybus_bus_type); +error_bus: + gb_debugfs_cleanup(); + + return retval; +} +module_init(gb_init); + +static void __exit gb_exit(void) +{ + gb_operation_exit(); + gb_hd_exit(); + bus_unregister(&greybus_bus_type); + gb_debugfs_cleanup(); + tracepoint_synchronize_unregister(); +} +module_exit(gb_exit); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Greg Kroah-Hartman "); diff --git a/drivers/greybus/debugfs.c b/drivers/greybus/debugfs.c new file mode 100644 index 000000000000..e102d7badb9d --- /dev/null +++ b/drivers/greybus/debugfs.c @@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus debugfs code + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#include +#include + +static struct dentry *gb_debug_root; + +void __init gb_debugfs_init(void) +{ + gb_debug_root = debugfs_create_dir("greybus", NULL); +} + +void gb_debugfs_cleanup(void) +{ + debugfs_remove_recursive(gb_debug_root); + gb_debug_root = NULL; +} + +struct dentry *gb_debugfs_get(void) +{ + return gb_debug_root; +} +EXPORT_SYMBOL_GPL(gb_debugfs_get); diff --git a/drivers/greybus/greybus_trace.h b/drivers/greybus/greybus_trace.h new file mode 100644 index 000000000000..1bc9f1275c65 --- /dev/null +++ b/drivers/greybus/greybus_trace.h @@ -0,0 +1,502 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus driver and device API + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM greybus + +#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_GREYBUS_H + +#include + +struct gb_message; +struct gb_operation; +struct gb_connection; +struct gb_bundle; +struct gb_host_device; + +DECLARE_EVENT_CLASS(gb_message, + + TP_PROTO(struct gb_message *message), + + TP_ARGS(message), + + TP_STRUCT__entry( + __field(u16, size) + __field(u16, operation_id) + __field(u8, type) + __field(u8, result) + ), + + TP_fast_assign( + __entry->size = le16_to_cpu(message->header->size); + __entry->operation_id = + le16_to_cpu(message->header->operation_id); + __entry->type = message->header->type; + __entry->result = message->header->result; + ), + + TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x", + __entry->size, __entry->operation_id, + __entry->type, __entry->result) +); + +#define DEFINE_MESSAGE_EVENT(name) \ + DEFINE_EVENT(gb_message, name, \ + TP_PROTO(struct gb_message *message), \ + TP_ARGS(message)) + +/* + * Occurs immediately before calling a host device's message_send() + * method. + */ +DEFINE_MESSAGE_EVENT(gb_message_send); + +/* + * Occurs after an incoming request message has been received + */ +DEFINE_MESSAGE_EVENT(gb_message_recv_request); + +/* + * Occurs after an incoming response message has been received, + * after its matching request has been found. + */ +DEFINE_MESSAGE_EVENT(gb_message_recv_response); + +/* + * Occurs after an operation has been canceled, possibly before the + * cancellation is complete. + */ +DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing); + +/* + * Occurs when an incoming request is cancelled; if the response has + * been queued for sending, this occurs after it is sent. + */ +DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming); + +/* + * Occurs in the host driver message_send() function just prior to + * handing off the data to be processed by hardware. 
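+ * Together with gb_message_send, this brackets the time a message
+ * spends queued inside the host driver before reaching the hardware.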
+ */ +DEFINE_MESSAGE_EVENT(gb_message_submit); + +#undef DEFINE_MESSAGE_EVENT + +DECLARE_EVENT_CLASS(gb_operation, + + TP_PROTO(struct gb_operation *operation), + + TP_ARGS(operation), + + TP_STRUCT__entry( + __field(u16, cport_id) /* CPort of HD side of connection */ + __field(u16, id) /* Operation ID */ + __field(u8, type) + __field(unsigned long, flags) + __field(int, active) + __field(int, waiters) + __field(int, errno) + ), + + TP_fast_assign( + __entry->cport_id = operation->connection->hd_cport_id; + __entry->id = operation->id; + __entry->type = operation->type; + __entry->flags = operation->flags; + __entry->active = operation->active; + __entry->waiters = atomic_read(&operation->waiters); + __entry->errno = operation->errno; + ), + + TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d", + __entry->id, __entry->type, __entry->cport_id, __entry->flags, + __entry->active, __entry->waiters, __entry->errno) +); + +#define DEFINE_OPERATION_EVENT(name) \ + DEFINE_EVENT(gb_operation, name, \ + TP_PROTO(struct gb_operation *operation), \ + TP_ARGS(operation)) + +/* + * Occurs after a new operation for an outgoing request has been + * successfully created. + */ +DEFINE_OPERATION_EVENT(gb_operation_create); + +/* + * Occurs after a new core operation has been created. + */ +DEFINE_OPERATION_EVENT(gb_operation_create_core); + +/* + * Occurs after a new operation for an incoming request has been + * successfully created and initialized. + */ +DEFINE_OPERATION_EVENT(gb_operation_create_incoming); + +/* + * Occurs when the last reference to an operation has been dropped, + * prior to freeing resources. + */ +DEFINE_OPERATION_EVENT(gb_operation_destroy); + +/* + * Occurs when an operation has been marked active, after updating + * its active count. + */ +DEFINE_OPERATION_EVENT(gb_operation_get_active); + +/* + * Occurs when an operation is about to be marked inactive, before + * updating its active count. + */ +DEFINE_OPERATION_EVENT(gb_operation_put_active); + +#undef DEFINE_OPERATION_EVENT + +DECLARE_EVENT_CLASS(gb_connection, + + TP_PROTO(struct gb_connection *connection), + + TP_ARGS(connection), + + TP_STRUCT__entry( + __field(int, hd_bus_id) + __field(u8, bundle_id) + /* name contains "hd_cport_id/intf_id:cport_id" */ + __dynamic_array(char, name, sizeof(connection->name)) + __field(enum gb_connection_state, state) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->hd_bus_id = connection->hd->bus_id; + __entry->bundle_id = connection->bundle ? + connection->bundle->id : BUNDLE_ID_NONE; + memcpy(__get_str(name), connection->name, + sizeof(connection->name)); + __entry->state = connection->state; + __entry->flags = connection->flags; + ), + + TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx", + __entry->hd_bus_id, __entry->bundle_id, __get_str(name), + (unsigned int)__entry->state, __entry->flags) +); + +#define DEFINE_CONNECTION_EVENT(name) \ + DEFINE_EVENT(gb_connection, name, \ + TP_PROTO(struct gb_connection *connection), \ + TP_ARGS(connection)) + +/* + * Occurs after a new connection is successfully created. + */ +DEFINE_CONNECTION_EVENT(gb_connection_create); + +/* + * Occurs when the last reference to a connection has been dropped, + * before its resources are freed. + */ +DEFINE_CONNECTION_EVENT(gb_connection_release); + +/* + * Occurs when a new reference to a connection is added, currently + * only when a message over the connection is received.
+ */ +DEFINE_CONNECTION_EVENT(gb_connection_get); + +/* + * Occurs when a new reference to a connection is dropped, after a + * received message is handled, or when the connection is + * destroyed. + */ +DEFINE_CONNECTION_EVENT(gb_connection_put); + +/* + * Occurs when a request to enable a connection is made, either for + * transmit only, or for both transmit and receive. + */ +DEFINE_CONNECTION_EVENT(gb_connection_enable); + +/* + * Occurs when a request to disable a connection is made, either for + * receive only, or for both transmit and receive. Also occurs when + * a request to forcefully disable a connection is made. + */ +DEFINE_CONNECTION_EVENT(gb_connection_disable); + +#undef DEFINE_CONNECTION_EVENT + +DECLARE_EVENT_CLASS(gb_bundle, + + TP_PROTO(struct gb_bundle *bundle), + + TP_ARGS(bundle), + + TP_STRUCT__entry( + __field(u8, intf_id) + __field(u8, id) + __field(u8, class) + __field(size_t, num_cports) + ), + + TP_fast_assign( + __entry->intf_id = bundle->intf->interface_id; + __entry->id = bundle->id; + __entry->class = bundle->class; + __entry->num_cports = bundle->num_cports; + ), + + TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu", + __entry->intf_id, __entry->id, __entry->class, + __entry->num_cports) +); + +#define DEFINE_BUNDLE_EVENT(name) \ + DEFINE_EVENT(gb_bundle, name, \ + TP_PROTO(struct gb_bundle *bundle), \ + TP_ARGS(bundle)) + +/* + * Occurs after a new bundle is successfully created. + */ +DEFINE_BUNDLE_EVENT(gb_bundle_create); + +/* + * Occurs when the last reference to a bundle has been dropped, + * before its resources are freed. + */ +DEFINE_BUNDLE_EVENT(gb_bundle_release); + +/* + * Occurs when a bundle is added to an interface as the interface + * is enabled. + */ +DEFINE_BUNDLE_EVENT(gb_bundle_add); + +/* + * Occurs when a registered bundle gets destroyed, normally at the + * time an interface is disabled. + */ +DEFINE_BUNDLE_EVENT(gb_bundle_destroy); + +#undef DEFINE_BUNDLE_EVENT + +DECLARE_EVENT_CLASS(gb_interface, + + TP_PROTO(struct gb_interface *intf), + + TP_ARGS(intf), + + TP_STRUCT__entry( + __field(u8, module_id) + __field(u8, id) /* Interface id */ + __field(u8, device_id) + __field(int, disconnected) /* bool */ + __field(int, ejected) /* bool */ + __field(int, active) /* bool */ + __field(int, enabled) /* bool */ + __field(int, mode_switch) /* bool */ + ), + + TP_fast_assign( + __entry->module_id = intf->module->module_id; + __entry->id = intf->interface_id; + __entry->device_id = intf->device_id; + __entry->disconnected = intf->disconnected; + __entry->ejected = intf->ejected; + __entry->active = intf->active; + __entry->enabled = intf->enabled; + __entry->mode_switch = intf->mode_switch; + ), + + TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d", + __entry->id, __entry->device_id, __entry->module_id, + __entry->disconnected, __entry->ejected, __entry->active, + __entry->enabled, __entry->mode_switch) +); + +#define DEFINE_INTERFACE_EVENT(name) \ + DEFINE_EVENT(gb_interface, name, \ + TP_PROTO(struct gb_interface *intf), \ + TP_ARGS(intf)) + +/* + * Occurs after a new interface is successfully created. + */ +DEFINE_INTERFACE_EVENT(gb_interface_create); + +/* + * Occurs after the last reference to an interface has been dropped. + */ +DEFINE_INTERFACE_EVENT(gb_interface_release); + +/* + * Occurs after an interface has been registered. + */ +DEFINE_INTERFACE_EVENT(gb_interface_add); + +/* + * Occurs when a registered interface gets deregistered.
+ */ +DEFINE_INTERFACE_EVENT(gb_interface_del); + +/* + * Occurs when a registered interface has been successfully + * activated. + */ +DEFINE_INTERFACE_EVENT(gb_interface_activate); + +/* + * Occurs when an activated interface is being deactivated. + */ +DEFINE_INTERFACE_EVENT(gb_interface_deactivate); + +/* + * Occurs when an interface has been successfully enabled. + */ +DEFINE_INTERFACE_EVENT(gb_interface_enable); + +/* + * Occurs when an enabled interface is being disabled. + */ +DEFINE_INTERFACE_EVENT(gb_interface_disable); + +#undef DEFINE_INTERFACE_EVENT + +DECLARE_EVENT_CLASS(gb_module, + + TP_PROTO(struct gb_module *module), + + TP_ARGS(module), + + TP_STRUCT__entry( + __field(int, hd_bus_id) + __field(u8, module_id) + __field(size_t, num_interfaces) + __field(int, disconnected) /* bool */ + ), + + TP_fast_assign( + __entry->hd_bus_id = module->hd->bus_id; + __entry->module_id = module->module_id; + __entry->num_interfaces = module->num_interfaces; + __entry->disconnected = module->disconnected; + ), + + TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d", + __entry->hd_bus_id, __entry->module_id, + __entry->num_interfaces, __entry->disconnected) +); + +#define DEFINE_MODULE_EVENT(name) \ + DEFINE_EVENT(gb_module, name, \ + TP_PROTO(struct gb_module *module), \ + TP_ARGS(module)) + +/* + * Occurs after a new module is successfully created, before + * creating any of its interfaces. + */ +DEFINE_MODULE_EVENT(gb_module_create); + +/* + * Occurs after the last reference to a module has been dropped. + */ +DEFINE_MODULE_EVENT(gb_module_release); + +/* + * Occurs after a module is successfully created, before registering + * any of its interfaces. + */ +DEFINE_MODULE_EVENT(gb_module_add); + +/* + * Occurs when a module is deleted, before deregistering its + * interfaces. + */ +DEFINE_MODULE_EVENT(gb_module_del); + +#undef DEFINE_MODULE_EVENT + +DECLARE_EVENT_CLASS(gb_host_device, + + TP_PROTO(struct gb_host_device *hd), + + TP_ARGS(hd), + + TP_STRUCT__entry( + __field(int, bus_id) + __field(size_t, num_cports) + __field(size_t, buffer_size_max) + ), + + TP_fast_assign( + __entry->bus_id = hd->bus_id; + __entry->num_cports = hd->num_cports; + __entry->buffer_size_max = hd->buffer_size_max; + ), + + TP_printk("bus_id=%d num_cports=%zu mtu=%zu", + __entry->bus_id, __entry->num_cports, + __entry->buffer_size_max) +); + +#define DEFINE_HD_EVENT(name) \ + DEFINE_EVENT(gb_host_device, name, \ + TP_PROTO(struct gb_host_device *hd), \ + TP_ARGS(hd)) + +/* + * Occurs after a new host device is successfully created, before + * its SVC has been set up. + */ +DEFINE_HD_EVENT(gb_hd_create); + +/* + * Occurs after the last reference to a host device has been + * dropped. + */ +DEFINE_HD_EVENT(gb_hd_release); + +/* + * Occurs after a new host device has been added, after the + * connection to its SVC has been enabled. + */ +DEFINE_HD_EVENT(gb_hd_add); + +/* + * Occurs when a host device is being disconnected from the AP USB + * host controller. + */ +DEFINE_HD_EVENT(gb_hd_del); + +/* + * Occurs when a host device has passed received data to the Greybus + * core, after it has been determined it is destined for a valid + * CPort. + */ +DEFINE_HD_EVENT(gb_hd_in); + +#undef DEFINE_HD_EVENT + +#endif /* _TRACE_GREYBUS_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
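+
+/*
+ * This header is included twice: normally it only declares the trace
+ * events, but core.c defines CREATE_TRACE_POINTS before including it,
+ * which makes the final include below expand the same macros into the
+ * actual tracepoint definitions. TRACE_INCLUDE_PATH points that second
+ * pass at the current directory instead of include/trace/events/.
+ */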
+ +/* + * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal + */ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE greybus_trace +#include + diff --git a/drivers/greybus/hd.c b/drivers/greybus/hd.c new file mode 100644 index 000000000000..72b21bf2d7d3 --- /dev/null +++ b/drivers/greybus/hd.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus Host Device + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#include +#include +#include + +#include "greybus_trace.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create); +EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release); +EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add); +EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del); +EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in); +EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit); + +static struct ida gb_hd_bus_id_map; + +int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool async) +{ + if (!hd || !hd->driver || !hd->driver->output) + return -EINVAL; + return hd->driver->output(hd, req, size, cmd, async); +} +EXPORT_SYMBOL_GPL(gb_hd_output); + +static ssize_t bus_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_host_device *hd = to_gb_host_device(dev); + + return sprintf(buf, "%d\n", hd->bus_id); +} +static DEVICE_ATTR_RO(bus_id); + +static struct attribute *bus_attrs[] = { + &dev_attr_bus_id.attr, + NULL +}; +ATTRIBUTE_GROUPS(bus); + +int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id) +{ + struct ida *id_map = &hd->cport_id_map; + int ret; + + ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL); + if (ret < 0) { + dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(gb_hd_cport_reserve); + +void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id) +{ + struct ida *id_map = &hd->cport_id_map; + + ida_simple_remove(id_map, cport_id); +} +EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved); + +/* Locking: Caller guarantees serialisation */ +int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, + unsigned long flags) +{ + struct ida *id_map = &hd->cport_id_map; + int ida_start, ida_end; + + if (hd->driver->cport_allocate) + return hd->driver->cport_allocate(hd, cport_id, flags); + + if (cport_id < 0) { + ida_start = 0; + ida_end = hd->num_cports; + } else if (cport_id < hd->num_cports) { + ida_start = cport_id; + ida_end = cport_id + 1; + } else { + dev_err(&hd->dev, "cport %d not available\n", cport_id); + return -EINVAL; + } + + return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL); +} + +/* Locking: Caller guarantees serialisation */ +void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id) +{ + if (hd->driver->cport_release) { + hd->driver->cport_release(hd, cport_id); + return; + } + + ida_simple_remove(&hd->cport_id_map, cport_id); +} + +static void gb_hd_release(struct device *dev) +{ + struct gb_host_device *hd = to_gb_host_device(dev); + + trace_gb_hd_release(hd); + + if (hd->svc) + gb_svc_put(hd->svc); + ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id); + ida_destroy(&hd->cport_id_map); + kfree(hd); +} + +struct device_type greybus_hd_type = { + .name = "greybus_host_device", + .release = gb_hd_release, +}; + +struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, + struct device *parent, + size_t buffer_size_max, + size_t num_cports) +{ + struct gb_host_device *hd; + int ret; + + /* + * Validate that the driver implements all of the callbacks + * so that 
we don't have to check every time we invoke them. + */ + if ((!driver->message_send) || (!driver->message_cancel)) { + dev_err(parent, "mandatory hd-callbacks missing\n"); + return ERR_PTR(-EINVAL); + } + + if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) { + dev_err(parent, "greybus host-device buffers too small\n"); + return ERR_PTR(-EINVAL); + } + + if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) { + dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports); + return ERR_PTR(-EINVAL); + } + + /* + * Make sure to never allocate messages larger than what the Greybus + * protocol supports. + */ + if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) { + dev_warn(parent, "limiting buffer size to %u\n", + GB_OPERATION_MESSAGE_SIZE_MAX); + buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX; + } + + hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL); + if (!hd) + return ERR_PTR(-ENOMEM); + + ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL); + if (ret < 0) { + kfree(hd); + return ERR_PTR(ret); + } + hd->bus_id = ret; + + hd->driver = driver; + INIT_LIST_HEAD(&hd->modules); + INIT_LIST_HEAD(&hd->connections); + ida_init(&hd->cport_id_map); + hd->buffer_size_max = buffer_size_max; + hd->num_cports = num_cports; + + hd->dev.parent = parent; + hd->dev.bus = &greybus_bus_type; + hd->dev.type = &greybus_hd_type; + hd->dev.groups = bus_groups; + hd->dev.dma_mask = hd->dev.parent->dma_mask; + device_initialize(&hd->dev); + dev_set_name(&hd->dev, "greybus%d", hd->bus_id); + + trace_gb_hd_create(hd); + + hd->svc = gb_svc_create(hd); + if (!hd->svc) { + dev_err(&hd->dev, "failed to create svc\n"); + put_device(&hd->dev); + return ERR_PTR(-ENOMEM); + } + + return hd; +} +EXPORT_SYMBOL_GPL(gb_hd_create); + +int gb_hd_add(struct gb_host_device *hd) +{ + int ret; + + ret = device_add(&hd->dev); + if (ret) + return ret; + + ret = gb_svc_add(hd->svc); + if (ret) { + device_del(&hd->dev); + return ret; + } + + trace_gb_hd_add(hd); + + return 0; +} +EXPORT_SYMBOL_GPL(gb_hd_add); + +void gb_hd_del(struct gb_host_device *hd) +{ + trace_gb_hd_del(hd); + + /* + * Tear down the svc and flush any on-going hotplug processing before + * removing the remaining interfaces. + */ + gb_svc_del(hd->svc); + + device_del(&hd->dev); +} +EXPORT_SYMBOL_GPL(gb_hd_del); + +void gb_hd_shutdown(struct gb_host_device *hd) +{ + gb_svc_del(hd->svc); +} +EXPORT_SYMBOL_GPL(gb_hd_shutdown); + +void gb_hd_put(struct gb_host_device *hd) +{ + put_device(&hd->dev); +} +EXPORT_SYMBOL_GPL(gb_hd_put); + +int __init gb_hd_init(void) +{ + ida_init(&gb_hd_bus_id_map); + + return 0; +} + +void gb_hd_exit(void) +{ + ida_destroy(&gb_hd_bus_id_map); +} diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c new file mode 100644 index 000000000000..67dbe6fda9a1 --- /dev/null +++ b/drivers/greybus/interface.c @@ -0,0 +1,1263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus interface code + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd.
+ */ + +#include +#include + +#include "greybus_trace.h" + +#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000 + +#define GB_INTERFACE_DEVICE_ID_BAD 0xff + +#define GB_INTERFACE_AUTOSUSPEND_MS 3000 + +/* Time required for interface to enter standby before disabling REFCLK */ +#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20 + +/* Don't-care selector index */ +#define DME_SELECTOR_INDEX_NULL 0 + +/* DME attributes */ +/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */ +#define DME_T_TST_SRC_INCREMENT 0x4083 + +#define DME_DDBL1_MANUFACTURERID 0x5003 +#define DME_DDBL1_PRODUCTID 0x5004 + +#define DME_TOSHIBA_GMP_VID 0x6000 +#define DME_TOSHIBA_GMP_PID 0x6001 +#define DME_TOSHIBA_GMP_SN0 0x6002 +#define DME_TOSHIBA_GMP_SN1 0x6003 +#define DME_TOSHIBA_GMP_INIT_STATUS 0x6101 + +/* DDBL1 Manufacturer and Product ids */ +#define TOSHIBA_DMID 0x0126 +#define TOSHIBA_ES2_BRIDGE_DPID 0x1000 +#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001 +#define TOSHIBA_ES3_GBPHY_DPID 0x1002 + +static int gb_interface_hibernate_link(struct gb_interface *intf); +static int gb_interface_refclk_set(struct gb_interface *intf, bool enable); + +static int gb_interface_dme_attr_get(struct gb_interface *intf, + u16 attr, u32 *val) +{ + return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id, + attr, DME_SELECTOR_INDEX_NULL, val); +} + +static int gb_interface_read_ara_dme(struct gb_interface *intf) +{ + u32 sn0, sn1; + int ret; + + /* + * Unless this is a Toshiba bridge, bail out until we have defined + * standard GMP attributes. + */ + if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) { + dev_err(&intf->dev, "unknown manufacturer %08x\n", + intf->ddbl1_manufacturer_id); + return -ENODEV; + } + + ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID, + &intf->vendor_id); + if (ret) + return ret; + + ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID, + &intf->product_id); + if (ret) + return ret; + + ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0); + if (ret) + return ret; + + ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1); + if (ret) + return ret; + + intf->serial_number = (u64)sn1 << 32 | sn0; + + return 0; +} + +static int gb_interface_read_dme(struct gb_interface *intf) +{ + int ret; + + /* DME attributes have already been read */ + if (intf->dme_read) + return 0; + + ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID, + &intf->ddbl1_manufacturer_id); + if (ret) + return ret; + + ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID, + &intf->ddbl1_product_id); + if (ret) + return ret; + + if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID && + intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) { + intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS; + intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS; + } + + ret = gb_interface_read_ara_dme(intf); + if (ret) + return ret; + + intf->dme_read = true; + + return 0; +} + +static int gb_interface_route_create(struct gb_interface *intf) +{ + struct gb_svc *svc = intf->hd->svc; + u8 intf_id = intf->interface_id; + u8 device_id; + int ret; + + /* Allocate an interface device id. */ + ret = ida_simple_get(&svc->device_id_map, + GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1, + GFP_KERNEL); + if (ret < 0) { + dev_err(&intf->dev, "failed to allocate device id: %d\n", ret); + return ret; + } + device_id = ret; + + ret = gb_svc_intf_device_id(svc, intf_id, device_id); + if (ret) { + dev_err(&intf->dev, "failed to set device id %u: %d\n", + device_id, ret); + goto err_ida_remove; + } + + /* FIXME: Hard-coded AP device id. 
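+	 * The AP's device id is the compile-time constant GB_SVC_DEVICE_ID_AP
+	 * below rather than a value queried from the SVC.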
*/ + ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP, + intf_id, device_id); + if (ret) { + dev_err(&intf->dev, "failed to create route: %d\n", ret); + goto err_svc_id_free; + } + + intf->device_id = device_id; + + return 0; + +err_svc_id_free: + /* + * XXX Should we tell SVC that this id doesn't belong to interface + * XXX anymore. + */ +err_ida_remove: + ida_simple_remove(&svc->device_id_map, device_id); + + return ret; +} + +static void gb_interface_route_destroy(struct gb_interface *intf) +{ + struct gb_svc *svc = intf->hd->svc; + + if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD) + return; + + gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id); + ida_simple_remove(&svc->device_id_map, intf->device_id); + intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; +} + +/* Locking: Caller holds the interface mutex. */ +static int gb_interface_legacy_mode_switch(struct gb_interface *intf) +{ + int ret; + + dev_info(&intf->dev, "legacy mode switch detected\n"); + + /* Mark as disconnected to prevent I/O during disable. */ + intf->disconnected = true; + gb_interface_disable(intf); + intf->disconnected = false; + + ret = gb_interface_enable(intf); + if (ret) { + dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret); + gb_interface_deactivate(intf); + } + + return ret; +} + +void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, + u32 mailbox) +{ + mutex_lock(&intf->mutex); + + if (result) { + dev_warn(&intf->dev, + "mailbox event with UniPro error: 0x%04x\n", + result); + goto err_disable; + } + + if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) { + dev_warn(&intf->dev, + "mailbox event with unexpected value: 0x%08x\n", + mailbox); + goto err_disable; + } + + if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) { + gb_interface_legacy_mode_switch(intf); + goto out_unlock; + } + + if (!intf->mode_switch) { + dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n", + mailbox); + goto err_disable; + } + + dev_info(&intf->dev, "mode switch detected\n"); + + complete(&intf->mode_switch_completion); + +out_unlock: + mutex_unlock(&intf->mutex); + + return; + +err_disable: + gb_interface_disable(intf); + gb_interface_deactivate(intf); + mutex_unlock(&intf->mutex); +} + +static void gb_interface_mode_switch_work(struct work_struct *work) +{ + struct gb_interface *intf; + struct gb_control *control; + unsigned long timeout; + int ret; + + intf = container_of(work, struct gb_interface, mode_switch_work); + + mutex_lock(&intf->mutex); + /* Make sure interface is still enabled. */ + if (!intf->enabled) { + dev_dbg(&intf->dev, "mode switch aborted\n"); + intf->mode_switch = false; + mutex_unlock(&intf->mutex); + goto out_interface_put; + } + + /* + * Prepare the control device for mode switch and make sure to get an + * extra reference before it goes away during interface disable. + */ + control = gb_control_get(intf->control); + gb_control_mode_switch_prepare(control); + gb_interface_disable(intf); + mutex_unlock(&intf->mutex); + + timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT); + ret = wait_for_completion_interruptible_timeout( + &intf->mode_switch_completion, timeout); + + /* Finalise control-connection mode switch. */ + gb_control_mode_switch_complete(control); + gb_control_put(control); + + if (ret < 0) { + dev_err(&intf->dev, "mode switch interrupted\n"); + goto err_deactivate; + } else if (ret == 0) { + dev_err(&intf->dev, "mode switch timed out\n"); + goto err_deactivate; + } + + /* Re-enable (re-enumerate) interface if still active. 
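+	 * The interface may have been powered off while we waited for the
+	 * mailbox event, in which case there is nothing left to re-enable.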
*/ + mutex_lock(&intf->mutex); + intf->mode_switch = false; + if (intf->active) { + ret = gb_interface_enable(intf); + if (ret) { + dev_err(&intf->dev, "failed to re-enable interface: %d\n", + ret); + gb_interface_deactivate(intf); + } + } + mutex_unlock(&intf->mutex); + +out_interface_put: + gb_interface_put(intf); + + return; + +err_deactivate: + mutex_lock(&intf->mutex); + intf->mode_switch = false; + gb_interface_deactivate(intf); + mutex_unlock(&intf->mutex); + + gb_interface_put(intf); +} + +int gb_interface_request_mode_switch(struct gb_interface *intf) +{ + int ret = 0; + + mutex_lock(&intf->mutex); + if (intf->mode_switch) { + ret = -EBUSY; + goto out_unlock; + } + + intf->mode_switch = true; + reinit_completion(&intf->mode_switch_completion); + + /* + * Get a reference to the interface device, which will be put once the + * mode switch is complete. + */ + get_device(&intf->dev); + + if (!queue_work(system_long_wq, &intf->mode_switch_work)) { + put_device(&intf->dev); + ret = -EBUSY; + goto out_unlock; + } + +out_unlock: + mutex_unlock(&intf->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch); + +/* + * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the + * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and + * clear it after reading a non-zero value from it. + * + * FIXME: This is module-hardware dependent and needs to be extended for every + * type of module we want to support. + */ +static int gb_interface_read_and_clear_init_status(struct gb_interface *intf) +{ + struct gb_host_device *hd = intf->hd; + unsigned long bootrom_quirks; + unsigned long s2l_quirks; + int ret; + u32 value; + u16 attr; + u8 init_status; + + /* + * ES2 bridges use T_TstSrcIncrement for the init status. + * + * FIXME: Remove ES2 support + */ + if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS) + attr = DME_T_TST_SRC_INCREMENT; + else + attr = DME_TOSHIBA_GMP_INIT_STATUS; + + ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr, + DME_SELECTOR_INDEX_NULL, &value); + if (ret) + return ret; + + /* + * A nonzero init status indicates the module has finished + * initializing. + */ + if (!value) { + dev_err(&intf->dev, "invalid init status\n"); + return -ENODEV; + } + + /* + * Extract the init status. + * + * For ES2: We need to check lowest 8 bits of 'value'. + * For ES3: We need to check highest 8 bits out of 32 of 'value'. + * + * FIXME: Remove ES2 support + */ + if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS) + init_status = value & 0xff; + else + init_status = value >> 24; + + /* + * Check if the interface is executing the quirky ES3 bootrom that, + * for example, requires E2EFC, CSD and CSV to be disabled. + */ + bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES | + GB_INTERFACE_QUIRK_FORCED_DISABLE | + GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH | + GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE; + + s2l_quirks = GB_INTERFACE_QUIRK_NO_PM; + + switch (init_status) { + case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED: + case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED: + intf->quirks |= bootrom_quirks; + break; + case GB_INIT_S2_LOADER_BOOT_STARTED: + /* S2 Loader doesn't support runtime PM */ + intf->quirks &= ~bootrom_quirks; + intf->quirks |= s2l_quirks; + break; + default: + intf->quirks &= ~bootrom_quirks; + intf->quirks &= ~s2l_quirks; + } + + /* Clear the init status. 
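+	 * Writing zero back acknowledges that the AP has read the attribute,
+	 * as the comment at the top of this function describes.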
*/ + return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr, + DME_SELECTOR_INDEX_NULL, 0); +} + +/* interface sysfs attributes */ +#define gb_interface_attr(field, type) \ +static ssize_t field##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct gb_interface *intf = to_gb_interface(dev); \ + return scnprintf(buf, PAGE_SIZE, type"\n", intf->field); \ +} \ +static DEVICE_ATTR_RO(field) + +gb_interface_attr(ddbl1_manufacturer_id, "0x%08x"); +gb_interface_attr(ddbl1_product_id, "0x%08x"); +gb_interface_attr(interface_id, "%u"); +gb_interface_attr(vendor_id, "0x%08x"); +gb_interface_attr(product_id, "0x%08x"); +gb_interface_attr(serial_number, "0x%016llx"); + +static ssize_t voltage_now_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_interface *intf = to_gb_interface(dev); + int ret; + u32 measurement; + + ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, + GB_SVC_PWRMON_TYPE_VOL, + &measurement); + if (ret) { + dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret); + return ret; + } + + return sprintf(buf, "%u\n", measurement); +} +static DEVICE_ATTR_RO(voltage_now); + +static ssize_t current_now_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_interface *intf = to_gb_interface(dev); + int ret; + u32 measurement; + + ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, + GB_SVC_PWRMON_TYPE_CURR, + &measurement); + if (ret) { + dev_err(&intf->dev, "failed to get current sample (%d)\n", ret); + return ret; + } + + return sprintf(buf, "%u\n", measurement); +} +static DEVICE_ATTR_RO(current_now); + +static ssize_t power_now_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_interface *intf = to_gb_interface(dev); + int ret; + u32 measurement; + + ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, + GB_SVC_PWRMON_TYPE_PWR, + &measurement); + if (ret) { + dev_err(&intf->dev, "failed to get power sample (%d)\n", ret); + return ret; + } + + return sprintf(buf, "%u\n", measurement); +} +static DEVICE_ATTR_RO(power_now); + +static ssize_t power_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_interface *intf = to_gb_interface(dev); + + if (intf->active) + return scnprintf(buf, PAGE_SIZE, "on\n"); + else + return scnprintf(buf, PAGE_SIZE, "off\n"); +} + +static ssize_t power_state_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t len) +{ + struct gb_interface *intf = to_gb_interface(dev); + bool activate; + int ret = 0; + + if (kstrtobool(buf, &activate)) + return -EINVAL; + + mutex_lock(&intf->mutex); + + if (activate == intf->active) + goto unlock; + + if (activate) { + ret = gb_interface_activate(intf); + if (ret) { + dev_err(&intf->dev, + "failed to activate interface: %d\n", ret); + goto unlock; + } + + ret = gb_interface_enable(intf); + if (ret) { + dev_err(&intf->dev, + "failed to enable interface: %d\n", ret); + gb_interface_deactivate(intf); + goto unlock; + } + } else { + gb_interface_disable(intf); + gb_interface_deactivate(intf); + } + +unlock: + mutex_unlock(&intf->mutex); + + if (ret) + return ret; + + return len; +} +static DEVICE_ATTR_RW(power_state); + +static const char *gb_interface_type_string(struct gb_interface *intf) +{ + static const char * const types[] = { + [GB_INTERFACE_TYPE_INVALID] = "invalid", + [GB_INTERFACE_TYPE_UNKNOWN] = "unknown", + [GB_INTERFACE_TYPE_DUMMY] = "dummy", + 
[GB_INTERFACE_TYPE_UNIPRO] = "unipro", + [GB_INTERFACE_TYPE_GREYBUS] = "greybus", + }; + + return types[intf->type]; +} + +static ssize_t interface_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_interface *intf = to_gb_interface(dev); + + return sprintf(buf, "%s\n", gb_interface_type_string(intf)); +} +static DEVICE_ATTR_RO(interface_type); + +static struct attribute *interface_unipro_attrs[] = { + &dev_attr_ddbl1_manufacturer_id.attr, + &dev_attr_ddbl1_product_id.attr, + NULL +}; + +static struct attribute *interface_greybus_attrs[] = { + &dev_attr_vendor_id.attr, + &dev_attr_product_id.attr, + &dev_attr_serial_number.attr, + NULL +}; + +static struct attribute *interface_power_attrs[] = { + &dev_attr_voltage_now.attr, + &dev_attr_current_now.attr, + &dev_attr_power_now.attr, + &dev_attr_power_state.attr, + NULL +}; + +static struct attribute *interface_common_attrs[] = { + &dev_attr_interface_id.attr, + &dev_attr_interface_type.attr, + NULL +}; + +static umode_t interface_unipro_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct gb_interface *intf = to_gb_interface(dev); + + switch (intf->type) { + case GB_INTERFACE_TYPE_UNIPRO: + case GB_INTERFACE_TYPE_GREYBUS: + return attr->mode; + default: + return 0; + } +} + +static umode_t interface_greybus_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct gb_interface *intf = to_gb_interface(dev); + + switch (intf->type) { + case GB_INTERFACE_TYPE_GREYBUS: + return attr->mode; + default: + return 0; + } +} + +static umode_t interface_power_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct gb_interface *intf = to_gb_interface(dev); + + switch (intf->type) { + case GB_INTERFACE_TYPE_UNIPRO: + case GB_INTERFACE_TYPE_GREYBUS: + return attr->mode; + default: + return 0; + } +} + +static const struct attribute_group interface_unipro_group = { + .is_visible = interface_unipro_is_visible, + .attrs = interface_unipro_attrs, +}; + +static const struct attribute_group interface_greybus_group = { + .is_visible = interface_greybus_is_visible, + .attrs = interface_greybus_attrs, +}; + +static const struct attribute_group interface_power_group = { + .is_visible = interface_power_is_visible, + .attrs = interface_power_attrs, +}; + +static const struct attribute_group interface_common_group = { + .attrs = interface_common_attrs, +}; + +static const struct attribute_group *interface_groups[] = { + &interface_unipro_group, + &interface_greybus_group, + &interface_power_group, + &interface_common_group, + NULL +}; + +static void gb_interface_release(struct device *dev) +{ + struct gb_interface *intf = to_gb_interface(dev); + + trace_gb_interface_release(intf); + + kfree(intf); +} + +#ifdef CONFIG_PM +static int gb_interface_suspend(struct device *dev) +{ + struct gb_interface *intf = to_gb_interface(dev); + int ret; + + ret = gb_control_interface_suspend_prepare(intf->control); + if (ret) + return ret; + + ret = gb_control_suspend(intf->control); + if (ret) + goto err_hibernate_abort; + + ret = gb_interface_hibernate_link(intf); + if (ret) + return ret; + + /* Delay to allow interface to enter standby before disabling refclk */ + msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS); + + ret = gb_interface_refclk_set(intf, false); + if (ret) + return ret; + + 
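/*
+	 * The link is hibernated and the reference clock is gated; the
+	 * module is now in its low-power standby state.
+	 */
+	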
return 0; + +err_hibernate_abort: + gb_control_interface_hibernate_abort(intf->control); + + return ret; +} + +static int gb_interface_resume(struct device *dev) +{ + struct gb_interface *intf = to_gb_interface(dev); + struct gb_svc *svc = intf->hd->svc; + int ret; + + ret = gb_interface_refclk_set(intf, true); + if (ret) + return ret; + + ret = gb_svc_intf_resume(svc, intf->interface_id); + if (ret) + return ret; + + ret = gb_control_resume(intf->control); + if (ret) + return ret; + + return 0; +} + +static int gb_interface_runtime_idle(struct device *dev) +{ + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + + return 0; +} +#endif + +static const struct dev_pm_ops gb_interface_pm_ops = { + SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume, + gb_interface_runtime_idle) +}; + +struct device_type greybus_interface_type = { + .name = "greybus_interface", + .release = gb_interface_release, + .pm = &gb_interface_pm_ops, +}; + +/* + * A Greybus module represents a user-replaceable component on a GMP + * phone. An interface is the physical connection on that module. A + * module may have more than one interface. + * + * Create a gb_interface structure to represent a discovered interface. + * The position of the interface within the Endo is encoded in the + * "interface_id" argument. + * + * Returns a pointer to the new interface or a null pointer if a + * failure occurs due to memory exhaustion. + */ +struct gb_interface *gb_interface_create(struct gb_module *module, + u8 interface_id) +{ + struct gb_host_device *hd = module->hd; + struct gb_interface *intf; + + intf = kzalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) + return NULL; + + intf->hd = hd; /* XXX refcount? */ + intf->module = module; + intf->interface_id = interface_id; + INIT_LIST_HEAD(&intf->bundles); + INIT_LIST_HEAD(&intf->manifest_descs); + mutex_init(&intf->mutex); + INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work); + init_completion(&intf->mode_switch_completion); + + /* Invalid device id to start with */ + intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; + + intf->dev.parent = &module->dev; + intf->dev.bus = &greybus_bus_type; + intf->dev.type = &greybus_interface_type; + intf->dev.groups = interface_groups; + intf->dev.dma_mask = module->dev.dma_mask; + device_initialize(&intf->dev); + dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev), + interface_id); + + pm_runtime_set_autosuspend_delay(&intf->dev, + GB_INTERFACE_AUTOSUSPEND_MS); + + trace_gb_interface_create(intf); + + return intf; +} + +static int gb_interface_vsys_set(struct gb_interface *intf, bool enable) +{ + struct gb_svc *svc = intf->hd->svc; + int ret; + + dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); + + ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable); + if (ret) { + dev_err(&intf->dev, "failed to set v_sys: %d\n", ret); + return ret; + } + + return 0; +} + +static int gb_interface_refclk_set(struct gb_interface *intf, bool enable) +{ + struct gb_svc *svc = intf->hd->svc; + int ret; + + dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); + + ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable); + if (ret) { + dev_err(&intf->dev, "failed to set refclk: %d\n", ret); + return ret; + } + + return 0; +} + +static int gb_interface_unipro_set(struct gb_interface *intf, bool enable) +{ + struct gb_svc *svc = intf->hd->svc; + int ret; + + dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); + + ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable); + if (ret) { + dev_err(&intf->dev, "failed to set
UniPro: %d\n", ret); + return ret; + } + + return 0; +} + +static int gb_interface_activate_operation(struct gb_interface *intf, + enum gb_interface_type *intf_type) +{ + struct gb_svc *svc = intf->hd->svc; + u8 type; + int ret; + + dev_dbg(&intf->dev, "%s\n", __func__); + + ret = gb_svc_intf_activate(svc, intf->interface_id, &type); + if (ret) { + dev_err(&intf->dev, "failed to activate: %d\n", ret); + return ret; + } + + switch (type) { + case GB_SVC_INTF_TYPE_DUMMY: + *intf_type = GB_INTERFACE_TYPE_DUMMY; + /* FIXME: handle as an error for now */ + return -ENODEV; + case GB_SVC_INTF_TYPE_UNIPRO: + *intf_type = GB_INTERFACE_TYPE_UNIPRO; + dev_err(&intf->dev, "interface type UniPro not supported\n"); + /* FIXME: handle as an error for now */ + return -ENODEV; + case GB_SVC_INTF_TYPE_GREYBUS: + *intf_type = GB_INTERFACE_TYPE_GREYBUS; + break; + default: + dev_err(&intf->dev, "unknown interface type: %u\n", type); + *intf_type = GB_INTERFACE_TYPE_UNKNOWN; + return -ENODEV; + } + + return 0; +} + +static int gb_interface_hibernate_link(struct gb_interface *intf) +{ + struct gb_svc *svc = intf->hd->svc; + + return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id); +} + +static int _gb_interface_activate(struct gb_interface *intf, + enum gb_interface_type *type) +{ + int ret; + + *type = GB_INTERFACE_TYPE_UNKNOWN; + + if (intf->ejected || intf->removed) + return -ENODEV; + + ret = gb_interface_vsys_set(intf, true); + if (ret) + return ret; + + ret = gb_interface_refclk_set(intf, true); + if (ret) + goto err_vsys_disable; + + ret = gb_interface_unipro_set(intf, true); + if (ret) + goto err_refclk_disable; + + ret = gb_interface_activate_operation(intf, type); + if (ret) { + switch (*type) { + case GB_INTERFACE_TYPE_UNIPRO: + case GB_INTERFACE_TYPE_GREYBUS: + goto err_hibernate_link; + default: + goto err_unipro_disable; + } + } + + ret = gb_interface_read_dme(intf); + if (ret) + goto err_hibernate_link; + + ret = gb_interface_route_create(intf); + if (ret) + goto err_hibernate_link; + + intf->active = true; + + trace_gb_interface_activate(intf); + + return 0; + +err_hibernate_link: + gb_interface_hibernate_link(intf); +err_unipro_disable: + gb_interface_unipro_set(intf, false); +err_refclk_disable: + gb_interface_refclk_set(intf, false); +err_vsys_disable: + gb_interface_vsys_set(intf, false); + + return ret; +} + +/* + * At present, we assume a UniPro-only module to be a Greybus module that + * failed to send its mailbox poke. There is some reason to believe that this + * is because of a bug in the ES3 bootrom. + * + * FIXME: Check if this is a Toshiba bridge before retrying? + */ +static int _gb_interface_activate_es3_hack(struct gb_interface *intf, + enum gb_interface_type *type) +{ + int retries = 3; + int ret; + + while (retries--) { + ret = _gb_interface_activate(intf, type); + if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO) + continue; + + break; + } + + return ret; +} + +/* + * Activate an interface. + * + * Locking: Caller holds the interface mutex. + */ +int gb_interface_activate(struct gb_interface *intf) +{ + enum gb_interface_type type; + int ret; + + switch (intf->type) { + case GB_INTERFACE_TYPE_INVALID: + case GB_INTERFACE_TYPE_GREYBUS: + ret = _gb_interface_activate_es3_hack(intf, &type); + break; + default: + ret = _gb_interface_activate(intf, &type); + } + + /* Make sure type is detected correctly during reactivation. 
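+	 * A type recorded on an earlier activation must match the type this
+	 * activation reports; on a mismatch the interface is deactivated and
+	 * -EIO is returned.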
*/ + if (intf->type != GB_INTERFACE_TYPE_INVALID) { + if (type != intf->type) { + dev_err(&intf->dev, "failed to detect interface type\n"); + + if (!ret) + gb_interface_deactivate(intf); + + return -EIO; + } + } else { + intf->type = type; + } + + return ret; +} + +/* + * Deactivate an interface. + * + * Locking: Caller holds the interface mutex. + */ +void gb_interface_deactivate(struct gb_interface *intf) +{ + if (!intf->active) + return; + + trace_gb_interface_deactivate(intf); + + /* Abort any ongoing mode switch. */ + if (intf->mode_switch) + complete(&intf->mode_switch_completion); + + gb_interface_route_destroy(intf); + gb_interface_hibernate_link(intf); + gb_interface_unipro_set(intf, false); + gb_interface_refclk_set(intf, false); + gb_interface_vsys_set(intf, false); + + intf->active = false; +} + +/* + * Enable an interface by enabling its control connection, fetching the + * manifest and other information over it, and finally registering its child + * devices. + * + * Locking: Caller holds the interface mutex. + */ +int gb_interface_enable(struct gb_interface *intf) +{ + struct gb_control *control; + struct gb_bundle *bundle, *tmp; + int ret, size; + void *manifest; + + ret = gb_interface_read_and_clear_init_status(intf); + if (ret) { + dev_err(&intf->dev, "failed to clear init status: %d\n", ret); + return ret; + } + + /* Establish control connection */ + control = gb_control_create(intf); + if (IS_ERR(control)) { + dev_err(&intf->dev, "failed to create control device: %ld\n", + PTR_ERR(control)); + return PTR_ERR(control); + } + intf->control = control; + + ret = gb_control_enable(intf->control); + if (ret) + goto err_put_control; + + /* Get manifest size using control protocol on CPort */ + size = gb_control_get_manifest_size_operation(intf); + if (size <= 0) { + dev_err(&intf->dev, "failed to get manifest size: %d\n", size); + + if (size) + ret = size; + else + ret = -EINVAL; + + goto err_disable_control; + } + + manifest = kmalloc(size, GFP_KERNEL); + if (!manifest) { + ret = -ENOMEM; + goto err_disable_control; + } + + /* Get manifest using control protocol on CPort */ + ret = gb_control_get_manifest_operation(intf, manifest, size); + if (ret) { + dev_err(&intf->dev, "failed to get manifest: %d\n", ret); + goto err_free_manifest; + } + + /* + * Parse the manifest and build up our data structures representing + * what's in it. 
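+	 * On success, gb_manifest_parse() has created the bundle and CPort
+	 * structures that the manifest describes.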
+ */ + if (!gb_manifest_parse(intf, manifest, size)) { + dev_err(&intf->dev, "failed to parse manifest\n"); + ret = -EINVAL; + goto err_destroy_bundles; + } + + ret = gb_control_get_bundle_versions(intf->control); + if (ret) + goto err_destroy_bundles; + + /* Register the control device and any bundles */ + ret = gb_control_add(intf->control); + if (ret) + goto err_destroy_bundles; + + pm_runtime_use_autosuspend(&intf->dev); + pm_runtime_get_noresume(&intf->dev); + pm_runtime_set_active(&intf->dev); + pm_runtime_enable(&intf->dev); + + list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) { + ret = gb_bundle_add(bundle); + if (ret) { + gb_bundle_destroy(bundle); + continue; + } + } + + kfree(manifest); + + intf->enabled = true; + + pm_runtime_put(&intf->dev); + + trace_gb_interface_enable(intf); + + return 0; + +err_destroy_bundles: + list_for_each_entry_safe(bundle, tmp, &intf->bundles, links) + gb_bundle_destroy(bundle); +err_free_manifest: + kfree(manifest); +err_disable_control: + gb_control_disable(intf->control); +err_put_control: + gb_control_put(intf->control); + intf->control = NULL; + + return ret; +} + +/* + * Disable an interface and destroy its bundles. + * + * Locking: Caller holds the interface mutex. + */ +void gb_interface_disable(struct gb_interface *intf) +{ + struct gb_bundle *bundle; + struct gb_bundle *next; + + if (!intf->enabled) + return; + + trace_gb_interface_disable(intf); + + pm_runtime_get_sync(&intf->dev); + + /* Set disconnected flag to avoid I/O during connection tear down. */ + if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE) + intf->disconnected = true; + + list_for_each_entry_safe(bundle, next, &intf->bundles, links) + gb_bundle_destroy(bundle); + + if (!intf->mode_switch && !intf->disconnected) + gb_control_interface_deactivate_prepare(intf->control); + + gb_control_del(intf->control); + gb_control_disable(intf->control); + gb_control_put(intf->control); + intf->control = NULL; + + intf->enabled = false; + + pm_runtime_disable(&intf->dev); + pm_runtime_set_suspended(&intf->dev); + pm_runtime_dont_use_autosuspend(&intf->dev); + pm_runtime_put_noidle(&intf->dev); +} + +/* Register an interface. */ +int gb_interface_add(struct gb_interface *intf) +{ + int ret; + + ret = device_add(&intf->dev); + if (ret) { + dev_err(&intf->dev, "failed to register interface: %d\n", ret); + return ret; + } + + trace_gb_interface_add(intf); + + dev_info(&intf->dev, "Interface added (%s)\n", + gb_interface_type_string(intf)); + + switch (intf->type) { + case GB_INTERFACE_TYPE_GREYBUS: + dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n", + intf->vendor_id, intf->product_id); + /* fall-through */ + case GB_INTERFACE_TYPE_UNIPRO: + dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n", + intf->ddbl1_manufacturer_id, + intf->ddbl1_product_id); + break; + default: + break; + } + + return 0; +} + +/* Deregister an interface. */ +void gb_interface_del(struct gb_interface *intf) +{ + if (device_is_registered(&intf->dev)) { + trace_gb_interface_del(intf); + + device_del(&intf->dev); + dev_info(&intf->dev, "Interface removed\n"); + } +} + +void gb_interface_put(struct gb_interface *intf) +{ + put_device(&intf->dev); +} diff --git a/drivers/greybus/manifest.c b/drivers/greybus/manifest.c new file mode 100644 index 000000000000..dd7040697bde --- /dev/null +++ b/drivers/greybus/manifest.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus manifest parsing + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. 
+ */ + +#include + +static const char *get_descriptor_type_string(u8 type) +{ + switch (type) { + case GREYBUS_TYPE_INVALID: + return "invalid"; + case GREYBUS_TYPE_STRING: + return "string"; + case GREYBUS_TYPE_INTERFACE: + return "interface"; + case GREYBUS_TYPE_CPORT: + return "cport"; + case GREYBUS_TYPE_BUNDLE: + return "bundle"; + default: + WARN_ON(1); + return "unknown"; + } +} + +/* + * We scan the manifest once to identify where all the descriptors + * are. The result is a list of these manifest_desc structures. We + * then pick through them for what we're looking for (starting with + * the interface descriptor). As each is processed we remove it from + * the list. When we're done the list should (probably) be empty. + */ +struct manifest_desc { + struct list_head links; + + size_t size; + void *data; + enum greybus_descriptor_type type; +}; + +static void release_manifest_descriptor(struct manifest_desc *descriptor) +{ + list_del(&descriptor->links); + kfree(descriptor); +} + +static void release_manifest_descriptors(struct gb_interface *intf) +{ + struct manifest_desc *descriptor; + struct manifest_desc *next; + + list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links) + release_manifest_descriptor(descriptor); +} + +static void release_cport_descriptors(struct list_head *head, u8 bundle_id) +{ + struct manifest_desc *desc, *tmp; + struct greybus_descriptor_cport *desc_cport; + + list_for_each_entry_safe(desc, tmp, head, links) { + desc_cport = desc->data; + + if (desc->type != GREYBUS_TYPE_CPORT) + continue; + + if (desc_cport->bundle == bundle_id) + release_manifest_descriptor(desc); + } +} + +static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf) +{ + struct manifest_desc *descriptor; + struct manifest_desc *next; + + list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links) + if (descriptor->type == GREYBUS_TYPE_BUNDLE) + return descriptor; + + return NULL; +} + +/* + * Validate the given descriptor. Its reported size must fit within + * the number of bytes remaining, and it must have a recognized + * type. Check that the reported size is at least as big as what + * we expect to see. (It could be bigger, perhaps for a new version + * of the format.) + * + * Returns the (non-zero) number of bytes consumed by the descriptor, + * or a negative errno. 
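+ *
+ * For reference, every descriptor begins with a common header along
+ * these lines (a sketch; the authoritative definition is the one in
+ * the greybus_manifest.h header):
+ *
+ *	struct greybus_descriptor_header {
+ *		__le16	size;
+ *		__u8	type;	(enum greybus_descriptor_type)
+ *		__u8	pad;
+ *	} __packed;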
+ */ +static int identify_descriptor(struct gb_interface *intf, + struct greybus_descriptor *desc, size_t size) +{ + struct greybus_descriptor_header *desc_header = &desc->header; + struct manifest_desc *descriptor; + size_t desc_size; + size_t expected_size; + + if (size < sizeof(*desc_header)) { + dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size, + sizeof(*desc_header)); + return -EINVAL; /* Must at least have header */ + } + + desc_size = le16_to_cpu(desc_header->size); + if (desc_size > size) { + dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n", + desc_size, size); + return -EINVAL; + } + + /* Descriptor needs to at least have a header */ + expected_size = sizeof(*desc_header); + + switch (desc_header->type) { + case GREYBUS_TYPE_STRING: + expected_size += sizeof(struct greybus_descriptor_string); + expected_size += desc->string.length; + + /* String descriptors are padded to 4 byte boundaries */ + expected_size = ALIGN(expected_size, 4); + break; + case GREYBUS_TYPE_INTERFACE: + expected_size += sizeof(struct greybus_descriptor_interface); + break; + case GREYBUS_TYPE_BUNDLE: + expected_size += sizeof(struct greybus_descriptor_bundle); + break; + case GREYBUS_TYPE_CPORT: + expected_size += sizeof(struct greybus_descriptor_cport); + break; + case GREYBUS_TYPE_INVALID: + default: + dev_err(&intf->dev, "invalid descriptor type (%u)\n", + desc_header->type); + return -EINVAL; + } + + if (desc_size < expected_size) { + dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n", + get_descriptor_type_string(desc_header->type), + desc_size, expected_size); + return -EINVAL; + } + + /* Descriptor bigger than what we expect */ + if (desc_size > expected_size) { + dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n", + get_descriptor_type_string(desc_header->type), + expected_size, desc_size); + } + + descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL); + if (!descriptor) + return -ENOMEM; + + descriptor->size = desc_size; + descriptor->data = (char *)desc + sizeof(*desc_header); + descriptor->type = desc_header->type; + list_add_tail(&descriptor->links, &intf->manifest_descs); + + /* desc_size is positive and is known to fit in a signed int */ + + return desc_size; +} + +/* + * Find the string descriptor having the given id, validate it, and + * allocate a duplicate copy of it. The duplicate has an extra byte + * which guarantees the returned string is NUL-terminated. + * + * String index 0 is valid (it represents "no string"), and for + * that a null pointer is returned. + * + * Otherwise returns a pointer to a newly-allocated copy of the + * descriptor string, or an error-coded pointer on failure. 
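+ *
+ * Caller sketch, matching how the interface parsing code below uses
+ * this:
+ *
+ *	str = gb_string_get(intf, desc_intf->vendor_stringid);
+ *	if (IS_ERR(str))
+ *		return false;
+ *	control->vendor_string = str;	(NULL when the string id was 0)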
+ */ +static char *gb_string_get(struct gb_interface *intf, u8 string_id) +{ + struct greybus_descriptor_string *desc_string; + struct manifest_desc *descriptor; + bool found = false; + char *string; + + /* A zero string id means no string (but no error) */ + if (!string_id) + return NULL; + + list_for_each_entry(descriptor, &intf->manifest_descs, links) { + if (descriptor->type != GREYBUS_TYPE_STRING) + continue; + + desc_string = descriptor->data; + if (desc_string->id == string_id) { + found = true; + break; + } + } + if (!found) + return ERR_PTR(-ENOENT); + + /* Allocate an extra byte so we can guarantee it's NUL-terminated */ + string = kmemdup(&desc_string->string, desc_string->length + 1, + GFP_KERNEL); + if (!string) + return ERR_PTR(-ENOMEM); + string[desc_string->length] = '\0'; + + /* Ok we've used this string, so we're done with it */ + release_manifest_descriptor(descriptor); + + return string; +} + +/* + * Find cport descriptors in the manifest associated with the given + * bundle, and set up data structures for the functions that use + * them. Returns the number of cports set up for the bundle, or 0 + * if there is an error. + */ +static u32 gb_manifest_parse_cports(struct gb_bundle *bundle) +{ + struct gb_interface *intf = bundle->intf; + struct greybus_descriptor_cport *desc_cport; + struct manifest_desc *desc, *next, *tmp; + LIST_HEAD(list); + u8 bundle_id = bundle->id; + u16 cport_id; + u32 count = 0; + int i; + + /* Set up all cport descriptors associated with this bundle */ + list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) { + if (desc->type != GREYBUS_TYPE_CPORT) + continue; + + desc_cport = desc->data; + if (desc_cport->bundle != bundle_id) + continue; + + cport_id = le16_to_cpu(desc_cport->id); + if (cport_id > CPORT_ID_MAX) + goto exit; + + /* Nothing else should have its cport_id as control cport id */ + if (cport_id == GB_CONTROL_CPORT_ID) { + dev_err(&bundle->dev, "invalid cport id found (%02u)\n", + cport_id); + goto exit; + } + + /* + * Found one, move it to our temporary list after checking for + * duplicates. + */ + list_for_each_entry(tmp, &list, links) { + desc_cport = tmp->data; + if (cport_id == le16_to_cpu(desc_cport->id)) { + dev_err(&bundle->dev, + "duplicate CPort %u found\n", cport_id); + goto exit; + } + } + list_move_tail(&desc->links, &list); + count++; + } + + if (!count) + return 0; + + bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc), + GFP_KERNEL); + if (!bundle->cport_desc) + goto exit; + + bundle->num_cports = count; + + i = 0; + list_for_each_entry_safe(desc, next, &list, links) { + desc_cport = desc->data; + memcpy(&bundle->cport_desc[i++], desc_cport, + sizeof(*desc_cport)); + + /* Release the cport descriptor */ + release_manifest_descriptor(desc); + } + + return count; +exit: + release_cport_descriptors(&list, bundle_id); + /* + * Free all cports for this bundle to avoid 'excess descriptors' + * warnings. + */ + release_cport_descriptors(&intf->manifest_descs, bundle_id); + + return 0; /* Error; count should also be 0 */ +} + +/* + * Find bundle descriptors in the manifest and set up their data + * structures. Returns the number of bundles set up for the + * given interface. + */ +static u32 gb_manifest_parse_bundles(struct gb_interface *intf) +{ + struct manifest_desc *desc; + struct gb_bundle *bundle; + struct gb_bundle *bundle_next; + u32 count = 0; + u8 bundle_id; + u8 class; + + while ((desc = get_next_bundle_desc(intf))) { + struct greybus_descriptor_bundle *desc_bundle; + + /* Found one. 
Set up its bundle structure */ + desc_bundle = desc->data; + bundle_id = desc_bundle->id; + class = desc_bundle->class; + + /* Done with this bundle descriptor */ + release_manifest_descriptor(desc); + + /* Ignore any legacy control bundles */ + if (bundle_id == GB_CONTROL_BUNDLE_ID) { + dev_dbg(&intf->dev, "%s - ignoring control bundle\n", + __func__); + release_cport_descriptors(&intf->manifest_descs, + bundle_id); + continue; + } + + /* Nothing else should have its class set to control class */ + if (class == GREYBUS_CLASS_CONTROL) { + dev_err(&intf->dev, + "bundle %u cannot use control class\n", + bundle_id); + goto cleanup; + } + + bundle = gb_bundle_create(intf, bundle_id, class); + if (!bundle) + goto cleanup; + + /* + * Now go set up this bundle's functions and cports. + * + * A 'bundle' represents a device in greybus. It may require + * multiple cports for its functioning. If we fail to set up any + * cport of a bundle, we reject the complete bundle, as + * the device may not be able to function properly without it. + * + * But failing to set up a cport of bundle X doesn't mean that + * the device corresponding to bundle Y will not work properly. + * Bundles should be treated as separate independent devices. + * + * While parsing the manifest for an interface, treat bundles as + * separate entities and don't reject the entire interface and its + * bundles when a cport fails to initialize. But make sure the + * bundle that needs the cport gets destroyed properly. + */ + if (!gb_manifest_parse_cports(bundle)) { + gb_bundle_destroy(bundle); + continue; + } + + count++; + } + + return count; +cleanup: + /* An error occurred; undo any changes we've made */ + list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) { + gb_bundle_destroy(bundle); + count--; + } + return 0; /* Error; count should also be 0 */ +} + +static bool gb_manifest_parse_interface(struct gb_interface *intf, + struct manifest_desc *interface_desc) +{ + struct greybus_descriptor_interface *desc_intf = interface_desc->data; + struct gb_control *control = intf->control; + char *str; + + /* Handle the strings first--they can fail */ + str = gb_string_get(intf, desc_intf->vendor_stringid); + if (IS_ERR(str)) + return false; + control->vendor_string = str; + + str = gb_string_get(intf, desc_intf->product_stringid); + if (IS_ERR(str)) + goto out_free_vendor_string; + control->product_string = str; + + /* Assign feature flags communicated via manifest */ + intf->features = desc_intf->features; + + /* Release the interface descriptor, now that we're done with it */ + release_manifest_descriptor(interface_desc); + + /* An interface must have at least one bundle descriptor */ + if (!gb_manifest_parse_bundles(intf)) { + dev_err(&intf->dev, "manifest bundle descriptors not valid\n"); + goto out_err; + } + + return true; +out_err: + kfree(control->product_string); + control->product_string = NULL; +out_free_vendor_string: + kfree(control->vendor_string); + control->vendor_string = NULL; + + return false; +} + +/* + * Parse a buffer containing an interface manifest. + * + * If we find anything wrong with the content/format of the buffer + * we reject it. + * + * The first requirement is that the manifest's version is + * one we can parse. + * + * We make an initial pass through the buffer and identify all of + * the descriptors it contains, keeping track of each one's type + * and of the location and size of its data in the buffer.
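+ * (Header sketch, assuming the greybus_manifest.h definitions:
+ *
+ *	struct greybus_manifest_header {
+ *		__le16	size;
+ *		__u8	version_major;
+ *		__u8	version_minor;
+ *	} __packed;
+ *
+ * with the descriptors packed immediately after it.)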
+ * + * Next we scan the descriptors, looking for an interface descriptor; + * there must be exactly one of those. When found, we record the + * information it contains, and then remove that descriptor (and any + * string descriptors it refers to) from further consideration. + * + * After that we look for the interface's bundles--there must be at + * least one of those. + * + * Returns true if parsing was successful, false otherwise. + */ +bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size) +{ + struct greybus_manifest *manifest; + struct greybus_manifest_header *header; + struct greybus_descriptor *desc; + struct manifest_desc *descriptor; + struct manifest_desc *interface_desc = NULL; + u16 manifest_size; + u32 found = 0; + bool result; + + /* Manifest descriptor list should be empty here */ + if (WARN_ON(!list_empty(&intf->manifest_descs))) + return false; + + /* we have to have at _least_ the manifest header */ + if (size < sizeof(*header)) { + dev_err(&intf->dev, "short manifest (%zu < %zu)\n", + size, sizeof(*header)); + return false; + } + + /* Make sure the size is right */ + manifest = data; + header = &manifest->header; + manifest_size = le16_to_cpu(header->size); + if (manifest_size != size) { + dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n", + size, manifest_size); + return false; + } + + /* Validate major/minor number */ + if (header->version_major > GREYBUS_VERSION_MAJOR) { + dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n", + header->version_major, header->version_minor, + GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR); + return false; + } + + /* OK, find all the descriptors */ + desc = manifest->descriptors; + size -= sizeof(*header); + while (size) { + int desc_size; + + desc_size = identify_descriptor(intf, desc, size); + if (desc_size < 0) { + result = false; + goto out; + } + desc = (struct greybus_descriptor *)((char *)desc + desc_size); + size -= desc_size; + } + + /* There must be a single interface descriptor */ + list_for_each_entry(descriptor, &intf->manifest_descs, links) { + if (descriptor->type == GREYBUS_TYPE_INTERFACE) + if (!found++) + interface_desc = descriptor; + } + if (found != 1) { + dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n", + found); + result = false; + goto out; + } + + /* Parse the manifest, starting with the interface descriptor */ + result = gb_manifest_parse_interface(intf, interface_desc); + + /* + * We really should have no remaining descriptors, but we + * don't know what newer format manifests might leave. + */ + if (result && !list_empty(&intf->manifest_descs)) + dev_info(&intf->dev, "excess descriptors in interface manifest\n"); +out: + release_manifest_descriptors(intf); + + return result; +} diff --git a/drivers/greybus/module.c b/drivers/greybus/module.c new file mode 100644 index 000000000000..36f77f9e1d74 --- /dev/null +++ b/drivers/greybus/module.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus Module code + * + * Copyright 2016 Google Inc. + * Copyright 2016 Linaro Ltd. 
+ */ + +#include +#include "greybus_trace.h" + +static ssize_t eject_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct gb_module *module = to_gb_module(dev); + struct gb_interface *intf; + size_t i; + long val; + int ret; + + ret = kstrtol(buf, 0, &val); + if (ret) + return ret; + + if (!val) + return len; + + for (i = 0; i < module->num_interfaces; ++i) { + intf = module->interfaces[i]; + + mutex_lock(&intf->mutex); + /* Set flag to prevent concurrent activation. */ + intf->ejected = true; + gb_interface_disable(intf); + gb_interface_deactivate(intf); + mutex_unlock(&intf->mutex); + } + + /* Tell the SVC to eject the primary interface. */ + ret = gb_svc_intf_eject(module->hd->svc, module->module_id); + if (ret) + return ret; + + return len; +} +static DEVICE_ATTR_WO(eject); + +static ssize_t module_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_module *module = to_gb_module(dev); + + return sprintf(buf, "%u\n", module->module_id); +} +static DEVICE_ATTR_RO(module_id); + +static ssize_t num_interfaces_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_module *module = to_gb_module(dev); + + return sprintf(buf, "%zu\n", module->num_interfaces); +} +static DEVICE_ATTR_RO(num_interfaces); + +static struct attribute *module_attrs[] = { + &dev_attr_eject.attr, + &dev_attr_module_id.attr, + &dev_attr_num_interfaces.attr, + NULL, +}; +ATTRIBUTE_GROUPS(module); + +static void gb_module_release(struct device *dev) +{ + struct gb_module *module = to_gb_module(dev); + + trace_gb_module_release(module); + + kfree(module); +} + +struct device_type greybus_module_type = { + .name = "greybus_module", + .release = gb_module_release, +}; + +struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, + size_t num_interfaces) +{ + struct gb_interface *intf; + struct gb_module *module; + int i; + + module = kzalloc(struct_size(module, interfaces, num_interfaces), + GFP_KERNEL); + if (!module) + return NULL; + + module->hd = hd; + module->module_id = module_id; + module->num_interfaces = num_interfaces; + + module->dev.parent = &hd->dev; + module->dev.bus = &greybus_bus_type; + module->dev.type = &greybus_module_type; + module->dev.groups = module_groups; + module->dev.dma_mask = hd->dev.dma_mask; + device_initialize(&module->dev); + dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id); + + trace_gb_module_create(module); + + for (i = 0; i < num_interfaces; ++i) { + intf = gb_interface_create(module, module_id + i); + if (!intf) { + dev_err(&module->dev, "failed to create interface %u\n", + module_id + i); + goto err_put_interfaces; + } + module->interfaces[i] = intf; + } + + return module; + +err_put_interfaces: + for (--i; i >= 0; --i) + gb_interface_put(module->interfaces[i]); + + put_device(&module->dev); + + return NULL; +} + +/* + * Register and enable an interface after first attempting to activate it. 
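+ *
+ * The happy path below is, in sketch form:
+ *
+ *	gb_interface_activate(intf);	power up and detect the type
+ *	gb_interface_add(intf);		register the device
+ *	gb_interface_enable(intf);	fetch manifest, add bundles
+ *
+ * with gb_interface_deactivate() unwinding activation on failure.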
+ */ +static void gb_module_register_interface(struct gb_interface *intf) +{ + struct gb_module *module = intf->module; + u8 intf_id = intf->interface_id; + int ret; + + mutex_lock(&intf->mutex); + + ret = gb_interface_activate(intf); + if (ret) { + if (intf->type != GB_INTERFACE_TYPE_DUMMY) { + dev_err(&module->dev, + "failed to activate interface %u: %d\n", + intf_id, ret); + } + + gb_interface_add(intf); + goto err_unlock; + } + + ret = gb_interface_add(intf); + if (ret) + goto err_interface_deactivate; + + ret = gb_interface_enable(intf); + if (ret) { + dev_err(&module->dev, "failed to enable interface %u: %d\n", + intf_id, ret); + goto err_interface_deactivate; + } + + mutex_unlock(&intf->mutex); + + return; + +err_interface_deactivate: + gb_interface_deactivate(intf); +err_unlock: + mutex_unlock(&intf->mutex); +} + +static void gb_module_deregister_interface(struct gb_interface *intf) +{ + /* Mark as disconnected to prevent I/O during disable. */ + if (intf->module->disconnected) + intf->disconnected = true; + + mutex_lock(&intf->mutex); + intf->removed = true; + gb_interface_disable(intf); + gb_interface_deactivate(intf); + mutex_unlock(&intf->mutex); + + gb_interface_del(intf); +} + +/* Register a module and its interfaces. */ +int gb_module_add(struct gb_module *module) +{ + size_t i; + int ret; + + ret = device_add(&module->dev); + if (ret) { + dev_err(&module->dev, "failed to register module: %d\n", ret); + return ret; + } + + trace_gb_module_add(module); + + for (i = 0; i < module->num_interfaces; ++i) + gb_module_register_interface(module->interfaces[i]); + + return 0; +} + +/* Deregister a module and its interfaces. */ +void gb_module_del(struct gb_module *module) +{ + size_t i; + + for (i = 0; i < module->num_interfaces; ++i) + gb_module_deregister_interface(module->interfaces[i]); + + trace_gb_module_del(module); + + device_del(&module->dev); +} + +void gb_module_put(struct gb_module *module) +{ + size_t i; + + for (i = 0; i < module->num_interfaces; ++i) + gb_interface_put(module->interfaces[i]); + + put_device(&module->dev); +} diff --git a/drivers/greybus/operation.c b/drivers/greybus/operation.c new file mode 100644 index 000000000000..8459e9bc0749 --- /dev/null +++ b/drivers/greybus/operation.c @@ -0,0 +1,1264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Greybus operations + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "greybus_trace.h" + +static struct kmem_cache *gb_operation_cache; +static struct kmem_cache *gb_message_cache; + +/* Workqueue to handle Greybus operation completions. */ +static struct workqueue_struct *gb_operation_completion_wq; + +/* Wait queue for synchronous cancellations. */ +static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue); + +/* + * Protects updates to operation->errno. + */ +static DEFINE_SPINLOCK(gb_operations_lock); + +static int gb_operation_response_send(struct gb_operation *operation, + int errno); + +/* + * Increment operation active count and add to connection list unless the + * connection is going away. + * + * Caller holds operation reference. 
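+ *
+ * Whether an operation may become active depends on the connection
+ * state; in sketch form, the switch below implements:
+ *
+ *	ENABLED		any operation
+ *	ENABLED_TX	outgoing operations only
+ *	DISCONNECTING	core operations only
+ *	anything else	refused with -ENOTCONN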
+ */ +static int gb_operation_get_active(struct gb_operation *operation) +{ + struct gb_connection *connection = operation->connection; + unsigned long flags; + + spin_lock_irqsave(&connection->lock, flags); + switch (connection->state) { + case GB_CONNECTION_STATE_ENABLED: + break; + case GB_CONNECTION_STATE_ENABLED_TX: + if (gb_operation_is_incoming(operation)) + goto err_unlock; + break; + case GB_CONNECTION_STATE_DISCONNECTING: + if (!gb_operation_is_core(operation)) + goto err_unlock; + break; + default: + goto err_unlock; + } + + if (operation->active++ == 0) + list_add_tail(&operation->links, &connection->operations); + + trace_gb_operation_get_active(operation); + + spin_unlock_irqrestore(&connection->lock, flags); + + return 0; + +err_unlock: + spin_unlock_irqrestore(&connection->lock, flags); + + return -ENOTCONN; +} + +/* Caller holds operation reference. */ +static void gb_operation_put_active(struct gb_operation *operation) +{ + struct gb_connection *connection = operation->connection; + unsigned long flags; + + spin_lock_irqsave(&connection->lock, flags); + + trace_gb_operation_put_active(operation); + + if (--operation->active == 0) { + list_del(&operation->links); + if (atomic_read(&operation->waiters)) + wake_up(&gb_operation_cancellation_queue); + } + spin_unlock_irqrestore(&connection->lock, flags); +} + +static bool gb_operation_is_active(struct gb_operation *operation) +{ + struct gb_connection *connection = operation->connection; + unsigned long flags; + bool ret; + + spin_lock_irqsave(&connection->lock, flags); + ret = operation->active; + spin_unlock_irqrestore(&connection->lock, flags); + + return ret; +} + +/* + * Set an operation's result. + * + * Initially an outgoing operation's errno value is -EBADR. + * If no error occurs before sending the request message the only + * valid value operation->errno can be set to is -EINPROGRESS, + * indicating the request has been (or rather is about to be) sent. + * At that point nobody should be looking at the result until the + * response arrives. + * + * The first time the result gets set after the request has been + * sent, that result "sticks." That is, if two concurrent threads + * race to set the result, the first one wins. The return value + * tells the caller whether its result was recorded; if not the + * caller has nothing more to do. + * + * The result value -EILSEQ is reserved to signal an implementation + * error; if it's ever observed, the code performing the request has + * done something fundamentally wrong. It is an error to try to set + * the result to -EBADR, and attempts to do so result in a warning, + * and -EILSEQ is used instead. Similarly, the only valid result + * value to set for an operation in initial state is -EINPROGRESS. + * Attempts to do otherwise will also record a (successful) -EILSEQ + * operation result. + */ +static bool gb_operation_result_set(struct gb_operation *operation, int result) +{ + unsigned long flags; + int prev; + + if (result == -EINPROGRESS) { + /* + * -EINPROGRESS is used to indicate the request is + * in flight. It should be the first result value + * set after the initial -EBADR. Issue a warning + * and record an implementation error if it's + * set at any other time. 
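+ * The overall lifecycle is, in sketch form: -EBADR (initial,
+ * never set), then -EINPROGRESS (request sent), then the final
+ * errno (response arrival, send error, timeout or cancellation),
+ * with only the first setter of the final value winning.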
+ */ + spin_lock_irqsave(&gb_operations_lock, flags); + prev = operation->errno; + if (prev == -EBADR) + operation->errno = result; + else + operation->errno = -EILSEQ; + spin_unlock_irqrestore(&gb_operations_lock, flags); + WARN_ON(prev != -EBADR); + + return true; + } + + /* + * The first result value set after a request has been sent + * will be the final result of the operation. Subsequent + * attempts to set the result are ignored. + * + * Note that -EBADR is a reserved "initial state" result + * value. Attempts to set this value result in a warning, + * and the result code is set to -EILSEQ instead. + */ + if (WARN_ON(result == -EBADR)) + result = -EILSEQ; /* Nobody should be setting -EBADR */ + + spin_lock_irqsave(&gb_operations_lock, flags); + prev = operation->errno; + if (prev == -EINPROGRESS) + operation->errno = result; /* First and final result */ + spin_unlock_irqrestore(&gb_operations_lock, flags); + + return prev == -EINPROGRESS; +} + +int gb_operation_result(struct gb_operation *operation) +{ + int result = operation->errno; + + WARN_ON(result == -EBADR); + WARN_ON(result == -EINPROGRESS); + + return result; +} +EXPORT_SYMBOL_GPL(gb_operation_result); + +/* + * Looks up an outgoing operation on a connection and returns a refcounted + * pointer if found, or NULL otherwise. + */ +static struct gb_operation * +gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id) +{ + struct gb_operation *operation; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&connection->lock, flags); + list_for_each_entry(operation, &connection->operations, links) + if (operation->id == operation_id && + !gb_operation_is_incoming(operation)) { + gb_operation_get(operation); + found = true; + break; + } + spin_unlock_irqrestore(&connection->lock, flags); + + return found ? operation : NULL; +} + +static int gb_message_send(struct gb_message *message, gfp_t gfp) +{ + struct gb_connection *connection = message->operation->connection; + + trace_gb_message_send(message); + return connection->hd->driver->message_send(connection->hd, + connection->hd_cport_id, + message, + gfp); +} + +/* + * Cancel a message we have passed to the host device layer to be sent. + */ +static void gb_message_cancel(struct gb_message *message) +{ + struct gb_host_device *hd = message->operation->connection->hd; + + hd->driver->message_cancel(message); +} + +static void gb_operation_request_handle(struct gb_operation *operation) +{ + struct gb_connection *connection = operation->connection; + int status; + int ret; + + if (connection->handler) { + status = connection->handler(operation); + } else { + dev_err(&connection->hd->dev, + "%s: unexpected incoming request of type 0x%02x\n", + connection->name, operation->type); + + status = -EPROTONOSUPPORT; + } + + ret = gb_operation_response_send(operation, status); + if (ret) { + dev_err(&connection->hd->dev, + "%s: failed to send response %d for type 0x%02x: %d\n", + connection->name, status, operation->type, ret); + return; + } +} + +/* + * Process operation work. + * + * For incoming requests, call the protocol request handler. The operation + * result should be -EINPROGRESS at this point. + * + * For outgoing requests, the operation result value should have + * been set before queueing this. The operation callback function + * allows the original requester to know the request has completed + * and its result is available. 
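+ *
+ * For an outgoing operation the completion chain is roughly
+ * (sketch):
+ *
+ *	gb_connection_recv_response()
+ *	    gb_operation_result_set(operation, errno)
+ *	    queue_work(gb_operation_completion_wq, &operation->work)
+ *	gb_operation_work()
+ *	    operation->callback(operation)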
+ */ +static void gb_operation_work(struct work_struct *work) +{ + struct gb_operation *operation; + int ret; + + operation = container_of(work, struct gb_operation, work); + + if (gb_operation_is_incoming(operation)) { + gb_operation_request_handle(operation); + } else { + ret = del_timer_sync(&operation->timer); + if (!ret) { + /* Cancel request message if scheduled by timeout. */ + if (gb_operation_result(operation) == -ETIMEDOUT) + gb_message_cancel(operation->request); + } + + operation->callback(operation); + } + + gb_operation_put_active(operation); + gb_operation_put(operation); +} + +static void gb_operation_timeout(struct timer_list *t) +{ + struct gb_operation *operation = from_timer(operation, t, timer); + + if (gb_operation_result_set(operation, -ETIMEDOUT)) { + /* + * A stuck request message will be cancelled from the + * workqueue. + */ + queue_work(gb_operation_completion_wq, &operation->work); + } +} + +static void gb_operation_message_init(struct gb_host_device *hd, + struct gb_message *message, + u16 operation_id, + size_t payload_size, u8 type) +{ + struct gb_operation_msg_hdr *header; + + header = message->buffer; + + message->header = header; + message->payload = payload_size ? header + 1 : NULL; + message->payload_size = payload_size; + + /* + * The type supplied for incoming message buffers will be + * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by + * arriving data so there's no need to initialize the message header. + */ + if (type != GB_REQUEST_TYPE_INVALID) { + u16 message_size = (u16)(sizeof(*header) + payload_size); + + /* + * For a request, the operation id gets filled in + * when the message is sent. For a response, it + * will be copied from the request by the caller. + * + * The result field in a request message must be + * zero. It will be set just prior to sending for + * a response. + */ + header->size = cpu_to_le16(message_size); + header->operation_id = 0; + header->type = type; + header->result = 0; + } +} + +/* + * Allocate a message to be used for an operation request or response. + * Both types of message contain a common header. The request message + * for an outgoing operation is outbound, as is the response message + * for an incoming operation. The message header for an outbound + * message is partially initialized here. + * + * The headers for inbound messages don't need to be initialized; + * they'll be filled in by arriving data. + * + * Our message buffers have the following layout: + * message header \_ these combined are + * message payload / the message size + */ +static struct gb_message * +gb_operation_message_alloc(struct gb_host_device *hd, u8 type, + size_t payload_size, gfp_t gfp_flags) +{ + struct gb_message *message; + struct gb_operation_msg_hdr *header; + size_t message_size = payload_size + sizeof(*header); + + if (message_size > hd->buffer_size_max) { + dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n", + message_size, hd->buffer_size_max); + return NULL; + } + + /* Allocate the message structure and buffer. */ + message = kmem_cache_zalloc(gb_message_cache, gfp_flags); + if (!message) + return NULL; + + message->buffer = kzalloc(message_size, gfp_flags); + if (!message->buffer) + goto err_free_message; + + /* Initialize the message. Operation id is filled in later. 
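+ * (The header written here is gb_operation_msg_hdr: size,
+ * operation_id, type, result plus two bytes of padding, eight
+ * bytes in total, little-endian; a sketch based on the
+ * greybus_protocols.h definition.)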
*/ + gb_operation_message_init(hd, message, 0, payload_size, type); + + return message; + +err_free_message: + kmem_cache_free(gb_message_cache, message); + + return NULL; +} + +static void gb_operation_message_free(struct gb_message *message) +{ + kfree(message->buffer); + kmem_cache_free(gb_message_cache, message); +} + +/* + * Map an enum gb_operation_status value (which is represented in a + * message as a single byte) to an appropriate Linux negative errno. + */ +static int gb_operation_status_map(u8 status) +{ + switch (status) { + case GB_OP_SUCCESS: + return 0; + case GB_OP_INTERRUPTED: + return -EINTR; + case GB_OP_TIMEOUT: + return -ETIMEDOUT; + case GB_OP_NO_MEMORY: + return -ENOMEM; + case GB_OP_PROTOCOL_BAD: + return -EPROTONOSUPPORT; + case GB_OP_OVERFLOW: + return -EMSGSIZE; + case GB_OP_INVALID: + return -EINVAL; + case GB_OP_RETRY: + return -EAGAIN; + case GB_OP_NONEXISTENT: + return -ENODEV; + case GB_OP_MALFUNCTION: + return -EILSEQ; + case GB_OP_UNKNOWN_ERROR: + default: + return -EIO; + } +} + +/* + * Map a Linux errno value (from operation->errno) into the value + * that should represent it in a response message status sent + * over the wire. Returns an enum gb_operation_status value (which + * is represented in a message as a single byte). + */ +static u8 gb_operation_errno_map(int errno) +{ + switch (errno) { + case 0: + return GB_OP_SUCCESS; + case -EINTR: + return GB_OP_INTERRUPTED; + case -ETIMEDOUT: + return GB_OP_TIMEOUT; + case -ENOMEM: + return GB_OP_NO_MEMORY; + case -EPROTONOSUPPORT: + return GB_OP_PROTOCOL_BAD; + case -EMSGSIZE: + return GB_OP_OVERFLOW; /* Could be underflow too */ + case -EINVAL: + return GB_OP_INVALID; + case -EAGAIN: + return GB_OP_RETRY; + case -EILSEQ: + return GB_OP_MALFUNCTION; + case -ENODEV: + return GB_OP_NONEXISTENT; + case -EIO: + default: + return GB_OP_UNKNOWN_ERROR; + } +} + +bool gb_operation_response_alloc(struct gb_operation *operation, + size_t response_size, gfp_t gfp) +{ + struct gb_host_device *hd = operation->connection->hd; + struct gb_operation_msg_hdr *request_header; + struct gb_message *response; + u8 type; + + type = operation->type | GB_MESSAGE_TYPE_RESPONSE; + response = gb_operation_message_alloc(hd, type, response_size, gfp); + if (!response) + return false; + response->operation = operation; + + /* + * Size and type get initialized when the message is + * allocated. The errno will be set before sending. All + * that's left is the operation id, which we copy from the + * request message header (as-is, in little-endian order). + */ + request_header = operation->request->header; + response->header->operation_id = request_header->operation_id; + operation->response = response; + + return true; +} +EXPORT_SYMBOL_GPL(gb_operation_response_alloc); + +/* + * Create a Greybus operation to be sent over the given connection. + * The request buffer will be big enough for a payload of the given + * size. + * + * For outgoing requests, the request message's header will be + * initialized with the type of the request and the message size. + * Outgoing operations must also specify the response buffer size, + * which must be sufficient to hold all expected response data. The + * response message header will eventually be overwritten, so there's + * no need to initialize it here. + * + * Request messages for incoming operations can arrive in interrupt + * context, so they must be allocated with GFP_ATOMIC. 
In this case + * the request buffer will be immediately overwritten, so there is + * no need to initialize the message header. Responsibility for + * allocating a response buffer lies with the incoming request + * handler for a protocol. So we don't allocate that here. + * + * Returns a pointer to the new operation or a null pointer if an + * error occurs. + */ +static struct gb_operation * +gb_operation_create_common(struct gb_connection *connection, u8 type, + size_t request_size, size_t response_size, + unsigned long op_flags, gfp_t gfp_flags) +{ + struct gb_host_device *hd = connection->hd; + struct gb_operation *operation; + + operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags); + if (!operation) + return NULL; + operation->connection = connection; + + operation->request = gb_operation_message_alloc(hd, type, request_size, + gfp_flags); + if (!operation->request) + goto err_cache; + operation->request->operation = operation; + + /* Allocate the response buffer for outgoing operations */ + if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) { + if (!gb_operation_response_alloc(operation, response_size, + gfp_flags)) { + goto err_request; + } + + timer_setup(&operation->timer, gb_operation_timeout, 0); + } + + operation->flags = op_flags; + operation->type = type; + operation->errno = -EBADR; /* Initial value--means "never set" */ + + INIT_WORK(&operation->work, gb_operation_work); + init_completion(&operation->completion); + kref_init(&operation->kref); + atomic_set(&operation->waiters, 0); + + return operation; + +err_request: + gb_operation_message_free(operation->request); +err_cache: + kmem_cache_free(gb_operation_cache, operation); + + return NULL; +} + +/* + * Create a new operation associated with the given connection. The + * request and response sizes provided are the number of bytes + * required to hold the request/response payload only. Both of + * these are allowed to be 0. Note that 0x00 is reserved as an + * invalid operation type for all protocols, and this is enforced + * here. + */ +struct gb_operation * +gb_operation_create_flags(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp) +{ + struct gb_operation *operation; + + if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID)) + return NULL; + if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE)) + type &= ~GB_MESSAGE_TYPE_RESPONSE; + + if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK)) + flags &= GB_OPERATION_FLAG_USER_MASK; + + operation = gb_operation_create_common(connection, type, + request_size, response_size, + flags, gfp); + if (operation) + trace_gb_operation_create(operation); + + return operation; +} +EXPORT_SYMBOL_GPL(gb_operation_create_flags); + +struct gb_operation * +gb_operation_create_core(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp) +{ + struct gb_operation *operation; + + flags |= GB_OPERATION_FLAG_CORE; + + operation = gb_operation_create_common(connection, type, + request_size, response_size, + flags, gfp); + if (operation) + trace_gb_operation_create_core(operation); + + return operation; +} + +/* Do not export this function. 
*/ + +size_t gb_operation_get_payload_size_max(struct gb_connection *connection) +{ + struct gb_host_device *hd = connection->hd; + + return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr); +} +EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max); + +static struct gb_operation * +gb_operation_create_incoming(struct gb_connection *connection, u16 id, + u8 type, void *data, size_t size) +{ + struct gb_operation *operation; + size_t request_size; + unsigned long flags = GB_OPERATION_FLAG_INCOMING; + + /* Caller has made sure we at least have a message header. */ + request_size = size - sizeof(struct gb_operation_msg_hdr); + + if (!id) + flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL; + + operation = gb_operation_create_common(connection, type, + request_size, + GB_REQUEST_TYPE_INVALID, + flags, GFP_ATOMIC); + if (!operation) + return NULL; + + operation->id = id; + memcpy(operation->request->header, data, size); + trace_gb_operation_create_incoming(operation); + + return operation; +} + +/* + * Get an additional reference on an operation. + */ +void gb_operation_get(struct gb_operation *operation) +{ + kref_get(&operation->kref); +} +EXPORT_SYMBOL_GPL(gb_operation_get); + +/* + * Destroy a previously created operation. + */ +static void _gb_operation_destroy(struct kref *kref) +{ + struct gb_operation *operation; + + operation = container_of(kref, struct gb_operation, kref); + + trace_gb_operation_destroy(operation); + + if (operation->response) + gb_operation_message_free(operation->response); + gb_operation_message_free(operation->request); + + kmem_cache_free(gb_operation_cache, operation); +} + +/* + * Drop a reference on an operation, and destroy it when the last + * one is gone. + */ +void gb_operation_put(struct gb_operation *operation) +{ + if (WARN_ON(!operation)) + return; + + kref_put(&operation->kref, _gb_operation_destroy); +} +EXPORT_SYMBOL_GPL(gb_operation_put); + +/* Tell the requester we're done */ +static void gb_operation_sync_callback(struct gb_operation *operation) +{ + complete(&operation->completion); +} + +/** + * gb_operation_request_send() - send an operation request message + * @operation: the operation to initiate + * @callback: the operation completion callback + * @timeout: operation timeout in milliseconds, or zero for no timeout + * @gfp: the memory flags to use for any allocations + * + * The caller has filled in any payload so the request message is ready to go. + * The callback function supplied will be called when the response message has + * arrived, a unidirectional request has been sent, or the operation is + * cancelled, indicating that the operation is complete. The callback function + * can fetch the result of the operation using gb_operation_result() if + * desired. + * + * Return: 0 if the request was successfully queued in the host-driver queues, + * or a negative errno. + */ +int gb_operation_request_send(struct gb_operation *operation, + gb_operation_callback callback, + unsigned int timeout, + gfp_t gfp) +{ + struct gb_connection *connection = operation->connection; + struct gb_operation_msg_hdr *header; + unsigned int cycle; + int ret; + + if (gb_connection_is_offloaded(connection)) + return -EBUSY; + + if (!callback) + return -EINVAL; + + /* + * Record the callback function, which is executed in + * non-atomic (workqueue) context when the final result + * of an operation has been set. + */ + operation->callback = callback; + + /* + * Assign the operation's id, and store it in the request header. 
+ * Zero is a reserved operation id for unidirectional operations. + */ + if (gb_operation_is_unidirectional(operation)) { + operation->id = 0; + } else { + cycle = (unsigned int)atomic_inc_return(&connection->op_cycle); + operation->id = (u16)(cycle % U16_MAX + 1); + } + + header = operation->request->header; + header->operation_id = cpu_to_le16(operation->id); + + gb_operation_result_set(operation, -EINPROGRESS); + + /* + * Get an extra reference on the operation. It'll be dropped when the + * operation completes. + */ + gb_operation_get(operation); + ret = gb_operation_get_active(operation); + if (ret) + goto err_put; + + ret = gb_message_send(operation->request, gfp); + if (ret) + goto err_put_active; + + if (timeout) { + operation->timer.expires = jiffies + msecs_to_jiffies(timeout); + add_timer(&operation->timer); + } + + return 0; + +err_put_active: + gb_operation_put_active(operation); +err_put: + gb_operation_put(operation); + + return ret; +} +EXPORT_SYMBOL_GPL(gb_operation_request_send); + +/* + * Send a synchronous operation. This function is expected to + * block, returning only when the response has arrived (or when an + * error is detected). The return value is the result of the + * operation. + */ +int gb_operation_request_send_sync_timeout(struct gb_operation *operation, + unsigned int timeout) +{ + int ret; + + ret = gb_operation_request_send(operation, gb_operation_sync_callback, + timeout, GFP_KERNEL); + if (ret) + return ret; + + ret = wait_for_completion_interruptible(&operation->completion); + if (ret < 0) { + /* Cancel the operation if interrupted */ + gb_operation_cancel(operation, -ECANCELED); + } + + return gb_operation_result(operation); +} +EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout); + +/* + * Send a response for an incoming operation request. A non-zero + * errno indicates a failed operation. + * + * If there is any response payload, the incoming request handler is + * responsible for allocating the response message. Otherwise + * it can simply supply the result errno; this function will + * allocate the response message if necessary. + */ +static int gb_operation_response_send(struct gb_operation *operation, + int errno) +{ + struct gb_connection *connection = operation->connection; + int ret; + + if (!operation->response && + !gb_operation_is_unidirectional(operation)) { + if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL)) + return -ENOMEM; + } + + /* Record the result */ + if (!gb_operation_result_set(operation, errno)) { + dev_err(&connection->hd->dev, "request result already set\n"); + return -EIO; /* Shouldn't happen */ + } + + /* Sender of request does not care about response. */ + if (gb_operation_is_unidirectional(operation)) + return 0; + + /* Reference will be dropped when message has been sent. */ + gb_operation_get(operation); + ret = gb_operation_get_active(operation); + if (ret) + goto err_put; + + /* Fill in the response header and send it */ + operation->response->header->result = gb_operation_errno_map(errno); + + ret = gb_message_send(operation->response, GFP_KERNEL); + if (ret) + goto err_put_active; + + return 0; + +err_put_active: + gb_operation_put_active(operation); +err_put: + gb_operation_put(operation); + + return ret; +} + +/* + * This function is called when a message send request has completed.
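+ * Host-device drivers invoke it from their transmit-completion path,
+ * along these lines (a sketch with assumed driver names, not code
+ * from this file):
+ *
+ *	static void example_message_sent(struct example_hd *ehd,
+ *					 struct gb_message *message,
+ *					 int status)
+ *	{
+ *		greybus_message_sent(ehd->hd, message, status);
+ *	}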
+ */ +void greybus_message_sent(struct gb_host_device *hd, + struct gb_message *message, int status) +{ + struct gb_operation *operation = message->operation; + struct gb_connection *connection = operation->connection; + + /* + * If the message was a response, we just need to drop our + * reference to the operation. If an error occurred, report + * it. + * + * For requests, if there's no error and the operation is not + * unidirectional, there's nothing more to do until the response + * arrives. If an error occurred attempting to send it, or if the + * operation is unidirectional, record the result of the operation and + * schedule its completion. + */ + if (message == operation->response) { + if (status) { + dev_err(&connection->hd->dev, + "%s: error sending response 0x%02x: %d\n", + connection->name, operation->type, status); + } + + gb_operation_put_active(operation); + gb_operation_put(operation); + } else if (status || gb_operation_is_unidirectional(operation)) { + if (gb_operation_result_set(operation, status)) { + queue_work(gb_operation_completion_wq, + &operation->work); + } + } +} +EXPORT_SYMBOL_GPL(greybus_message_sent); + +/* + * We've received data on a connection, and it doesn't look like a + * response, so we assume it's a request. + * + * This is called in interrupt context, so just copy the incoming + * data into the request buffer and handle the rest via workqueue. + */ +static void gb_connection_recv_request(struct gb_connection *connection, + const struct gb_operation_msg_hdr *header, + void *data, size_t size) +{ + struct gb_operation *operation; + u16 operation_id; + u8 type; + int ret; + + operation_id = le16_to_cpu(header->operation_id); + type = header->type; + + operation = gb_operation_create_incoming(connection, operation_id, + type, data, size); + if (!operation) { + dev_err(&connection->hd->dev, + "%s: can't create incoming operation\n", + connection->name); + return; + } + + ret = gb_operation_get_active(operation); + if (ret) { + gb_operation_put(operation); + return; + } + trace_gb_message_recv_request(operation->request); + + /* + * The initial reference to the operation will be dropped when the + * request handler returns. + */ + if (gb_operation_result_set(operation, -EINPROGRESS)) + queue_work(connection->wq, &operation->work); +} + +/* + * We've received data that appears to be an operation response + * message. Look up the operation, and record that we've received + * its response. + * + * This is called in interrupt context, so just copy the incoming + * data into the response buffer and handle the rest via workqueue.
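+ *
+ * Size checking, in sketch form (the rules implemented below):
+ *
+ *	larger than the allocated response	-EMSGSIZE
+ *	shorter than the allocated response	allowed only when the
+ *						operation permits short
+ *						responses
+ *	error status from the remote end	payload ignored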
+ */ +static void gb_connection_recv_response(struct gb_connection *connection, + const struct gb_operation_msg_hdr *header, + void *data, size_t size) +{ + struct gb_operation *operation; + struct gb_message *message; + size_t message_size; + u16 operation_id; + int errno; + + operation_id = le16_to_cpu(header->operation_id); + + if (!operation_id) { + dev_err_ratelimited(&connection->hd->dev, + "%s: invalid response id 0 received\n", + connection->name); + return; + } + + operation = gb_operation_find_outgoing(connection, operation_id); + if (!operation) { + dev_err_ratelimited(&connection->hd->dev, + "%s: unexpected response id 0x%04x received\n", + connection->name, operation_id); + return; + } + + errno = gb_operation_status_map(header->result); + message = operation->response; + message_size = sizeof(*header) + message->payload_size; + if (!errno && size > message_size) { + dev_err_ratelimited(&connection->hd->dev, + "%s: malformed response 0x%02x received (%zu > %zu)\n", + connection->name, header->type, + size, message_size); + errno = -EMSGSIZE; + } else if (!errno && size < message_size) { + if (gb_operation_short_response_allowed(operation)) { + message->payload_size = size - sizeof(*header); + } else { + dev_err_ratelimited(&connection->hd->dev, + "%s: short response 0x%02x received (%zu < %zu)\n", + connection->name, header->type, + size, message_size); + errno = -EMSGSIZE; + } + } + + /* We must ignore the payload if a bad status is returned */ + if (errno) + size = sizeof(*header); + + /* The rest will be handled in work queue context */ + if (gb_operation_result_set(operation, errno)) { + memcpy(message->buffer, data, size); + + trace_gb_message_recv_response(message); + + queue_work(gb_operation_completion_wq, &operation->work); + } + + gb_operation_put(operation); +} + +/* + * Handle data arriving on a connection. As soon as we return the + * supplied data buffer will be reused (so unless we do something + * with it, it's effectively dropped). + */ +void gb_connection_recv(struct gb_connection *connection, + void *data, size_t size) +{ + struct gb_operation_msg_hdr header; + struct device *dev = &connection->hd->dev; + size_t msg_size; + + if (connection->state == GB_CONNECTION_STATE_DISABLED || + gb_connection_is_offloaded(connection)) { + dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n", + connection->name, size); + return; + } + + if (size < sizeof(header)) { + dev_err_ratelimited(dev, "%s: short message received\n", + connection->name); + return; + } + + /* Use memcpy as data may be unaligned */ + memcpy(&header, data, sizeof(header)); + msg_size = le16_to_cpu(header.size); + if (size < msg_size) { + dev_err_ratelimited(dev, + "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n", + connection->name, + le16_to_cpu(header.operation_id), + header.type, size, msg_size); + return; /* XXX Should still complete operation */ + } + + if (header.type & GB_MESSAGE_TYPE_RESPONSE) { + gb_connection_recv_response(connection, &header, data, + msg_size); + } else { + gb_connection_recv_request(connection, &header, data, + msg_size); + } +} + +/* + * Cancel an outgoing operation synchronously, and record the given error to + * indicate why.
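+ *
+ * Typical caller sketch, matching the synchronous send path in this
+ * file:
+ *
+ *	ret = wait_for_completion_interruptible(&operation->completion);
+ *	if (ret < 0)
+ *		gb_operation_cancel(operation, -ECANCELED);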
+ */ +void gb_operation_cancel(struct gb_operation *operation, int errno) +{ + if (WARN_ON(gb_operation_is_incoming(operation))) + return; + + if (gb_operation_result_set(operation, errno)) { + gb_message_cancel(operation->request); + queue_work(gb_operation_completion_wq, &operation->work); + } + trace_gb_message_cancel_outgoing(operation->request); + + atomic_inc(&operation->waiters); + wait_event(gb_operation_cancellation_queue, + !gb_operation_is_active(operation)); + atomic_dec(&operation->waiters); +} +EXPORT_SYMBOL_GPL(gb_operation_cancel); + +/* + * Cancel an incoming operation synchronously. Called during connection tear + * down. + */ +void gb_operation_cancel_incoming(struct gb_operation *operation, int errno) +{ + if (WARN_ON(!gb_operation_is_incoming(operation))) + return; + + if (!gb_operation_is_unidirectional(operation)) { + /* + * Make sure the request handler has submitted the response + * before cancelling it. + */ + flush_work(&operation->work); + if (!gb_operation_result_set(operation, errno)) + gb_message_cancel(operation->response); + } + trace_gb_message_cancel_incoming(operation->response); + + atomic_inc(&operation->waiters); + wait_event(gb_operation_cancellation_queue, + !gb_operation_is_active(operation)); + atomic_dec(&operation->waiters); +} + +/** + * gb_operation_sync_timeout() - implement a "simple" synchronous operation + * @connection: the Greybus connection to send this to + * @type: the type of operation to send + * @request: pointer to a memory buffer to copy the request from + * @request_size: size of @request + * @response: pointer to a memory buffer to copy the response to + * @response_size: the size of @response. + * @timeout: operation timeout in milliseconds + * + * This function implements a simple synchronous Greybus operation. It sends + * the provided operation request and waits (sleeps) until the corresponding + * operation response message has been successfully received, or an error + * occurs. @request and @response are buffers to hold the request and response + * data respectively, and if they are not NULL, their size must be specified in + * @request_size and @response_size. + * + * If a response payload is to come back, and @response is not NULL, + * @response_size number of bytes will be copied into @response if the operation + * is successful. + * + * If there is an error, the response buffer is left alone. 
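+ *
+ * Caller sketch (the request and response structures here are
+ * hypothetical, purely for illustration):
+ *
+ *	struct foo_request request = { .count = cpu_to_le16(1) };
+ *	struct foo_response response;
+ *	int ret;
+ *
+ *	ret = gb_operation_sync_timeout(connection, GB_FOO_TYPE_BAR,
+ *					&request, sizeof(request),
+ *					&response, sizeof(response),
+ *					1000);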
+ */ +int gb_operation_sync_timeout(struct gb_connection *connection, int type, + void *request, int request_size, + void *response, int response_size, + unsigned int timeout) +{ + struct gb_operation *operation; + int ret; + + if ((response_size && !response) || + (request_size && !request)) + return -EINVAL; + + operation = gb_operation_create(connection, type, + request_size, response_size, + GFP_KERNEL); + if (!operation) + return -ENOMEM; + + if (request_size) + memcpy(operation->request->payload, request, request_size); + + ret = gb_operation_request_send_sync_timeout(operation, timeout); + if (ret) { + dev_err(&connection->hd->dev, + "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n", + connection->name, operation->id, type, ret); + } else { + if (response_size) { + memcpy(response, operation->response->payload, + response_size); + } + } + + gb_operation_put(operation); + + return ret; +} +EXPORT_SYMBOL_GPL(gb_operation_sync_timeout); + +/** + * gb_operation_unidirectional_timeout() - initiate a unidirectional operation + * @connection: connection to use + * @type: type of operation to send + * @request: memory buffer to copy the request from + * @request_size: size of @request + * @timeout: send timeout in milliseconds + * + * Initiate a unidirectional operation by sending a request message and + * waiting for it to be acknowledged as sent by the host device. + * + * Note that successful send of a unidirectional operation does not imply that + * the request has actually reached the remote end of the connection. + */ +int gb_operation_unidirectional_timeout(struct gb_connection *connection, + int type, void *request, + int request_size, + unsigned int timeout) +{ + struct gb_operation *operation; + int ret; + + if (request_size && !request) + return -EINVAL; + + operation = gb_operation_create_flags(connection, type, + request_size, 0, + GB_OPERATION_FLAG_UNIDIRECTIONAL, + GFP_KERNEL); + if (!operation) + return -ENOMEM; + + if (request_size) + memcpy(operation->request->payload, request, request_size); + + ret = gb_operation_request_send_sync_timeout(operation, timeout); + if (ret) { + dev_err(&connection->hd->dev, + "%s: unidirectional operation of type 0x%02x failed: %d\n", + connection->name, type, ret); + } + + gb_operation_put(operation); + + return ret; +} +EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout); + +int __init gb_operation_init(void) +{ + gb_message_cache = kmem_cache_create("gb_message_cache", + sizeof(struct gb_message), 0, 0, + NULL); + if (!gb_message_cache) + return -ENOMEM; + + gb_operation_cache = kmem_cache_create("gb_operation_cache", + sizeof(struct gb_operation), 0, + 0, NULL); + if (!gb_operation_cache) + goto err_destroy_message_cache; + + gb_operation_completion_wq = alloc_workqueue("greybus_completion", + 0, 0); + if (!gb_operation_completion_wq) + goto err_destroy_operation_cache; + + return 0; + +err_destroy_operation_cache: + kmem_cache_destroy(gb_operation_cache); + gb_operation_cache = NULL; +err_destroy_message_cache: + kmem_cache_destroy(gb_message_cache); + gb_message_cache = NULL; + + return -ENOMEM; +} + +void gb_operation_exit(void) +{ + destroy_workqueue(gb_operation_completion_wq); + gb_operation_completion_wq = NULL; + kmem_cache_destroy(gb_operation_cache); + gb_operation_cache = NULL; + kmem_cache_destroy(gb_message_cache); + gb_message_cache = NULL; +} diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c new file mode 100644 index 000000000000..ce7740ef449b --- /dev/null +++ b/drivers/greybus/svc.c @@ -0,0
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SVC Greybus driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#define SVC_INTF_EJECT_TIMEOUT		9000
+#define SVC_INTF_ACTIVATE_TIMEOUT	6000
+#define SVC_INTF_RESUME_TIMEOUT		3000
+
+struct gb_svc_deferred_request {
+	struct work_struct work;
+	struct gb_operation *operation;
+};
+
+static int gb_svc_queue_deferred_request(struct gb_operation *operation);
+
+static ssize_t endo_id_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct gb_svc *svc = to_gb_svc(dev);
+
+	return sprintf(buf, "0x%04x\n", svc->endo_id);
+}
+static DEVICE_ATTR_RO(endo_id);
+
+static ssize_t ap_intf_id_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct gb_svc *svc = to_gb_svc(dev);
+
+	return sprintf(buf, "%u\n", svc->ap_intf_id);
+}
+static DEVICE_ATTR_RO(ap_intf_id);
+
+// FIXME
+// This is a hack, we need to do this "right" and clean the interface up
+// properly, not just forcibly yank the thing out of the system and hope for the
+// best. But for now, people want their modules to come out without having to
+// throw the thing to the ground or get out a screwdriver.
+static ssize_t intf_eject_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t len)
+{
+	struct gb_svc *svc = to_gb_svc(dev);
+	unsigned short intf_id;
+	int ret;
+
+	ret = kstrtou16(buf, 10, &intf_id);
+	if (ret < 0)
+		return ret;
+
+	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
+
+	ret = gb_svc_intf_eject(svc, intf_id);
+	if (ret < 0)
+		return ret;
+
+	return len;
+}
+static DEVICE_ATTR_WO(intf_eject);
+
+static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct gb_svc *svc = to_gb_svc(dev);
+
+	return sprintf(buf, "%s\n",
+		       gb_svc_watchdog_enabled(svc) ?
"enabled" : "disabled"); +} + +static ssize_t watchdog_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t len) +{ + struct gb_svc *svc = to_gb_svc(dev); + int retval; + bool user_request; + + retval = strtobool(buf, &user_request); + if (retval) + return retval; + + if (user_request) + retval = gb_svc_watchdog_enable(svc); + else + retval = gb_svc_watchdog_disable(svc); + if (retval) + return retval; + return len; +} +static DEVICE_ATTR_RW(watchdog); + +static ssize_t watchdog_action_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gb_svc *svc = to_gb_svc(dev); + + if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) + return sprintf(buf, "panic\n"); + else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) + return sprintf(buf, "reset\n"); + + return -EINVAL; +} + +static ssize_t watchdog_action_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct gb_svc *svc = to_gb_svc(dev); + + if (sysfs_streq(buf, "panic")) + svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; + else if (sysfs_streq(buf, "reset")) + svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; + else + return -EINVAL; + + return len; +} +static DEVICE_ATTR_RW(watchdog_action); + +static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) +{ + struct gb_svc_pwrmon_rail_count_get_response response; + int ret; + + ret = gb_operation_sync(svc->connection, + GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, + &response, sizeof(response)); + if (ret) { + dev_err(&svc->dev, "failed to get rail count: %d\n", ret); + return ret; + } + + *value = response.rail_count; + + return 0; +} + +static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, + struct gb_svc_pwrmon_rail_names_get_response *response, + size_t bufsize) +{ + int ret; + + ret = gb_operation_sync(svc->connection, + GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, + response, bufsize); + if (ret) { + dev_err(&svc->dev, "failed to get rail names: %d\n", ret); + return ret; + } + + if (response->status != GB_SVC_OP_SUCCESS) { + dev_err(&svc->dev, + "SVC error while getting rail names: %u\n", + response->status); + return -EREMOTEIO; + } + + return 0; +} + +static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, + u8 measurement_type, u32 *value) +{ + struct gb_svc_pwrmon_sample_get_request request; + struct gb_svc_pwrmon_sample_get_response response; + int ret; + + request.rail_id = rail_id; + request.measurement_type = measurement_type; + + ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, + &request, sizeof(request), + &response, sizeof(response)); + if (ret) { + dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); + return ret; + } + + if (response.result) { + dev_err(&svc->dev, + "UniPro error while getting rail power sample (%d %d): %d\n", + rail_id, measurement_type, response.result); + switch (response.result) { + case GB_SVC_PWRMON_GET_SAMPLE_INVAL: + return -EINVAL; + case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: + return -ENOMSG; + default: + return -EREMOTEIO; + } + } + + *value = le32_to_cpu(response.measurement); + + return 0; +} + +int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, + u8 measurement_type, u32 *value) +{ + struct gb_svc_pwrmon_intf_sample_get_request request; + struct gb_svc_pwrmon_intf_sample_get_response response; + int ret; + + request.intf_id = intf_id; + request.measurement_type = measurement_type; + + ret = gb_operation_sync(svc->connection, + GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, + 
&request, sizeof(request),
+				&response, sizeof(response));
+	if (ret) {
+		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
+		return ret;
+	}
+
+	if (response.result) {
+		dev_err(&svc->dev,
+			"UniPro error while getting intf power sample (%d %d): %d\n",
+			intf_id, measurement_type, response.result);
+		switch (response.result) {
+		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+			return -EINVAL;
+		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+			return -ENOMSG;
+		default:
+			return -EREMOTEIO;
+		}
+	}
+
+	*value = le32_to_cpu(response.measurement);
+
+	return 0;
+}
+
+static struct attribute *svc_attrs[] = {
+	&dev_attr_endo_id.attr,
+	&dev_attr_ap_intf_id.attr,
+	&dev_attr_intf_eject.attr,
+	&dev_attr_watchdog.attr,
+	&dev_attr_watchdog_action.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(svc);
+
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
+{
+	struct gb_svc_intf_device_id_request request;
+
+	request.intf_id = intf_id;
+	request.device_id = device_id;
+
+	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
+				 &request, sizeof(request), NULL, 0);
+}
+
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
+{
+	struct gb_svc_intf_eject_request request;
+	int ret;
+
+	request.intf_id = intf_id;
+
+	/*
+	 * The pulse width for module release in the SVC is long, so we need
+	 * to increase the timeout so that the operation will not return too
+	 * soon.
+	 */
+	ret = gb_operation_sync_timeout(svc->connection,
+					GB_SVC_TYPE_INTF_EJECT, &request,
+					sizeof(request), NULL, 0,
+					SVC_INTF_EJECT_TIMEOUT);
+	if (ret) {
+		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+	struct gb_svc_intf_vsys_request request;
+	struct gb_svc_intf_vsys_response response;
+	int type, ret;
+
+	request.intf_id = intf_id;
+
+	if (enable)
+		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
+	else
+		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
+
+	ret = gb_operation_sync(svc->connection, type,
+				&request, sizeof(request),
+				&response, sizeof(response));
+	if (ret < 0)
+		return ret;
+	if (response.result_code != GB_SVC_INTF_VSYS_OK)
+		return -EREMOTEIO;
+	return 0;
+}
+
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+	struct gb_svc_intf_refclk_request request;
+	struct gb_svc_intf_refclk_response response;
+	int type, ret;
+
+	request.intf_id = intf_id;
+
+	if (enable)
+		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
+	else
+		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
+
+	ret = gb_operation_sync(svc->connection, type,
+				&request, sizeof(request),
+				&response, sizeof(response));
+	if (ret < 0)
+		return ret;
+	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
+		return -EREMOTEIO;
+	return 0;
+}
+
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+	struct gb_svc_intf_unipro_request request;
+	struct gb_svc_intf_unipro_response response;
+	int type, ret;
+
+	request.intf_id = intf_id;
+
+	if (enable)
+		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
+	else
+		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
+
+	ret = gb_operation_sync(svc->connection, type,
+				&request, sizeof(request),
+				&response, sizeof(response));
+	if (ret < 0)
+		return ret;
+	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
+		return -EREMOTEIO;
+	return 0;
+}
+
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
+{
+	struct gb_svc_intf_activate_request request;
+	struct gb_svc_intf_activate_response response;
+	int ret;
+
+	request.intf_id = intf_id;
+
+	ret =
gb_operation_sync_timeout(svc->connection, + GB_SVC_TYPE_INTF_ACTIVATE, + &request, sizeof(request), + &response, sizeof(response), + SVC_INTF_ACTIVATE_TIMEOUT); + if (ret < 0) + return ret; + if (response.status != GB_SVC_OP_SUCCESS) { + dev_err(&svc->dev, "failed to activate interface %u: %u\n", + intf_id, response.status); + return -EREMOTEIO; + } + + *intf_type = response.intf_type; + + return 0; +} + +int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) +{ + struct gb_svc_intf_resume_request request; + struct gb_svc_intf_resume_response response; + int ret; + + request.intf_id = intf_id; + + ret = gb_operation_sync_timeout(svc->connection, + GB_SVC_TYPE_INTF_RESUME, + &request, sizeof(request), + &response, sizeof(response), + SVC_INTF_RESUME_TIMEOUT); + if (ret < 0) { + dev_err(&svc->dev, "failed to send interface resume %u: %d\n", + intf_id, ret); + return ret; + } + + if (response.status != GB_SVC_OP_SUCCESS) { + dev_err(&svc->dev, "failed to resume interface %u: %u\n", + intf_id, response.status); + return -EREMOTEIO; + } + + return 0; +} + +int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 *value) +{ + struct gb_svc_dme_peer_get_request request; + struct gb_svc_dme_peer_get_response response; + u16 result; + int ret; + + request.intf_id = intf_id; + request.attr = cpu_to_le16(attr); + request.selector = cpu_to_le16(selector); + + ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, + &request, sizeof(request), + &response, sizeof(response)); + if (ret) { + dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", + intf_id, attr, selector, ret); + return ret; + } + + result = le16_to_cpu(response.result_code); + if (result) { + dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", + intf_id, attr, selector, result); + return -EREMOTEIO; + } + + if (value) + *value = le32_to_cpu(response.attr_value); + + return 0; +} + +int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 value) +{ + struct gb_svc_dme_peer_set_request request; + struct gb_svc_dme_peer_set_response response; + u16 result; + int ret; + + request.intf_id = intf_id; + request.attr = cpu_to_le16(attr); + request.selector = cpu_to_le16(selector); + request.value = cpu_to_le32(value); + + ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, + &request, sizeof(request), + &response, sizeof(response)); + if (ret) { + dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", + intf_id, attr, selector, value, ret); + return ret; + } + + result = le16_to_cpu(response.result_code); + if (result) { + dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", + intf_id, attr, selector, value, result); + return -EREMOTEIO; + } + + return 0; +} + +int gb_svc_connection_create(struct gb_svc *svc, + u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id, + u8 cport_flags) +{ + struct gb_svc_conn_create_request request; + + request.intf1_id = intf1_id; + request.cport1_id = cpu_to_le16(cport1_id); + request.intf2_id = intf2_id; + request.cport2_id = cpu_to_le16(cport2_id); + request.tc = 0; /* TC0 */ + request.flags = cport_flags; + + return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, + &request, sizeof(request), NULL, 0); +} + +void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id) +{ + struct gb_svc_conn_destroy_request request; + struct gb_connection *connection = 
svc->connection; + int ret; + + request.intf1_id = intf1_id; + request.cport1_id = cpu_to_le16(cport1_id); + request.intf2_id = intf2_id; + request.cport2_id = cpu_to_le16(cport2_id); + + ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, + &request, sizeof(request), NULL, 0); + if (ret) { + dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", + intf1_id, cport1_id, intf2_id, cport2_id, ret); + } +} + +/* Creates bi-directional routes between the devices */ +int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, + u8 intf2_id, u8 dev2_id) +{ + struct gb_svc_route_create_request request; + + request.intf1_id = intf1_id; + request.dev1_id = dev1_id; + request.intf2_id = intf2_id; + request.dev2_id = dev2_id; + + return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, + &request, sizeof(request), NULL, 0); +} + +/* Destroys bi-directional routes between the devices */ +void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) +{ + struct gb_svc_route_destroy_request request; + int ret; + + request.intf1_id = intf1_id; + request.intf2_id = intf2_id; + + ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, + &request, sizeof(request), NULL, 0); + if (ret) { + dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", + intf1_id, intf2_id, ret); + } +} + +int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, + u8 tx_mode, u8 tx_gear, u8 tx_nlanes, + u8 tx_amplitude, u8 tx_hs_equalizer, + u8 rx_mode, u8 rx_gear, u8 rx_nlanes, + u8 flags, u32 quirks, + struct gb_svc_l2_timer_cfg *local, + struct gb_svc_l2_timer_cfg *remote) +{ + struct gb_svc_intf_set_pwrm_request request; + struct gb_svc_intf_set_pwrm_response response; + int ret; + u16 result_code; + + memset(&request, 0, sizeof(request)); + + request.intf_id = intf_id; + request.hs_series = hs_series; + request.tx_mode = tx_mode; + request.tx_gear = tx_gear; + request.tx_nlanes = tx_nlanes; + request.tx_amplitude = tx_amplitude; + request.tx_hs_equalizer = tx_hs_equalizer; + request.rx_mode = rx_mode; + request.rx_gear = rx_gear; + request.rx_nlanes = rx_nlanes; + request.flags = flags; + request.quirks = cpu_to_le32(quirks); + if (local) + request.local_l2timerdata = *local; + if (remote) + request.remote_l2timerdata = *remote; + + ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, + &request, sizeof(request), + &response, sizeof(response)); + if (ret < 0) + return ret; + + result_code = response.result_code; + if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { + dev_err(&svc->dev, "set power mode = %d\n", result_code); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); + +int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) +{ + struct gb_svc_intf_set_pwrm_request request; + struct gb_svc_intf_set_pwrm_response response; + int ret; + u16 result_code; + + memset(&request, 0, sizeof(request)); + + request.intf_id = intf_id; + request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; + request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; + request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; + + ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, + &request, sizeof(request), + &response, sizeof(response)); + if (ret < 0) { + dev_err(&svc->dev, + "failed to send set power mode operation to interface %u: %d\n", + intf_id, ret); + return ret; + } + + result_code = response.result_code; + if (result_code != GB_SVC_SETPWRM_PWR_OK) { + dev_err(&svc->dev, + "failed to hibernate the link 
for interface %u: %u\n", + intf_id, result_code); + return -EIO; + } + + return 0; +} + +int gb_svc_ping(struct gb_svc *svc) +{ + return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, + NULL, 0, NULL, 0, + GB_OPERATION_TIMEOUT_DEFAULT * 2); +} + +static int gb_svc_version_request(struct gb_operation *op) +{ + struct gb_connection *connection = op->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + struct gb_svc_version_request *request; + struct gb_svc_version_response *response; + + if (op->request->payload_size < sizeof(*request)) { + dev_err(&svc->dev, "short version request (%zu < %zu)\n", + op->request->payload_size, + sizeof(*request)); + return -EINVAL; + } + + request = op->request->payload; + + if (request->major > GB_SVC_VERSION_MAJOR) { + dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", + request->major, GB_SVC_VERSION_MAJOR); + return -ENOTSUPP; + } + + svc->protocol_major = request->major; + svc->protocol_minor = request->minor; + + if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) + return -ENOMEM; + + response = op->response->payload; + response->major = svc->protocol_major; + response->minor = svc->protocol_minor; + + return 0; +} + +static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, + size_t len, loff_t *offset) +{ + struct svc_debugfs_pwrmon_rail *pwrmon_rails = + file_inode(file)->i_private; + struct gb_svc *svc = pwrmon_rails->svc; + int ret, desc; + u32 value; + char buff[16]; + + ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, + GB_SVC_PWRMON_TYPE_VOL, &value); + if (ret) { + dev_err(&svc->dev, + "failed to get voltage sample %u: %d\n", + pwrmon_rails->id, ret); + return ret; + } + + desc = scnprintf(buff, sizeof(buff), "%u\n", value); + + return simple_read_from_buffer(buf, len, offset, buff, desc); +} + +static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, + size_t len, loff_t *offset) +{ + struct svc_debugfs_pwrmon_rail *pwrmon_rails = + file_inode(file)->i_private; + struct gb_svc *svc = pwrmon_rails->svc; + int ret, desc; + u32 value; + char buff[16]; + + ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, + GB_SVC_PWRMON_TYPE_CURR, &value); + if (ret) { + dev_err(&svc->dev, + "failed to get current sample %u: %d\n", + pwrmon_rails->id, ret); + return ret; + } + + desc = scnprintf(buff, sizeof(buff), "%u\n", value); + + return simple_read_from_buffer(buf, len, offset, buff, desc); +} + +static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, + size_t len, loff_t *offset) +{ + struct svc_debugfs_pwrmon_rail *pwrmon_rails = + file_inode(file)->i_private; + struct gb_svc *svc = pwrmon_rails->svc; + int ret, desc; + u32 value; + char buff[16]; + + ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, + GB_SVC_PWRMON_TYPE_PWR, &value); + if (ret) { + dev_err(&svc->dev, "failed to get power sample %u: %d\n", + pwrmon_rails->id, ret); + return ret; + } + + desc = scnprintf(buff, sizeof(buff), "%u\n", value); + + return simple_read_from_buffer(buf, len, offset, buff, desc); +} + +static const struct file_operations pwrmon_debugfs_voltage_fops = { + .read = pwr_debugfs_voltage_read, +}; + +static const struct file_operations pwrmon_debugfs_current_fops = { + .read = pwr_debugfs_current_read, +}; + +static const struct file_operations pwrmon_debugfs_power_fops = { + .read = pwr_debugfs_power_read, +}; + +static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc) +{ + int i; + size_t bufsize; + struct dentry *dent; + struct 
gb_svc_pwrmon_rail_names_get_response *rail_names; + u8 rail_count; + + dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry); + if (IS_ERR_OR_NULL(dent)) + return; + + if (gb_svc_pwrmon_rail_count_get(svc, &rail_count)) + goto err_pwrmon_debugfs; + + if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT) + goto err_pwrmon_debugfs; + + bufsize = sizeof(*rail_names) + + GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count; + + rail_names = kzalloc(bufsize, GFP_KERNEL); + if (!rail_names) + goto err_pwrmon_debugfs; + + svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails), + GFP_KERNEL); + if (!svc->pwrmon_rails) + goto err_pwrmon_debugfs_free; + + if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize)) + goto err_pwrmon_debugfs_free; + + for (i = 0; i < rail_count; i++) { + struct dentry *dir; + struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i]; + char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; + + snprintf(fname, sizeof(fname), "%s", + (char *)&rail_names->name[i]); + + rail->id = i; + rail->svc = svc; + + dir = debugfs_create_dir(fname, dent); + debugfs_create_file("voltage_now", 0444, dir, rail, + &pwrmon_debugfs_voltage_fops); + debugfs_create_file("current_now", 0444, dir, rail, + &pwrmon_debugfs_current_fops); + debugfs_create_file("power_now", 0444, dir, rail, + &pwrmon_debugfs_power_fops); + } + + kfree(rail_names); + return; + +err_pwrmon_debugfs_free: + kfree(rail_names); + kfree(svc->pwrmon_rails); + svc->pwrmon_rails = NULL; + +err_pwrmon_debugfs: + debugfs_remove(dent); +} + +static void gb_svc_debugfs_init(struct gb_svc *svc) +{ + svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), + gb_debugfs_get()); + gb_svc_pwrmon_debugfs_init(svc); +} + +static void gb_svc_debugfs_exit(struct gb_svc *svc) +{ + debugfs_remove_recursive(svc->debugfs_dentry); + kfree(svc->pwrmon_rails); + svc->pwrmon_rails = NULL; +} + +static int gb_svc_hello(struct gb_operation *op) +{ + struct gb_connection *connection = op->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + struct gb_svc_hello_request *hello_request; + int ret; + + if (op->request->payload_size < sizeof(*hello_request)) { + dev_warn(&svc->dev, "short hello request (%zu < %zu)\n", + op->request->payload_size, + sizeof(*hello_request)); + return -EINVAL; + } + + hello_request = op->request->payload; + svc->endo_id = le16_to_cpu(hello_request->endo_id); + svc->ap_intf_id = hello_request->interface_id; + + ret = device_add(&svc->dev); + if (ret) { + dev_err(&svc->dev, "failed to register svc device: %d\n", ret); + return ret; + } + + ret = gb_svc_watchdog_create(svc); + if (ret) { + dev_err(&svc->dev, "failed to create watchdog: %d\n", ret); + goto err_unregister_device; + } + + gb_svc_debugfs_init(svc); + + return gb_svc_queue_deferred_request(op); + +err_unregister_device: + gb_svc_watchdog_destroy(svc); + device_del(&svc->dev); + return ret; +} + +static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, + u8 intf_id) +{ + struct gb_host_device *hd = svc->hd; + struct gb_module *module; + size_t num_interfaces; + u8 module_id; + + list_for_each_entry(module, &hd->modules, hd_node) { + module_id = module->module_id; + num_interfaces = module->num_interfaces; + + if (intf_id >= module_id && + intf_id < module_id + num_interfaces) { + return module->interfaces[intf_id - module_id]; + } + } + + return NULL; +} + +static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) +{ + struct gb_host_device *hd = svc->hd; + struct gb_module *module; + + 
list_for_each_entry(module, &hd->modules, hd_node) { + if (module->module_id == module_id) + return module; + } + + return NULL; +} + +static void gb_svc_process_hello_deferred(struct gb_operation *operation) +{ + struct gb_connection *connection = operation->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + int ret; + + /* + * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch + * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient + * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged + * module. + * + * The code should be removed once SW-2217, Heuristic for UniPro + * Power Mode Changes is resolved. + */ + ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id, + GB_SVC_UNIPRO_HS_SERIES_A, + GB_SVC_UNIPRO_SLOW_AUTO_MODE, + 2, 1, + GB_SVC_SMALL_AMPLITUDE, + GB_SVC_NO_DE_EMPHASIS, + GB_SVC_UNIPRO_SLOW_AUTO_MODE, + 2, 1, + 0, 0, + NULL, NULL); + + if (ret) + dev_warn(&svc->dev, + "power mode change failed on AP to switch link: %d\n", + ret); +} + +static void gb_svc_process_module_inserted(struct gb_operation *operation) +{ + struct gb_svc_module_inserted_request *request; + struct gb_connection *connection = operation->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + struct gb_host_device *hd = svc->hd; + struct gb_module *module; + size_t num_interfaces; + u8 module_id; + u16 flags; + int ret; + + /* The request message size has already been verified. */ + request = operation->request->payload; + module_id = request->primary_intf_id; + num_interfaces = request->intf_count; + flags = le16_to_cpu(request->flags); + + dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n", + __func__, module_id, num_interfaces, flags); + + if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) { + dev_warn(&svc->dev, "no primary interface detected on module %u\n", + module_id); + } + + module = gb_svc_module_lookup(svc, module_id); + if (module) { + dev_warn(&svc->dev, "unexpected module-inserted event %u\n", + module_id); + return; + } + + module = gb_module_create(hd, module_id, num_interfaces); + if (!module) { + dev_err(&svc->dev, "failed to create module\n"); + return; + } + + ret = gb_module_add(module); + if (ret) { + gb_module_put(module); + return; + } + + list_add(&module->hd_node, &hd->modules); +} + +static void gb_svc_process_module_removed(struct gb_operation *operation) +{ + struct gb_svc_module_removed_request *request; + struct gb_connection *connection = operation->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + struct gb_module *module; + u8 module_id; + + /* The request message size has already been verified. */ + request = operation->request->payload; + module_id = request->primary_intf_id; + + dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id); + + module = gb_svc_module_lookup(svc, module_id); + if (!module) { + dev_warn(&svc->dev, "unexpected module-removed event %u\n", + module_id); + return; + } + + module->disconnected = true; + + gb_module_del(module); + list_del(&module->hd_node); + gb_module_put(module); +} + +static void gb_svc_process_intf_oops(struct gb_operation *operation) +{ + struct gb_svc_intf_oops_request *request; + struct gb_connection *connection = operation->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + struct gb_interface *intf; + u8 intf_id; + u8 reason; + + /* The request message size has already been verified. 
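+	 * (gb_svc_intf_oops_recv() rejects any short request before it is
+	 * deferred to this worker.)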
*/ + request = operation->request->payload; + intf_id = request->intf_id; + reason = request->reason; + + intf = gb_svc_interface_lookup(svc, intf_id); + if (!intf) { + dev_warn(&svc->dev, "unexpected interface-oops event %u\n", + intf_id); + return; + } + + dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n", + intf_id, reason); + + mutex_lock(&intf->mutex); + intf->disconnected = true; + gb_interface_disable(intf); + gb_interface_deactivate(intf); + mutex_unlock(&intf->mutex); +} + +static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) +{ + struct gb_svc_intf_mailbox_event_request *request; + struct gb_connection *connection = operation->connection; + struct gb_svc *svc = gb_connection_get_data(connection); + struct gb_interface *intf; + u8 intf_id; + u16 result_code; + u32 mailbox; + + /* The request message size has already been verified. */ + request = operation->request->payload; + intf_id = request->intf_id; + result_code = le16_to_cpu(request->result_code); + mailbox = le32_to_cpu(request->mailbox); + + dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n", + __func__, intf_id, result_code, mailbox); + + intf = gb_svc_interface_lookup(svc, intf_id); + if (!intf) { + dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id); + return; + } + + gb_interface_mailbox_event(intf, result_code, mailbox); +} + +static void gb_svc_process_deferred_request(struct work_struct *work) +{ + struct gb_svc_deferred_request *dr; + struct gb_operation *operation; + struct gb_svc *svc; + u8 type; + + dr = container_of(work, struct gb_svc_deferred_request, work); + operation = dr->operation; + svc = gb_connection_get_data(operation->connection); + type = operation->request->header->type; + + switch (type) { + case GB_SVC_TYPE_SVC_HELLO: + gb_svc_process_hello_deferred(operation); + break; + case GB_SVC_TYPE_MODULE_INSERTED: + gb_svc_process_module_inserted(operation); + break; + case GB_SVC_TYPE_MODULE_REMOVED: + gb_svc_process_module_removed(operation); + break; + case GB_SVC_TYPE_INTF_MAILBOX_EVENT: + gb_svc_process_intf_mailbox_event(operation); + break; + case GB_SVC_TYPE_INTF_OOPS: + gb_svc_process_intf_oops(operation); + break; + default: + dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type); + } + + gb_operation_put(operation); + kfree(dr); +} + +static int gb_svc_queue_deferred_request(struct gb_operation *operation) +{ + struct gb_svc *svc = gb_connection_get_data(operation->connection); + struct gb_svc_deferred_request *dr; + + dr = kmalloc(sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + gb_operation_get(operation); + + dr->operation = operation; + INIT_WORK(&dr->work, gb_svc_process_deferred_request); + + queue_work(svc->wq, &dr->work); + + return 0; +} + +static int gb_svc_intf_reset_recv(struct gb_operation *op) +{ + struct gb_svc *svc = gb_connection_get_data(op->connection); + struct gb_message *request = op->request; + struct gb_svc_intf_reset_request *reset; + + if (request->payload_size < sizeof(*reset)) { + dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n", + request->payload_size, sizeof(*reset)); + return -EINVAL; + } + reset = request->payload; + + /* FIXME Reset the interface here */ + + return 0; +} + +static int gb_svc_module_inserted_recv(struct gb_operation *op) +{ + struct gb_svc *svc = gb_connection_get_data(op->connection); + struct gb_svc_module_inserted_request *request; + + if (op->request->payload_size < sizeof(*request)) { + dev_warn(&svc->dev, "short 
module-inserted request received (%zu < %zu)\n",
+			 op->request->payload_size, sizeof(*request));
+		return -EINVAL;
+	}
+
+	request = op->request->payload;
+
+	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+		request->primary_intf_id);
+
+	return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_module_removed_recv(struct gb_operation *op)
+{
+	struct gb_svc *svc = gb_connection_get_data(op->connection);
+	struct gb_svc_module_removed_request *request;
+
+	if (op->request->payload_size < sizeof(*request)) {
+		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
+			 op->request->payload_size, sizeof(*request));
+		return -EINVAL;
+	}
+
+	request = op->request->payload;
+
+	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+		request->primary_intf_id);
+
+	return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_oops_recv(struct gb_operation *op)
+{
+	struct gb_svc *svc = gb_connection_get_data(op->connection);
+	struct gb_svc_intf_oops_request *request;
+
+	if (op->request->payload_size < sizeof(*request)) {
+		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
+			 op->request->payload_size, sizeof(*request));
+		return -EINVAL;
+	}
+
+	return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
+{
+	struct gb_svc *svc = gb_connection_get_data(op->connection);
+	struct gb_svc_intf_mailbox_event_request *request;
+
+	if (op->request->payload_size < sizeof(*request)) {
+		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
+			 op->request->payload_size, sizeof(*request));
+		return -EINVAL;
+	}
+
+	request = op->request->payload;
+
+	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
+
+	return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_request_handler(struct gb_operation *op)
+{
+	struct gb_connection *connection = op->connection;
+	struct gb_svc *svc = gb_connection_get_data(connection);
+	u8 type = op->type;
+	int ret = 0;
+
+	/*
+	 * SVC requests need to follow a specific order (at least initially),
+	 * and the code below takes care of enforcing that. The expected order
+	 * is:
+	 * - PROTOCOL_VERSION
+	 * - SVC_HELLO
+	 * - Any other request, except the two above.
+	 *
+	 * Incoming requests are guaranteed to be serialized, so we don't
+	 * need to protect 'state' against races.
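+	 *
+	 * Schematically, using the GB_SVC_STATE_* names from the switch
+	 * below:
+	 *
+	 *	RESET -> PROTOCOL_VERSION -> SVC_HELLO
+	 *
+	 * and only once SVC_HELLO has been reached are the remaining
+	 * request types accepted.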
+ */ + switch (type) { + case GB_SVC_TYPE_PROTOCOL_VERSION: + if (svc->state != GB_SVC_STATE_RESET) + ret = -EINVAL; + break; + case GB_SVC_TYPE_SVC_HELLO: + if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION) + ret = -EINVAL; + break; + default: + if (svc->state != GB_SVC_STATE_SVC_HELLO) + ret = -EINVAL; + break; + } + + if (ret) { + dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n", + type, svc->state); + return ret; + } + + switch (type) { + case GB_SVC_TYPE_PROTOCOL_VERSION: + ret = gb_svc_version_request(op); + if (!ret) + svc->state = GB_SVC_STATE_PROTOCOL_VERSION; + return ret; + case GB_SVC_TYPE_SVC_HELLO: + ret = gb_svc_hello(op); + if (!ret) + svc->state = GB_SVC_STATE_SVC_HELLO; + return ret; + case GB_SVC_TYPE_INTF_RESET: + return gb_svc_intf_reset_recv(op); + case GB_SVC_TYPE_MODULE_INSERTED: + return gb_svc_module_inserted_recv(op); + case GB_SVC_TYPE_MODULE_REMOVED: + return gb_svc_module_removed_recv(op); + case GB_SVC_TYPE_INTF_MAILBOX_EVENT: + return gb_svc_intf_mailbox_event_recv(op); + case GB_SVC_TYPE_INTF_OOPS: + return gb_svc_intf_oops_recv(op); + default: + dev_warn(&svc->dev, "unsupported request 0x%02x\n", type); + return -EINVAL; + } +} + +static void gb_svc_release(struct device *dev) +{ + struct gb_svc *svc = to_gb_svc(dev); + + if (svc->connection) + gb_connection_destroy(svc->connection); + ida_destroy(&svc->device_id_map); + destroy_workqueue(svc->wq); + kfree(svc); +} + +struct device_type greybus_svc_type = { + .name = "greybus_svc", + .release = gb_svc_release, +}; + +struct gb_svc *gb_svc_create(struct gb_host_device *hd) +{ + struct gb_svc *svc; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return NULL; + + svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); + if (!svc->wq) { + kfree(svc); + return NULL; + } + + svc->dev.parent = &hd->dev; + svc->dev.bus = &greybus_bus_type; + svc->dev.type = &greybus_svc_type; + svc->dev.groups = svc_groups; + svc->dev.dma_mask = svc->dev.parent->dma_mask; + device_initialize(&svc->dev); + + dev_set_name(&svc->dev, "%d-svc", hd->bus_id); + + ida_init(&svc->device_id_map); + svc->state = GB_SVC_STATE_RESET; + svc->hd = hd; + + svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, + gb_svc_request_handler); + if (IS_ERR(svc->connection)) { + dev_err(&svc->dev, "failed to create connection: %ld\n", + PTR_ERR(svc->connection)); + goto err_put_device; + } + + gb_connection_set_data(svc->connection, svc); + + return svc; + +err_put_device: + put_device(&svc->dev); + return NULL; +} + +int gb_svc_add(struct gb_svc *svc) +{ + int ret; + + /* + * The SVC protocol is currently driven by the SVC, so the SVC device + * is added from the connection request handler when enough + * information has been received. + */ + ret = gb_connection_enable(svc->connection); + if (ret) + return ret; + + return 0; +} + +static void gb_svc_remove_modules(struct gb_svc *svc) +{ + struct gb_host_device *hd = svc->hd; + struct gb_module *module, *tmp; + + list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { + gb_module_del(module); + list_del(&module->hd_node); + gb_module_put(module); + } +} + +void gb_svc_del(struct gb_svc *svc) +{ + gb_connection_disable_rx(svc->connection); + + /* + * The SVC device may have been registered from the request handler. 
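+	 * Only tear down the pieces that were set up there (debugfs
+	 * entries, the watchdog and the device itself).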
+	 */
+	if (device_is_registered(&svc->dev)) {
+		gb_svc_debugfs_exit(svc);
+		gb_svc_watchdog_destroy(svc);
+		device_del(&svc->dev);
+	}
+
+	flush_workqueue(svc->wq);
+
+	gb_svc_remove_modules(svc);
+
+	gb_connection_disable(svc->connection);
+}
+
+void gb_svc_put(struct gb_svc *svc)
+{
+	put_device(&svc->dev);
+}
diff --git a/drivers/greybus/svc_watchdog.c b/drivers/greybus/svc_watchdog.c
new file mode 100644
index 000000000000..b6b1682c19c4
--- /dev/null
+++ b/drivers/greybus/svc_watchdog.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SVC Greybus "watchdog" driver.
+ *
+ * Copyright 2016 Google Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#define SVC_WATCHDOG_PERIOD	(2 * HZ)
+
+struct gb_svc_watchdog {
+	struct delayed_work work;
+	struct gb_svc *svc;
+	bool enabled;
+	struct notifier_block pm_notifier;
+};
+
+static struct delayed_work reset_work;
+
+static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
+				    unsigned long pm_event, void *unused)
+{
+	struct gb_svc_watchdog *watchdog =
+		container_of(notifier, struct gb_svc_watchdog, pm_notifier);
+
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		gb_svc_watchdog_disable(watchdog->svc);
+		break;
+	case PM_POST_SUSPEND:
+		gb_svc_watchdog_enable(watchdog->svc);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void greybus_reset(struct work_struct *work)
+{
+	static char const start_path[] = "/system/bin/start";
+	static char *envp[] = {
+		"HOME=/",
+		"PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
+		NULL,
+	};
+	static char *argv[] = {
+		(char *)start_path,
+		"unipro_reset",
+		NULL,
+	};
+
+	pr_err("svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
+	       argv[0], argv[1]);
+	call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
+}
+
+static void do_work(struct work_struct *work)
+{
+	struct gb_svc_watchdog *watchdog;
+	struct gb_svc *svc;
+	int retval;
+
+	watchdog = container_of(work, struct gb_svc_watchdog, work.work);
+	svc = watchdog->svc;
+
+	dev_dbg(&svc->dev, "%s: ping.\n", __func__);
+	retval = gb_svc_ping(svc);
+	if (retval) {
+		/*
+		 * Something went really wrong, let's warn userspace and then
+		 * pull the plug and reset the whole greybus network.
+		 * We need to do this outside of this workqueue as we will be
+		 * tearing down the svc device itself. So queue up
+		 * yet-another-callback to do that.
+		 */
+		dev_err(&svc->dev,
+			"SVC ping has returned %d, something is wrong!!!\n",
+			retval);
+
+		if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
+			panic("SVC is not responding\n");
+		} else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
+			dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");
+
+			INIT_DELAYED_WORK(&reset_work, greybus_reset);
+			schedule_delayed_work(&reset_work, HZ / 2);
+
+			/*
+			 * Disable ourselves, we don't want to trip again unless
+			 * userspace wants us to.
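+			 * (Clearing ->enabled below also stops the
+			 * self-rescheduling at the end of this function.)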
+ */ + watchdog->enabled = false; + } + } + + /* resubmit our work to happen again, if we are still "alive" */ + if (watchdog->enabled) + schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); +} + +int gb_svc_watchdog_create(struct gb_svc *svc) +{ + struct gb_svc_watchdog *watchdog; + int retval; + + if (svc->watchdog) + return 0; + + watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL); + if (!watchdog) + return -ENOMEM; + + watchdog->enabled = false; + watchdog->svc = svc; + INIT_DELAYED_WORK(&watchdog->work, do_work); + svc->watchdog = watchdog; + + watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier; + retval = register_pm_notifier(&watchdog->pm_notifier); + if (retval) { + dev_err(&svc->dev, "error registering pm notifier(%d)\n", + retval); + goto svc_watchdog_create_err; + } + + retval = gb_svc_watchdog_enable(svc); + if (retval) { + dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval); + unregister_pm_notifier(&watchdog->pm_notifier); + goto svc_watchdog_create_err; + } + return retval; + +svc_watchdog_create_err: + svc->watchdog = NULL; + kfree(watchdog); + + return retval; +} + +void gb_svc_watchdog_destroy(struct gb_svc *svc) +{ + struct gb_svc_watchdog *watchdog = svc->watchdog; + + if (!watchdog) + return; + + unregister_pm_notifier(&watchdog->pm_notifier); + gb_svc_watchdog_disable(svc); + svc->watchdog = NULL; + kfree(watchdog); +} + +bool gb_svc_watchdog_enabled(struct gb_svc *svc) +{ + if (!svc || !svc->watchdog) + return false; + return svc->watchdog->enabled; +} + +int gb_svc_watchdog_enable(struct gb_svc *svc) +{ + struct gb_svc_watchdog *watchdog; + + if (!svc->watchdog) + return -ENODEV; + + watchdog = svc->watchdog; + if (watchdog->enabled) + return 0; + + watchdog->enabled = true; + schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); + return 0; +} + +int gb_svc_watchdog_disable(struct gb_svc *svc) +{ + struct gb_svc_watchdog *watchdog; + + if (!svc->watchdog) + return -ENODEV; + + watchdog = svc->watchdog; + if (!watchdog->enabled) + return 0; + + watchdog->enabled = false; + cancel_delayed_work_sync(&watchdog->work); + return 0; +} diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig index 4894c3514955..d03c37e1e6e8 100644 --- a/drivers/staging/greybus/Kconfig +++ b/drivers/staging/greybus/Kconfig @@ -1,20 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -menuconfig GREYBUS - tristate "Greybus support" - depends on SYSFS - ---help--- - This option enables the Greybus driver core. Greybus is an - hardware protocol that was designed to provide Unipro with a - sane application layer. It was originally designed for the - ARA project, a module phone system, but has shown up in other - phones, and can be tunneled over other busses in order to - control hardware devices. - - Say Y here to enable support for these types of drivers. 
- - To compile this code as a module, chose M here: the module - will be called greybus.ko - if GREYBUS config GREYBUS_ES2 diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile index 2551ed16b742..d16853399c9a 100644 --- a/drivers/staging/greybus/Makefile +++ b/drivers/staging/greybus/Makefile @@ -1,24 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -# Greybus core -greybus-y := core.o \ - debugfs.o \ - hd.o \ - manifest.o \ - module.o \ - interface.o \ - bundle.o \ - connection.o \ - control.o \ - svc.o \ - svc_watchdog.o \ - operation.o - -obj-$(CONFIG_GREYBUS) += greybus.o - # needed for trace events ccflags-y += -I$(src) - # Greybus Host controller drivers gb-es2-y := es2.o diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c deleted file mode 100644 index 84660729538b..000000000000 --- a/drivers/staging/greybus/bundle.c +++ /dev/null @@ -1,252 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus bundles - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. - */ - -#include -#include "greybus_trace.h" - -static ssize_t bundle_class_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - - return sprintf(buf, "0x%02x\n", bundle->class); -} -static DEVICE_ATTR_RO(bundle_class); - -static ssize_t bundle_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - - return sprintf(buf, "%u\n", bundle->id); -} -static DEVICE_ATTR_RO(bundle_id); - -static ssize_t state_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - - if (!bundle->state) - return sprintf(buf, "\n"); - - return sprintf(buf, "%s\n", bundle->state); -} - -static ssize_t state_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t size) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - - kfree(bundle->state); - bundle->state = kstrdup(buf, GFP_KERNEL); - if (!bundle->state) - return -ENOMEM; - - /* Tell userspace that the file contents changed */ - sysfs_notify(&bundle->dev.kobj, NULL, "state"); - - return size; -} -static DEVICE_ATTR_RW(state); - -static struct attribute *bundle_attrs[] = { - &dev_attr_bundle_class.attr, - &dev_attr_bundle_id.attr, - &dev_attr_state.attr, - NULL, -}; - -ATTRIBUTE_GROUPS(bundle); - -static struct gb_bundle *gb_bundle_find(struct gb_interface *intf, - u8 bundle_id) -{ - struct gb_bundle *bundle; - - list_for_each_entry(bundle, &intf->bundles, links) { - if (bundle->id == bundle_id) - return bundle; - } - - return NULL; -} - -static void gb_bundle_release(struct device *dev) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - - trace_gb_bundle_release(bundle); - - kfree(bundle->state); - kfree(bundle->cport_desc); - kfree(bundle); -} - -#ifdef CONFIG_PM -static void gb_bundle_disable_all_connections(struct gb_bundle *bundle) -{ - struct gb_connection *connection; - - list_for_each_entry(connection, &bundle->connections, bundle_links) - gb_connection_disable(connection); -} - -static void gb_bundle_enable_all_connections(struct gb_bundle *bundle) -{ - struct gb_connection *connection; - - list_for_each_entry(connection, &bundle->connections, bundle_links) - gb_connection_enable(connection); -} - -static int gb_bundle_suspend(struct device *dev) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - const struct dev_pm_ops *pm = dev->driver->pm; - int ret; - - if (pm && pm->runtime_suspend) 
{ - ret = pm->runtime_suspend(&bundle->dev); - if (ret) - return ret; - } else { - gb_bundle_disable_all_connections(bundle); - } - - ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id); - if (ret) { - if (pm && pm->runtime_resume) - ret = pm->runtime_resume(dev); - else - gb_bundle_enable_all_connections(bundle); - - return ret; - } - - return 0; -} - -static int gb_bundle_resume(struct device *dev) -{ - struct gb_bundle *bundle = to_gb_bundle(dev); - const struct dev_pm_ops *pm = dev->driver->pm; - int ret; - - ret = gb_control_bundle_resume(bundle->intf->control, bundle->id); - if (ret) - return ret; - - if (pm && pm->runtime_resume) { - ret = pm->runtime_resume(dev); - if (ret) - return ret; - } else { - gb_bundle_enable_all_connections(bundle); - } - - return 0; -} - -static int gb_bundle_idle(struct device *dev) -{ - pm_runtime_mark_last_busy(dev); - pm_request_autosuspend(dev); - - return 0; -} -#endif - -static const struct dev_pm_ops gb_bundle_pm_ops = { - SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle) -}; - -struct device_type greybus_bundle_type = { - .name = "greybus_bundle", - .release = gb_bundle_release, - .pm = &gb_bundle_pm_ops, -}; - -/* - * Create a gb_bundle structure to represent a discovered - * bundle. Returns a pointer to the new bundle or a null - * pointer if a failure occurs due to memory exhaustion. - */ -struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, - u8 class) -{ - struct gb_bundle *bundle; - - if (bundle_id == BUNDLE_ID_NONE) { - dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id); - return NULL; - } - - /* - * Reject any attempt to reuse a bundle id. We initialize - * these serially, so there's no need to worry about keeping - * the interface bundle list locked here. - */ - if (gb_bundle_find(intf, bundle_id)) { - dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id); - return NULL; - } - - bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); - if (!bundle) - return NULL; - - bundle->intf = intf; - bundle->id = bundle_id; - bundle->class = class; - INIT_LIST_HEAD(&bundle->connections); - - bundle->dev.parent = &intf->dev; - bundle->dev.bus = &greybus_bus_type; - bundle->dev.type = &greybus_bundle_type; - bundle->dev.groups = bundle_groups; - bundle->dev.dma_mask = intf->dev.dma_mask; - device_initialize(&bundle->dev); - dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id); - - list_add(&bundle->links, &intf->bundles); - - trace_gb_bundle_create(bundle); - - return bundle; -} - -int gb_bundle_add(struct gb_bundle *bundle) -{ - int ret; - - ret = device_add(&bundle->dev); - if (ret) { - dev_err(&bundle->dev, "failed to register bundle: %d\n", ret); - return ret; - } - - trace_gb_bundle_add(bundle); - - return 0; -} - -/* - * Tear down a previously set up bundle. - */ -void gb_bundle_destroy(struct gb_bundle *bundle) -{ - trace_gb_bundle_destroy(bundle); - - if (device_is_registered(&bundle->dev)) - device_del(&bundle->dev); - - list_del(&bundle->links); - - put_device(&bundle->dev); -} diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c deleted file mode 100644 index fc8f57f97ce6..000000000000 --- a/drivers/staging/greybus/connection.c +++ /dev/null @@ -1,942 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus connections - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. 
- */ - -#include -#include - -#include "greybus_trace.h" - -#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000 - -static void gb_connection_kref_release(struct kref *kref); - -static DEFINE_SPINLOCK(gb_connections_lock); -static DEFINE_MUTEX(gb_connection_mutex); - -/* Caller holds gb_connection_mutex. */ -static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id) -{ - struct gb_host_device *hd = intf->hd; - struct gb_connection *connection; - - list_for_each_entry(connection, &hd->connections, hd_links) { - if (connection->intf == intf && - connection->intf_cport_id == cport_id) - return true; - } - - return false; -} - -static void gb_connection_get(struct gb_connection *connection) -{ - kref_get(&connection->kref); - - trace_gb_connection_get(connection); -} - -static void gb_connection_put(struct gb_connection *connection) -{ - trace_gb_connection_put(connection); - - kref_put(&connection->kref, gb_connection_kref_release); -} - -/* - * Returns a reference-counted pointer to the connection if found. - */ -static struct gb_connection * -gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id) -{ - struct gb_connection *connection; - unsigned long flags; - - spin_lock_irqsave(&gb_connections_lock, flags); - list_for_each_entry(connection, &hd->connections, hd_links) - if (connection->hd_cport_id == cport_id) { - gb_connection_get(connection); - goto found; - } - connection = NULL; -found: - spin_unlock_irqrestore(&gb_connections_lock, flags); - - return connection; -} - -/* - * Callback from the host driver to let us know that data has been - * received on the bundle. - */ -void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, - u8 *data, size_t length) -{ - struct gb_connection *connection; - - trace_gb_hd_in(hd); - - connection = gb_connection_hd_find(hd, cport_id); - if (!connection) { - dev_err(&hd->dev, - "nonexistent connection (%zu bytes dropped)\n", length); - return; - } - gb_connection_recv(connection, data, length); - gb_connection_put(connection); -} -EXPORT_SYMBOL_GPL(greybus_data_rcvd); - -static void gb_connection_kref_release(struct kref *kref) -{ - struct gb_connection *connection; - - connection = container_of(kref, struct gb_connection, kref); - - trace_gb_connection_release(connection); - - kfree(connection); -} - -static void gb_connection_init_name(struct gb_connection *connection) -{ - u16 hd_cport_id = connection->hd_cport_id; - u16 cport_id = 0; - u8 intf_id = 0; - - if (connection->intf) { - intf_id = connection->intf->interface_id; - cport_id = connection->intf_cport_id; - } - - snprintf(connection->name, sizeof(connection->name), - "%u/%u:%u", hd_cport_id, intf_id, cport_id); -} - -/* - * _gb_connection_create() - create a Greybus connection - * @hd: host device of the connection - * @hd_cport_id: host-device cport id, or -1 for dynamic allocation - * @intf: remote interface, or NULL for static connections - * @bundle: remote-interface bundle (may be NULL) - * @cport_id: remote-interface cport id, or 0 for static connections - * @handler: request handler (may be NULL) - * @flags: connection flags - * - * Create a Greybus connection, representing the bidirectional link - * between a CPort on a (local) Greybus host device and a CPort on - * another Greybus interface. - * - * A connection also maintains the state of operations sent over the - * connection. - * - * Serialised against concurrent create and destroy using the - * gb_connection_mutex. 
- * - * Return: A pointer to the new connection if successful, or an ERR_PTR - * otherwise. - */ -static struct gb_connection * -_gb_connection_create(struct gb_host_device *hd, int hd_cport_id, - struct gb_interface *intf, - struct gb_bundle *bundle, int cport_id, - gb_request_handler_t handler, - unsigned long flags) -{ - struct gb_connection *connection; - int ret; - - mutex_lock(&gb_connection_mutex); - - if (intf && gb_connection_cport_in_use(intf, cport_id)) { - dev_err(&intf->dev, "cport %u already in use\n", cport_id); - ret = -EBUSY; - goto err_unlock; - } - - ret = gb_hd_cport_allocate(hd, hd_cport_id, flags); - if (ret < 0) { - dev_err(&hd->dev, "failed to allocate cport: %d\n", ret); - goto err_unlock; - } - hd_cport_id = ret; - - connection = kzalloc(sizeof(*connection), GFP_KERNEL); - if (!connection) { - ret = -ENOMEM; - goto err_hd_cport_release; - } - - connection->hd_cport_id = hd_cport_id; - connection->intf_cport_id = cport_id; - connection->hd = hd; - connection->intf = intf; - connection->bundle = bundle; - connection->handler = handler; - connection->flags = flags; - if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES)) - connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL; - connection->state = GB_CONNECTION_STATE_DISABLED; - - atomic_set(&connection->op_cycle, 0); - mutex_init(&connection->mutex); - spin_lock_init(&connection->lock); - INIT_LIST_HEAD(&connection->operations); - - connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1, - dev_name(&hd->dev), hd_cport_id); - if (!connection->wq) { - ret = -ENOMEM; - goto err_free_connection; - } - - kref_init(&connection->kref); - - gb_connection_init_name(connection); - - spin_lock_irq(&gb_connections_lock); - list_add(&connection->hd_links, &hd->connections); - - if (bundle) - list_add(&connection->bundle_links, &bundle->connections); - else - INIT_LIST_HEAD(&connection->bundle_links); - - spin_unlock_irq(&gb_connections_lock); - - mutex_unlock(&gb_connection_mutex); - - trace_gb_connection_create(connection); - - return connection; - -err_free_connection: - kfree(connection); -err_hd_cport_release: - gb_hd_cport_release(hd, hd_cport_id); -err_unlock: - mutex_unlock(&gb_connection_mutex); - - return ERR_PTR(ret); -} - -struct gb_connection * -gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id, - gb_request_handler_t handler) -{ - return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler, - GB_CONNECTION_FLAG_HIGH_PRIO); -} - -struct gb_connection * -gb_connection_create_control(struct gb_interface *intf) -{ - return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, - GB_CONNECTION_FLAG_CONTROL | - GB_CONNECTION_FLAG_HIGH_PRIO); -} - -struct gb_connection * -gb_connection_create(struct gb_bundle *bundle, u16 cport_id, - gb_request_handler_t handler) -{ - struct gb_interface *intf = bundle->intf; - - return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, - handler, 0); -} -EXPORT_SYMBOL_GPL(gb_connection_create); - -struct gb_connection * -gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id, - gb_request_handler_t handler, - unsigned long flags) -{ - struct gb_interface *intf = bundle->intf; - - if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK)) - flags &= ~GB_CONNECTION_FLAG_CORE_MASK; - - return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, - handler, flags); -} -EXPORT_SYMBOL_GPL(gb_connection_create_flags); - -struct gb_connection * -gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id, - unsigned long 
flags) -{ - flags |= GB_CONNECTION_FLAG_OFFLOADED; - - return gb_connection_create_flags(bundle, cport_id, NULL, flags); -} -EXPORT_SYMBOL_GPL(gb_connection_create_offloaded); - -static int gb_connection_hd_cport_enable(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->cport_enable) - return 0; - - ret = hd->driver->cport_enable(hd, connection->hd_cport_id, - connection->flags); - if (ret) { - dev_err(&hd->dev, "%s: failed to enable host cport: %d\n", - connection->name, ret); - return ret; - } - - return 0; -} - -static void gb_connection_hd_cport_disable(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->cport_disable) - return; - - ret = hd->driver->cport_disable(hd, connection->hd_cport_id); - if (ret) { - dev_err(&hd->dev, "%s: failed to disable host cport: %d\n", - connection->name, ret); - } -} - -static int gb_connection_hd_cport_connected(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->cport_connected) - return 0; - - ret = hd->driver->cport_connected(hd, connection->hd_cport_id); - if (ret) { - dev_err(&hd->dev, "%s: failed to set connected state: %d\n", - connection->name, ret); - return ret; - } - - return 0; -} - -static int gb_connection_hd_cport_flush(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->cport_flush) - return 0; - - ret = hd->driver->cport_flush(hd, connection->hd_cport_id); - if (ret) { - dev_err(&hd->dev, "%s: failed to flush host cport: %d\n", - connection->name, ret); - return ret; - } - - return 0; -} - -static int gb_connection_hd_cport_quiesce(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - size_t peer_space; - int ret; - - if (!hd->driver->cport_quiesce) - return 0; - - peer_space = sizeof(struct gb_operation_msg_hdr) + - sizeof(struct gb_cport_shutdown_request); - - if (connection->mode_switch) - peer_space += sizeof(struct gb_operation_msg_hdr); - - if (!hd->driver->cport_quiesce) - return 0; - - ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id, - peer_space, - GB_CONNECTION_CPORT_QUIESCE_TIMEOUT); - if (ret) { - dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n", - connection->name, ret); - return ret; - } - - return 0; -} - -static int gb_connection_hd_cport_clear(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->cport_clear) - return 0; - - ret = hd->driver->cport_clear(hd, connection->hd_cport_id); - if (ret) { - dev_err(&hd->dev, "%s: failed to clear host cport: %d\n", - connection->name, ret); - return ret; - } - - return 0; -} - -/* - * Request the SVC to create a connection from AP's cport to interface's - * cport. - */ -static int -gb_connection_svc_connection_create(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - struct gb_interface *intf; - u8 cport_flags; - int ret; - - if (gb_connection_is_static(connection)) - return 0; - - intf = connection->intf; - - /* - * Enable either E2EFC or CSD, unless no flow control is requested. 
- */ - cport_flags = GB_SVC_CPORT_FLAG_CSV_N; - if (gb_connection_flow_control_disabled(connection)) { - cport_flags |= GB_SVC_CPORT_FLAG_CSD_N; - } else if (gb_connection_e2efc_enabled(connection)) { - cport_flags |= GB_SVC_CPORT_FLAG_CSD_N | - GB_SVC_CPORT_FLAG_E2EFC; - } - - ret = gb_svc_connection_create(hd->svc, - hd->svc->ap_intf_id, - connection->hd_cport_id, - intf->interface_id, - connection->intf_cport_id, - cport_flags); - if (ret) { - dev_err(&connection->hd->dev, - "%s: failed to create svc connection: %d\n", - connection->name, ret); - return ret; - } - - return 0; -} - -static void -gb_connection_svc_connection_destroy(struct gb_connection *connection) -{ - if (gb_connection_is_static(connection)) - return; - - gb_svc_connection_destroy(connection->hd->svc, - connection->hd->svc->ap_intf_id, - connection->hd_cport_id, - connection->intf->interface_id, - connection->intf_cport_id); -} - -/* Inform Interface about active CPorts */ -static int gb_connection_control_connected(struct gb_connection *connection) -{ - struct gb_control *control; - u16 cport_id = connection->intf_cport_id; - int ret; - - if (gb_connection_is_static(connection)) - return 0; - - if (gb_connection_is_control(connection)) - return 0; - - control = connection->intf->control; - - ret = gb_control_connected_operation(control, cport_id); - if (ret) { - dev_err(&connection->bundle->dev, - "failed to connect cport: %d\n", ret); - return ret; - } - - return 0; -} - -static void -gb_connection_control_disconnecting(struct gb_connection *connection) -{ - struct gb_control *control; - u16 cport_id = connection->intf_cport_id; - int ret; - - if (gb_connection_is_static(connection)) - return; - - control = connection->intf->control; - - ret = gb_control_disconnecting_operation(control, cport_id); - if (ret) { - dev_err(&connection->hd->dev, - "%s: failed to send disconnecting: %d\n", - connection->name, ret); - } -} - -static void -gb_connection_control_disconnected(struct gb_connection *connection) -{ - struct gb_control *control; - u16 cport_id = connection->intf_cport_id; - int ret; - - if (gb_connection_is_static(connection)) - return; - - control = connection->intf->control; - - if (gb_connection_is_control(connection)) { - if (connection->mode_switch) { - ret = gb_control_mode_switch_operation(control); - if (ret) { - /* - * Allow mode switch to time out waiting for - * mailbox event. 
- */ - return; - } - } - - return; - } - - ret = gb_control_disconnected_operation(control, cport_id); - if (ret) { - dev_warn(&connection->bundle->dev, - "failed to disconnect cport: %d\n", ret); - } -} - -static int gb_connection_shutdown_operation(struct gb_connection *connection, - u8 phase) -{ - struct gb_cport_shutdown_request *req; - struct gb_operation *operation; - int ret; - - operation = gb_operation_create_core(connection, - GB_REQUEST_TYPE_CPORT_SHUTDOWN, - sizeof(*req), 0, 0, - GFP_KERNEL); - if (!operation) - return -ENOMEM; - - req = operation->request->payload; - req->phase = phase; - - ret = gb_operation_request_send_sync(operation); - - gb_operation_put(operation); - - return ret; -} - -static int gb_connection_cport_shutdown(struct gb_connection *connection, - u8 phase) -{ - struct gb_host_device *hd = connection->hd; - const struct gb_hd_driver *drv = hd->driver; - int ret; - - if (gb_connection_is_static(connection)) - return 0; - - if (gb_connection_is_offloaded(connection)) { - if (!drv->cport_shutdown) - return 0; - - ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase, - GB_OPERATION_TIMEOUT_DEFAULT); - } else { - ret = gb_connection_shutdown_operation(connection, phase); - } - - if (ret) { - dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n", - connection->name, phase, ret); - return ret; - } - - return 0; -} - -static int -gb_connection_cport_shutdown_phase_1(struct gb_connection *connection) -{ - return gb_connection_cport_shutdown(connection, 1); -} - -static int -gb_connection_cport_shutdown_phase_2(struct gb_connection *connection) -{ - return gb_connection_cport_shutdown(connection, 2); -} - -/* - * Cancel all active operations on a connection. - * - * Locking: Called with connection lock held and state set to DISABLED or - * DISCONNECTING. - */ -static void gb_connection_cancel_operations(struct gb_connection *connection, - int errno) - __must_hold(&connection->lock) -{ - struct gb_operation *operation; - - while (!list_empty(&connection->operations)) { - operation = list_last_entry(&connection->operations, - struct gb_operation, links); - gb_operation_get(operation); - spin_unlock_irq(&connection->lock); - - if (gb_operation_is_incoming(operation)) - gb_operation_cancel_incoming(operation, errno); - else - gb_operation_cancel(operation, errno); - - gb_operation_put(operation); - - spin_lock_irq(&connection->lock); - } -} - -/* - * Cancel all active incoming operations on a connection. - * - * Locking: Called with connection lock held and state set to ENABLED_TX. - */ -static void -gb_connection_flush_incoming_operations(struct gb_connection *connection, - int errno) - __must_hold(&connection->lock) -{ - struct gb_operation *operation; - bool incoming; - - while (!list_empty(&connection->operations)) { - incoming = false; - list_for_each_entry(operation, &connection->operations, - links) { - if (gb_operation_is_incoming(operation)) { - gb_operation_get(operation); - incoming = true; - break; - } - } - - if (!incoming) - break; - - spin_unlock_irq(&connection->lock); - - /* FIXME: flush, not cancel? */ - gb_operation_cancel_incoming(operation, errno); - gb_operation_put(operation); - - spin_lock_irq(&connection->lock); - } -} - -/* - * _gb_connection_enable() - enable a connection - * @connection: connection to enable - * @rx: whether to enable incoming requests - * - * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and - * ENABLED_TX->ENABLED state transitions. 
- * - * Locking: Caller holds connection->mutex. - */ -static int _gb_connection_enable(struct gb_connection *connection, bool rx) -{ - int ret; - - /* Handle ENABLED_TX -> ENABLED transitions. */ - if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) { - if (!(connection->handler && rx)) - return 0; - - spin_lock_irq(&connection->lock); - connection->state = GB_CONNECTION_STATE_ENABLED; - spin_unlock_irq(&connection->lock); - - return 0; - } - - ret = gb_connection_hd_cport_enable(connection); - if (ret) - return ret; - - ret = gb_connection_svc_connection_create(connection); - if (ret) - goto err_hd_cport_clear; - - ret = gb_connection_hd_cport_connected(connection); - if (ret) - goto err_svc_connection_destroy; - - spin_lock_irq(&connection->lock); - if (connection->handler && rx) - connection->state = GB_CONNECTION_STATE_ENABLED; - else - connection->state = GB_CONNECTION_STATE_ENABLED_TX; - spin_unlock_irq(&connection->lock); - - ret = gb_connection_control_connected(connection); - if (ret) - goto err_control_disconnecting; - - return 0; - -err_control_disconnecting: - spin_lock_irq(&connection->lock); - connection->state = GB_CONNECTION_STATE_DISCONNECTING; - gb_connection_cancel_operations(connection, -ESHUTDOWN); - spin_unlock_irq(&connection->lock); - - /* Transmit queue should already be empty. */ - gb_connection_hd_cport_flush(connection); - - gb_connection_control_disconnecting(connection); - gb_connection_cport_shutdown_phase_1(connection); - gb_connection_hd_cport_quiesce(connection); - gb_connection_cport_shutdown_phase_2(connection); - gb_connection_control_disconnected(connection); - connection->state = GB_CONNECTION_STATE_DISABLED; -err_svc_connection_destroy: - gb_connection_svc_connection_destroy(connection); -err_hd_cport_clear: - gb_connection_hd_cport_clear(connection); - - gb_connection_hd_cport_disable(connection); - - return ret; -} - -int gb_connection_enable(struct gb_connection *connection) -{ - int ret = 0; - - mutex_lock(&connection->mutex); - - if (connection->state == GB_CONNECTION_STATE_ENABLED) - goto out_unlock; - - ret = _gb_connection_enable(connection, true); - if (!ret) - trace_gb_connection_enable(connection); - -out_unlock: - mutex_unlock(&connection->mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(gb_connection_enable); - -int gb_connection_enable_tx(struct gb_connection *connection) -{ - int ret = 0; - - mutex_lock(&connection->mutex); - - if (connection->state == GB_CONNECTION_STATE_ENABLED) { - ret = -EINVAL; - goto out_unlock; - } - - if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) - goto out_unlock; - - ret = _gb_connection_enable(connection, false); - if (!ret) - trace_gb_connection_enable(connection); - -out_unlock: - mutex_unlock(&connection->mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(gb_connection_enable_tx); - -void gb_connection_disable_rx(struct gb_connection *connection) -{ - mutex_lock(&connection->mutex); - - spin_lock_irq(&connection->lock); - if (connection->state != GB_CONNECTION_STATE_ENABLED) { - spin_unlock_irq(&connection->lock); - goto out_unlock; - } - connection->state = GB_CONNECTION_STATE_ENABLED_TX; - gb_connection_flush_incoming_operations(connection, -ESHUTDOWN); - spin_unlock_irq(&connection->lock); - - trace_gb_connection_disable(connection); - -out_unlock: - mutex_unlock(&connection->mutex); -} -EXPORT_SYMBOL_GPL(gb_connection_disable_rx); - -void gb_connection_mode_switch_prepare(struct gb_connection *connection) -{ - connection->mode_switch = true; -} - -void 
gb_connection_mode_switch_complete(struct gb_connection *connection) -{ - gb_connection_svc_connection_destroy(connection); - gb_connection_hd_cport_clear(connection); - - gb_connection_hd_cport_disable(connection); - - connection->mode_switch = false; -} - -void gb_connection_disable(struct gb_connection *connection) -{ - mutex_lock(&connection->mutex); - - if (connection->state == GB_CONNECTION_STATE_DISABLED) - goto out_unlock; - - trace_gb_connection_disable(connection); - - spin_lock_irq(&connection->lock); - connection->state = GB_CONNECTION_STATE_DISCONNECTING; - gb_connection_cancel_operations(connection, -ESHUTDOWN); - spin_unlock_irq(&connection->lock); - - gb_connection_hd_cport_flush(connection); - - gb_connection_control_disconnecting(connection); - gb_connection_cport_shutdown_phase_1(connection); - gb_connection_hd_cport_quiesce(connection); - gb_connection_cport_shutdown_phase_2(connection); - gb_connection_control_disconnected(connection); - - connection->state = GB_CONNECTION_STATE_DISABLED; - - /* control-connection tear down is deferred when mode switching */ - if (!connection->mode_switch) { - gb_connection_svc_connection_destroy(connection); - gb_connection_hd_cport_clear(connection); - - gb_connection_hd_cport_disable(connection); - } - -out_unlock: - mutex_unlock(&connection->mutex); -} -EXPORT_SYMBOL_GPL(gb_connection_disable); - -/* Disable a connection without communicating with the remote end. */ -void gb_connection_disable_forced(struct gb_connection *connection) -{ - mutex_lock(&connection->mutex); - - if (connection->state == GB_CONNECTION_STATE_DISABLED) - goto out_unlock; - - trace_gb_connection_disable(connection); - - spin_lock_irq(&connection->lock); - connection->state = GB_CONNECTION_STATE_DISABLED; - gb_connection_cancel_operations(connection, -ESHUTDOWN); - spin_unlock_irq(&connection->lock); - - gb_connection_hd_cport_flush(connection); - - gb_connection_svc_connection_destroy(connection); - gb_connection_hd_cport_clear(connection); - - gb_connection_hd_cport_disable(connection); -out_unlock: - mutex_unlock(&connection->mutex); -} -EXPORT_SYMBOL_GPL(gb_connection_disable_forced); - -/* Caller must have disabled the connection before destroying it. 
*/ -void gb_connection_destroy(struct gb_connection *connection) -{ - if (!connection) - return; - - if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED)) - gb_connection_disable(connection); - - mutex_lock(&gb_connection_mutex); - - spin_lock_irq(&gb_connections_lock); - list_del(&connection->bundle_links); - list_del(&connection->hd_links); - spin_unlock_irq(&gb_connections_lock); - - destroy_workqueue(connection->wq); - - gb_hd_cport_release(connection->hd, connection->hd_cport_id); - connection->hd_cport_id = CPORT_ID_BAD; - - mutex_unlock(&gb_connection_mutex); - - gb_connection_put(connection); -} -EXPORT_SYMBOL_GPL(gb_connection_destroy); - -void gb_connection_latency_tag_enable(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->latency_tag_enable) - return; - - ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id); - if (ret) { - dev_err(&connection->hd->dev, - "%s: failed to enable latency tag: %d\n", - connection->name, ret); - } -} -EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable); - -void gb_connection_latency_tag_disable(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - int ret; - - if (!hd->driver->latency_tag_disable) - return; - - ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id); - if (ret) { - dev_err(&connection->hd->dev, - "%s: failed to disable latency tag: %d\n", - connection->name, ret); - } -} -EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable); diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c deleted file mode 100644 index 359a25841973..000000000000 --- a/drivers/staging/greybus/control.c +++ /dev/null @@ -1,584 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus CPort control protocol. - * - * Copyright 2015 Google Inc. - * Copyright 2015 Linaro Ltd. 
- */ - -#include -#include -#include -#include - -/* Highest control-protocol version supported */ -#define GB_CONTROL_VERSION_MAJOR 0 -#define GB_CONTROL_VERSION_MINOR 1 - -static int gb_control_get_version(struct gb_control *control) -{ - struct gb_interface *intf = control->connection->intf; - struct gb_control_version_request request; - struct gb_control_version_response response; - int ret; - - request.major = GB_CONTROL_VERSION_MAJOR; - request.minor = GB_CONTROL_VERSION_MINOR; - - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_VERSION, - &request, sizeof(request), &response, - sizeof(response)); - if (ret) { - dev_err(&intf->dev, - "failed to get control-protocol version: %d\n", - ret); - return ret; - } - - if (response.major > request.major) { - dev_err(&intf->dev, - "unsupported major control-protocol version (%u > %u)\n", - response.major, request.major); - return -ENOTSUPP; - } - - control->protocol_major = response.major; - control->protocol_minor = response.minor; - - dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major, - response.minor); - - return 0; -} - -static int gb_control_get_bundle_version(struct gb_control *control, - struct gb_bundle *bundle) -{ - struct gb_interface *intf = control->connection->intf; - struct gb_control_bundle_version_request request; - struct gb_control_bundle_version_response response; - int ret; - - request.bundle_id = bundle->id; - - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_BUNDLE_VERSION, - &request, sizeof(request), - &response, sizeof(response)); - if (ret) { - dev_err(&intf->dev, - "failed to get bundle %u class version: %d\n", - bundle->id, ret); - return ret; - } - - bundle->class_major = response.major; - bundle->class_minor = response.minor; - - dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id, - response.major, response.minor); - - return 0; -} - -int gb_control_get_bundle_versions(struct gb_control *control) -{ - struct gb_interface *intf = control->connection->intf; - struct gb_bundle *bundle; - int ret; - - if (!control->has_bundle_version) - return 0; - - list_for_each_entry(bundle, &intf->bundles, links) { - ret = gb_control_get_bundle_version(control, bundle); - if (ret) - return ret; - } - - return 0; -} - -/* Get Manifest's size from the interface */ -int gb_control_get_manifest_size_operation(struct gb_interface *intf) -{ - struct gb_control_get_manifest_size_response response; - struct gb_connection *connection = intf->control->connection; - int ret; - - ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE, - NULL, 0, &response, sizeof(response)); - if (ret) { - dev_err(&connection->intf->dev, - "failed to get manifest size: %d\n", ret); - return ret; - } - - return le16_to_cpu(response.size); -} - -/* Reads Manifest from the interface */ -int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, - size_t size) -{ - struct gb_connection *connection = intf->control->connection; - - return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST, - NULL, 0, manifest, size); -} - -int gb_control_connected_operation(struct gb_control *control, u16 cport_id) -{ - struct gb_control_connected_request request; - - request.cport_id = cpu_to_le16(cport_id); - return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED, - &request, sizeof(request), NULL, 0); -} - -int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id) -{ - struct gb_control_disconnected_request request; - - request.cport_id = 
cpu_to_le16(cport_id); - return gb_operation_sync(control->connection, - GB_CONTROL_TYPE_DISCONNECTED, &request, - sizeof(request), NULL, 0); -} - -int gb_control_disconnecting_operation(struct gb_control *control, - u16 cport_id) -{ - struct gb_control_disconnecting_request *request; - struct gb_operation *operation; - int ret; - - operation = gb_operation_create_core(control->connection, - GB_CONTROL_TYPE_DISCONNECTING, - sizeof(*request), 0, 0, - GFP_KERNEL); - if (!operation) - return -ENOMEM; - - request = operation->request->payload; - request->cport_id = cpu_to_le16(cport_id); - - ret = gb_operation_request_send_sync(operation); - if (ret) { - dev_err(&control->dev, "failed to send disconnecting: %d\n", - ret); - } - - gb_operation_put(operation); - - return ret; -} - -int gb_control_mode_switch_operation(struct gb_control *control) -{ - struct gb_operation *operation; - int ret; - - operation = gb_operation_create_core(control->connection, - GB_CONTROL_TYPE_MODE_SWITCH, - 0, 0, - GB_OPERATION_FLAG_UNIDIRECTIONAL, - GFP_KERNEL); - if (!operation) - return -ENOMEM; - - ret = gb_operation_request_send_sync(operation); - if (ret) - dev_err(&control->dev, "failed to send mode switch: %d\n", ret); - - gb_operation_put(operation); - - return ret; -} - -static int gb_control_bundle_pm_status_map(u8 status) -{ - switch (status) { - case GB_CONTROL_BUNDLE_PM_INVAL: - return -EINVAL; - case GB_CONTROL_BUNDLE_PM_BUSY: - return -EBUSY; - case GB_CONTROL_BUNDLE_PM_NA: - return -ENOMSG; - case GB_CONTROL_BUNDLE_PM_FAIL: - default: - return -EREMOTEIO; - } -} - -int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id) -{ - struct gb_control_bundle_pm_request request; - struct gb_control_bundle_pm_response response; - int ret; - - request.bundle_id = bundle_id; - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request, - sizeof(request), &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, "failed to send bundle %u suspend: %d\n", - bundle_id, ret); - return ret; - } - - if (response.status != GB_CONTROL_BUNDLE_PM_OK) { - dev_err(&control->dev, "failed to suspend bundle %u: %d\n", - bundle_id, response.status); - return gb_control_bundle_pm_status_map(response.status); - } - - return 0; -} - -int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id) -{ - struct gb_control_bundle_pm_request request; - struct gb_control_bundle_pm_response response; - int ret; - - request.bundle_id = bundle_id; - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_BUNDLE_RESUME, &request, - sizeof(request), &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, "failed to send bundle %u resume: %d\n", - bundle_id, ret); - return ret; - } - - if (response.status != GB_CONTROL_BUNDLE_PM_OK) { - dev_err(&control->dev, "failed to resume bundle %u: %d\n", - bundle_id, response.status); - return gb_control_bundle_pm_status_map(response.status); - } - - return 0; -} - -int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id) -{ - struct gb_control_bundle_pm_request request; - struct gb_control_bundle_pm_response response; - int ret; - - request.bundle_id = bundle_id; - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request, - sizeof(request), &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, - "failed to send bundle %u deactivate: %d\n", bundle_id, - ret); - return ret; - } - - if (response.status != GB_CONTROL_BUNDLE_PM_OK) { - dev_err(&control->dev, 
"failed to deactivate bundle %u: %d\n", - bundle_id, response.status); - return gb_control_bundle_pm_status_map(response.status); - } - - return 0; -} - -int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id) -{ - struct gb_control_bundle_pm_request request; - struct gb_control_bundle_pm_response response; - int ret; - - if (!control->has_bundle_activate) - return 0; - - request.bundle_id = bundle_id; - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request, - sizeof(request), &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, - "failed to send bundle %u activate: %d\n", bundle_id, - ret); - return ret; - } - - if (response.status != GB_CONTROL_BUNDLE_PM_OK) { - dev_err(&control->dev, "failed to activate bundle %u: %d\n", - bundle_id, response.status); - return gb_control_bundle_pm_status_map(response.status); - } - - return 0; -} - -static int gb_control_interface_pm_status_map(u8 status) -{ - switch (status) { - case GB_CONTROL_INTF_PM_BUSY: - return -EBUSY; - case GB_CONTROL_INTF_PM_NA: - return -ENOMSG; - default: - return -EREMOTEIO; - } -} - -int gb_control_interface_suspend_prepare(struct gb_control *control) -{ - struct gb_control_intf_pm_response response; - int ret; - - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0, - &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, - "failed to send interface suspend prepare: %d\n", ret); - return ret; - } - - if (response.status != GB_CONTROL_INTF_PM_OK) { - dev_err(&control->dev, "interface error while preparing suspend: %d\n", - response.status); - return gb_control_interface_pm_status_map(response.status); - } - - return 0; -} - -int gb_control_interface_deactivate_prepare(struct gb_control *control) -{ - struct gb_control_intf_pm_response response; - int ret; - - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL, - 0, &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n", - ret); - return ret; - } - - if (response.status != GB_CONTROL_INTF_PM_OK) { - dev_err(&control->dev, "interface error while preparing deactivate: %d\n", - response.status); - return gb_control_interface_pm_status_map(response.status); - } - - return 0; -} - -int gb_control_interface_hibernate_abort(struct gb_control *control) -{ - struct gb_control_intf_pm_response response; - int ret; - - ret = gb_operation_sync(control->connection, - GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0, - &response, sizeof(response)); - if (ret) { - dev_err(&control->dev, - "failed to send interface aborting hibernate: %d\n", - ret); - return ret; - } - - if (response.status != GB_CONTROL_INTF_PM_OK) { - dev_err(&control->dev, "interface error while aborting hibernate: %d\n", - response.status); - return gb_control_interface_pm_status_map(response.status); - } - - return 0; -} - -static ssize_t vendor_string_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_control *control = to_gb_control(dev); - - return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string); -} -static DEVICE_ATTR_RO(vendor_string); - -static ssize_t product_string_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_control *control = to_gb_control(dev); - - return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string); -} -static DEVICE_ATTR_RO(product_string); - -static struct attribute *control_attrs[] = { - 
&dev_attr_vendor_string.attr, - &dev_attr_product_string.attr, - NULL, -}; -ATTRIBUTE_GROUPS(control); - -static void gb_control_release(struct device *dev) -{ - struct gb_control *control = to_gb_control(dev); - - gb_connection_destroy(control->connection); - - kfree(control->vendor_string); - kfree(control->product_string); - - kfree(control); -} - -struct device_type greybus_control_type = { - .name = "greybus_control", - .release = gb_control_release, -}; - -struct gb_control *gb_control_create(struct gb_interface *intf) -{ - struct gb_connection *connection; - struct gb_control *control; - - control = kzalloc(sizeof(*control), GFP_KERNEL); - if (!control) - return ERR_PTR(-ENOMEM); - - control->intf = intf; - - connection = gb_connection_create_control(intf); - if (IS_ERR(connection)) { - dev_err(&intf->dev, - "failed to create control connection: %ld\n", - PTR_ERR(connection)); - kfree(control); - return ERR_CAST(connection); - } - - control->connection = connection; - - control->dev.parent = &intf->dev; - control->dev.bus = &greybus_bus_type; - control->dev.type = &greybus_control_type; - control->dev.groups = control_groups; - control->dev.dma_mask = intf->dev.dma_mask; - device_initialize(&control->dev); - dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev)); - - gb_connection_set_data(control->connection, control); - - return control; -} - -int gb_control_enable(struct gb_control *control) -{ - int ret; - - dev_dbg(&control->connection->intf->dev, "%s\n", __func__); - - ret = gb_connection_enable_tx(control->connection); - if (ret) { - dev_err(&control->connection->intf->dev, - "failed to enable control connection: %d\n", - ret); - return ret; - } - - ret = gb_control_get_version(control); - if (ret) - goto err_disable_connection; - - if (control->protocol_major > 0 || control->protocol_minor > 1) - control->has_bundle_version = true; - - /* FIXME: use protocol version instead */ - if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE)) - control->has_bundle_activate = true; - - return 0; - -err_disable_connection: - gb_connection_disable(control->connection); - - return ret; -} - -void gb_control_disable(struct gb_control *control) -{ - dev_dbg(&control->connection->intf->dev, "%s\n", __func__); - - if (control->intf->disconnected) - gb_connection_disable_forced(control->connection); - else - gb_connection_disable(control->connection); -} - -int gb_control_suspend(struct gb_control *control) -{ - gb_connection_disable(control->connection); - - return 0; -} - -int gb_control_resume(struct gb_control *control) -{ - int ret; - - ret = gb_connection_enable_tx(control->connection); - if (ret) { - dev_err(&control->connection->intf->dev, - "failed to enable control connection: %d\n", ret); - return ret; - } - - return 0; -} - -int gb_control_add(struct gb_control *control) -{ - int ret; - - ret = device_add(&control->dev); - if (ret) { - dev_err(&control->dev, - "failed to register control device: %d\n", - ret); - return ret; - } - - return 0; -} - -void gb_control_del(struct gb_control *control) -{ - if (device_is_registered(&control->dev)) - device_del(&control->dev); -} - -struct gb_control *gb_control_get(struct gb_control *control) -{ - get_device(&control->dev); - - return control; -} - -void gb_control_put(struct gb_control *control) -{ - put_device(&control->dev); -} - -void gb_control_mode_switch_prepare(struct gb_control *control) -{ - gb_connection_mode_switch_prepare(control->connection); -} - -void gb_control_mode_switch_complete(struct gb_control 
*control) -{ - gb_connection_mode_switch_complete(control->connection); -} diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c deleted file mode 100644 index e546c6431877..000000000000 --- a/drivers/staging/greybus/core.c +++ /dev/null @@ -1,349 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus "Core" - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#define CREATE_TRACE_POINTS -#include -#include "greybus_trace.h" - -#define GB_BUNDLE_AUTOSUSPEND_MS 3000 - -/* Allow greybus to be disabled at boot if needed */ -static bool nogreybus; -#ifdef MODULE -module_param(nogreybus, bool, 0444); -#else -core_param(nogreybus, nogreybus, bool, 0444); -#endif -int greybus_disabled(void) -{ - return nogreybus; -} -EXPORT_SYMBOL_GPL(greybus_disabled); - -static bool greybus_match_one_id(struct gb_bundle *bundle, - const struct greybus_bundle_id *id) -{ - if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) && - (id->vendor != bundle->intf->vendor_id)) - return false; - - if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) && - (id->product != bundle->intf->product_id)) - return false; - - if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) && - (id->class != bundle->class)) - return false; - - return true; -} - -static const struct greybus_bundle_id * -greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id) -{ - if (!id) - return NULL; - - for (; id->vendor || id->product || id->class || id->driver_info; - id++) { - if (greybus_match_one_id(bundle, id)) - return id; - } - - return NULL; -} - -static int greybus_match_device(struct device *dev, struct device_driver *drv) -{ - struct greybus_driver *driver = to_greybus_driver(drv); - struct gb_bundle *bundle; - const struct greybus_bundle_id *id; - - if (!is_gb_bundle(dev)) - return 0; - - bundle = to_gb_bundle(dev); - - id = greybus_match_id(bundle, driver->id_table); - if (id) - return 1; - /* FIXME - Dynamic ids? 
*/ - return 0; -} - -static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - struct gb_host_device *hd; - struct gb_module *module = NULL; - struct gb_interface *intf = NULL; - struct gb_control *control = NULL; - struct gb_bundle *bundle = NULL; - struct gb_svc *svc = NULL; - - if (is_gb_host_device(dev)) { - hd = to_gb_host_device(dev); - } else if (is_gb_module(dev)) { - module = to_gb_module(dev); - hd = module->hd; - } else if (is_gb_interface(dev)) { - intf = to_gb_interface(dev); - module = intf->module; - hd = intf->hd; - } else if (is_gb_control(dev)) { - control = to_gb_control(dev); - intf = control->intf; - module = intf->module; - hd = intf->hd; - } else if (is_gb_bundle(dev)) { - bundle = to_gb_bundle(dev); - intf = bundle->intf; - module = intf->module; - hd = intf->hd; - } else if (is_gb_svc(dev)) { - svc = to_gb_svc(dev); - hd = svc->hd; - } else { - dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n"); - return -EINVAL; - } - - if (add_uevent_var(env, "BUS=%u", hd->bus_id)) - return -ENOMEM; - - if (module) { - if (add_uevent_var(env, "MODULE=%u", module->module_id)) - return -ENOMEM; - } - - if (intf) { - if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id)) - return -ENOMEM; - if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x", - intf->vendor_id, intf->product_id)) - return -ENOMEM; - } - - if (bundle) { - // FIXME - // add a uevent that can "load" a bundle type - // This is what we need to bind a driver to so use the info - // in gmod here as well - - if (add_uevent_var(env, "BUNDLE=%u", bundle->id)) - return -ENOMEM; - if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class)) - return -ENOMEM; - } - - return 0; -} - -static void greybus_shutdown(struct device *dev) -{ - if (is_gb_host_device(dev)) { - struct gb_host_device *hd; - - hd = to_gb_host_device(dev); - gb_hd_shutdown(hd); - } -} - -struct bus_type greybus_bus_type = { - .name = "greybus", - .match = greybus_match_device, - .uevent = greybus_uevent, - .shutdown = greybus_shutdown, -}; - -static int greybus_probe(struct device *dev) -{ - struct greybus_driver *driver = to_greybus_driver(dev->driver); - struct gb_bundle *bundle = to_gb_bundle(dev); - const struct greybus_bundle_id *id; - int retval; - - /* match id */ - id = greybus_match_id(bundle, driver->id_table); - if (!id) - return -ENODEV; - - retval = pm_runtime_get_sync(&bundle->intf->dev); - if (retval < 0) { - pm_runtime_put_noidle(&bundle->intf->dev); - return retval; - } - - retval = gb_control_bundle_activate(bundle->intf->control, bundle->id); - if (retval) { - pm_runtime_put(&bundle->intf->dev); - return retval; - } - - /* - * Unbound bundle devices are always deactivated. During probe, the - * Runtime PM is set to enabled and active and the usage count is - * incremented. If the driver supports runtime PM, it should call - * pm_runtime_put() in its probe routine and pm_runtime_get_sync() - * in remove routine. - */ - pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS); - pm_runtime_use_autosuspend(dev); - pm_runtime_get_noresume(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - - retval = driver->probe(bundle, id); - if (retval) { - /* - * Catch buggy drivers that fail to destroy their connections. 
- */ - WARN_ON(!list_empty(&bundle->connections)); - - gb_control_bundle_deactivate(bundle->intf->control, bundle->id); - - pm_runtime_disable(dev); - pm_runtime_set_suspended(dev); - pm_runtime_put_noidle(dev); - pm_runtime_dont_use_autosuspend(dev); - pm_runtime_put(&bundle->intf->dev); - - return retval; - } - - pm_runtime_put(&bundle->intf->dev); - - return 0; -} - -static int greybus_remove(struct device *dev) -{ - struct greybus_driver *driver = to_greybus_driver(dev->driver); - struct gb_bundle *bundle = to_gb_bundle(dev); - struct gb_connection *connection; - int retval; - - retval = pm_runtime_get_sync(dev); - if (retval < 0) - dev_err(dev, "failed to resume bundle: %d\n", retval); - - /* - * Disable (non-offloaded) connections early in case the interface is - * already gone to avoid unnecessary operation timeouts during - * driver disconnect. Otherwise, only disable incoming requests. - */ - list_for_each_entry(connection, &bundle->connections, bundle_links) { - if (gb_connection_is_offloaded(connection)) - continue; - - if (bundle->intf->disconnected) - gb_connection_disable_forced(connection); - else - gb_connection_disable_rx(connection); - } - - driver->disconnect(bundle); - - /* Catch buggy drivers that fail to destroy their connections. */ - WARN_ON(!list_empty(&bundle->connections)); - - if (!bundle->intf->disconnected) - gb_control_bundle_deactivate(bundle->intf->control, bundle->id); - - pm_runtime_put_noidle(dev); - pm_runtime_disable(dev); - pm_runtime_set_suspended(dev); - pm_runtime_dont_use_autosuspend(dev); - pm_runtime_put_noidle(dev); - - return 0; -} - -int greybus_register_driver(struct greybus_driver *driver, struct module *owner, - const char *mod_name) -{ - int retval; - - if (greybus_disabled()) - return -ENODEV; - - driver->driver.bus = &greybus_bus_type; - driver->driver.name = driver->name; - driver->driver.probe = greybus_probe; - driver->driver.remove = greybus_remove; - driver->driver.owner = owner; - driver->driver.mod_name = mod_name; - - retval = driver_register(&driver->driver); - if (retval) - return retval; - - pr_info("registered new driver %s\n", driver->name); - return 0; -} -EXPORT_SYMBOL_GPL(greybus_register_driver); - -void greybus_deregister_driver(struct greybus_driver *driver) -{ - driver_unregister(&driver->driver); -} -EXPORT_SYMBOL_GPL(greybus_deregister_driver); - -static int __init gb_init(void) -{ - int retval; - - if (greybus_disabled()) - return -ENODEV; - - BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD); - - gb_debugfs_init(); - - retval = bus_register(&greybus_bus_type); - if (retval) { - pr_err("bus_register failed (%d)\n", retval); - goto error_bus; - } - - retval = gb_hd_init(); - if (retval) { - pr_err("gb_hd_init failed (%d)\n", retval); - goto error_hd; - } - - retval = gb_operation_init(); - if (retval) { - pr_err("gb_operation_init failed (%d)\n", retval); - goto error_operation; - } - return 0; /* Success */ - -error_operation: - gb_hd_exit(); -error_hd: - bus_unregister(&greybus_bus_type); -error_bus: - gb_debugfs_cleanup(); - - return retval; -} -module_init(gb_init); - -static void __exit gb_exit(void) -{ - gb_operation_exit(); - gb_hd_exit(); - bus_unregister(&greybus_bus_type); - gb_debugfs_cleanup(); - tracepoint_synchronize_unregister(); -} -module_exit(gb_exit); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Greg Kroah-Hartman "); diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c deleted file mode 100644 index e102d7badb9d..000000000000 --- a/drivers/staging/greybus/debugfs.c
+++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus debugfs code - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd. - */ - -#include -#include - -static struct dentry *gb_debug_root; - -void __init gb_debugfs_init(void) -{ - gb_debug_root = debugfs_create_dir("greybus", NULL); -} - -void gb_debugfs_cleanup(void) -{ - debugfs_remove_recursive(gb_debug_root); - gb_debug_root = NULL; -} - -struct dentry *gb_debugfs_get(void) -{ - return gb_debug_root; -} -EXPORT_SYMBOL_GPL(gb_debugfs_get); diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c index 366716f11b1a..5b755e76d8a4 100644 --- a/drivers/staging/greybus/es2.c +++ b/drivers/staging/greybus/es2.c @@ -15,7 +15,7 @@ #include #include "arpc.h" -#include "greybus_trace.h" +#include "../../greybus/greybus_trace.h" /* Default timeout for USB vendor requests. */ diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h deleted file mode 100644 index 1bc9f1275c65..000000000000 --- a/drivers/staging/greybus/greybus_trace.h +++ /dev/null @@ -1,502 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Greybus driver and device API - * - * Copyright 2015 Google Inc. - * Copyright 2015 Linaro Ltd. - */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM greybus - -#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_GREYBUS_H - -#include - -struct gb_message; -struct gb_operation; -struct gb_connection; -struct gb_bundle; -struct gb_host_device; - -DECLARE_EVENT_CLASS(gb_message, - - TP_PROTO(struct gb_message *message), - - TP_ARGS(message), - - TP_STRUCT__entry( - __field(u16, size) - __field(u16, operation_id) - __field(u8, type) - __field(u8, result) - ), - - TP_fast_assign( - __entry->size = le16_to_cpu(message->header->size); - __entry->operation_id = - le16_to_cpu(message->header->operation_id); - __entry->type = message->header->type; - __entry->result = message->header->result; - ), - - TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x", - __entry->size, __entry->operation_id, - __entry->type, __entry->result) -); - -#define DEFINE_MESSAGE_EVENT(name) \ - DEFINE_EVENT(gb_message, name, \ - TP_PROTO(struct gb_message *message), \ - TP_ARGS(message)) - -/* - * Occurs immediately before calling a host device's message_send() - * method. - */ -DEFINE_MESSAGE_EVENT(gb_message_send); - -/* - * Occurs after an incoming request message has been received - */ -DEFINE_MESSAGE_EVENT(gb_message_recv_request); - -/* - * Occurs after an incoming response message has been received, - * after its matching request has been found. - */ -DEFINE_MESSAGE_EVENT(gb_message_recv_response); - -/* - * Occurs after an operation has been canceled, possibly before the - * cancellation is complete. - */ -DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing); - -/* - * Occurs when an incoming request is cancelled; if the response has - * been queued for sending, this occurs after it is sent. - */ -DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming); - -/* - * Occurs in the host driver message_send() function just prior to - * handing off the data to be processed by hardware. 
- */ -DEFINE_MESSAGE_EVENT(gb_message_submit); - -#undef DEFINE_MESSAGE_EVENT - -DECLARE_EVENT_CLASS(gb_operation, - - TP_PROTO(struct gb_operation *operation), - - TP_ARGS(operation), - - TP_STRUCT__entry( - __field(u16, cport_id) /* CPort of HD side of connection */ - __field(u16, id) /* Operation ID */ - __field(u8, type) - __field(unsigned long, flags) - __field(int, active) - __field(int, waiters) - __field(int, errno) - ), - - TP_fast_assign( - __entry->cport_id = operation->connection->hd_cport_id; - __entry->id = operation->id; - __entry->type = operation->type; - __entry->flags = operation->flags; - __entry->active = operation->active; - __entry->waiters = atomic_read(&operation->waiters); - __entry->errno = operation->errno; - ), - - TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d", - __entry->id, __entry->type, __entry->cport_id, __entry->flags, - __entry->active, __entry->waiters, __entry->errno) -); - -#define DEFINE_OPERATION_EVENT(name) \ - DEFINE_EVENT(gb_operation, name, \ - TP_PROTO(struct gb_operation *operation), \ - TP_ARGS(operation)) - -/* - * Occurs after a new operation for an outgoing request has been - * successfully created. - */ -DEFINE_OPERATION_EVENT(gb_operation_create); - -/* - * Occurs after a new core operation has been created. - */ -DEFINE_OPERATION_EVENT(gb_operation_create_core); - -/* - * Occurs after a new operation for an incoming request has been - * successfully created and initialized. - */ -DEFINE_OPERATION_EVENT(gb_operation_create_incoming); - -/* - * Occurs when the last reference to an operation has been dropped, - * prior to freeing resources. - */ -DEFINE_OPERATION_EVENT(gb_operation_destroy); - -/* - * Occurs when an operation has been marked active, after updating - * its active count. - */ -DEFINE_OPERATION_EVENT(gb_operation_get_active); - -/* - * Occurs when an operation is about to be marked inactive, before - * updating its active count. - */ -DEFINE_OPERATION_EVENT(gb_operation_put_active); - -#undef DEFINE_OPERATION_EVENT - -DECLARE_EVENT_CLASS(gb_connection, - - TP_PROTO(struct gb_connection *connection), - - TP_ARGS(connection), - - TP_STRUCT__entry( - __field(int, hd_bus_id) - __field(u8, bundle_id) - /* name contains "hd_cport_id/intf_id:cport_id" */ - __dynamic_array(char, name, sizeof(connection->name)) - __field(enum gb_connection_state, state) - __field(unsigned long, flags) - ), - - TP_fast_assign( - __entry->hd_bus_id = connection->hd->bus_id; - __entry->bundle_id = connection->bundle ? - connection->bundle->id : BUNDLE_ID_NONE; - memcpy(__get_str(name), connection->name, - sizeof(connection->name)); - __entry->state = connection->state; - __entry->flags = connection->flags; - ), - - TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx", - __entry->hd_bus_id, __entry->bundle_id, __get_str(name), - (unsigned int)__entry->state, __entry->flags) -); - -#define DEFINE_CONNECTION_EVENT(name) \ - DEFINE_EVENT(gb_connection, name, \ - TP_PROTO(struct gb_connection *connection), \ - TP_ARGS(connection)) - -/* - * Occurs after a new connection is successfully created. - */ -DEFINE_CONNECTION_EVENT(gb_connection_create); - -/* - * Occurs when the last reference to a connection has been dropped, - * before its resources are freed. - */ -DEFINE_CONNECTION_EVENT(gb_connection_release); - -/* - * Occurs when a new reference to connection is added, currently - * only when a message over the connection is received. - */ -DEFINE_CONNECTION_EVENT(gb_connection_get);
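/*
 * A minimal usage sketch: each DEFINE_EVENT() above generates a
 * trace_<name>() helper that the core calls at the matching point. For
 * example, _gb_connection_create() in connection.c fires the
 * gb_connection_create event once the new connection is fully
 * initialized and linked:
 *
 *	trace_gb_connection_create(connection);
 *
 * With tracing enabled, the event is then visible through tracefs, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/greybus/gb_connection_create/enable
 */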
- -/* - * Occurs when a new reference to connection is dropped, after a - * received message is handled, or when the connection is - * destroyed. - */ -DEFINE_CONNECTION_EVENT(gb_connection_put); - -/* - * Occurs when a request to enable a connection is made, either for - * transmit only, or for both transmit and receive. - */ -DEFINE_CONNECTION_EVENT(gb_connection_enable); - -/* - * Occurs when a request to disable a connection is made, either for - * receive only, or for both transmit and receive. Also occurs when - * a request to forcefully disable a connection is made. - */ -DEFINE_CONNECTION_EVENT(gb_connection_disable); - -#undef DEFINE_CONNECTION_EVENT - -DECLARE_EVENT_CLASS(gb_bundle, - - TP_PROTO(struct gb_bundle *bundle), - - TP_ARGS(bundle), - - TP_STRUCT__entry( - __field(u8, intf_id) - __field(u8, id) - __field(u8, class) - __field(size_t, num_cports) - ), - - TP_fast_assign( - __entry->intf_id = bundle->intf->interface_id; - __entry->id = bundle->id; - __entry->class = bundle->class; - __entry->num_cports = bundle->num_cports; - ), - - TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu", - __entry->intf_id, __entry->id, __entry->class, - __entry->num_cports) -); - -#define DEFINE_BUNDLE_EVENT(name) \ - DEFINE_EVENT(gb_bundle, name, \ - TP_PROTO(struct gb_bundle *bundle), \ - TP_ARGS(bundle)) - -/* - * Occurs after a new bundle is successfully created. - */ -DEFINE_BUNDLE_EVENT(gb_bundle_create); - -/* - * Occurs when the last reference to a bundle has been dropped, - * before its resources are freed. - */ -DEFINE_BUNDLE_EVENT(gb_bundle_release); - -/* - * Occurs when a bundle is added to an interface when the interface - * is enabled. - */ -DEFINE_BUNDLE_EVENT(gb_bundle_add); - -/* - * Occurs when a registered bundle gets destroyed, normally at the - * time an interface is disabled. - */ -DEFINE_BUNDLE_EVENT(gb_bundle_destroy); - -#undef DEFINE_BUNDLE_EVENT - -DECLARE_EVENT_CLASS(gb_interface, - - TP_PROTO(struct gb_interface *intf), - - TP_ARGS(intf), - - TP_STRUCT__entry( - __field(u8, module_id) - __field(u8, id) /* Interface id */ - __field(u8, device_id) - __field(int, disconnected) /* bool */ - __field(int, ejected) /* bool */ - __field(int, active) /* bool */ - __field(int, enabled) /* bool */ - __field(int, mode_switch) /* bool */ - ), - - TP_fast_assign( - __entry->module_id = intf->module->module_id; - __entry->id = intf->interface_id; - __entry->device_id = intf->device_id; - __entry->disconnected = intf->disconnected; - __entry->ejected = intf->ejected; - __entry->active = intf->active; - __entry->enabled = intf->enabled; - __entry->mode_switch = intf->mode_switch; - ), - - TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d", - __entry->id, __entry->device_id, __entry->module_id, - __entry->disconnected, __entry->ejected, __entry->active, - __entry->enabled, __entry->mode_switch) -); - -#define DEFINE_INTERFACE_EVENT(name) \ - DEFINE_EVENT(gb_interface, name, \ - TP_PROTO(struct gb_interface *intf), \ - TP_ARGS(intf)) - -/* - * Occurs after a new interface is successfully created. - */ -DEFINE_INTERFACE_EVENT(gb_interface_create); - -/* - * Occurs after the last reference to an interface has been dropped. - */ -DEFINE_INTERFACE_EVENT(gb_interface_release); - -/* - * Occurs after an interface has been registered. - */ -DEFINE_INTERFACE_EVENT(gb_interface_add); - -/* - * Occurs when a registered interface gets deregistered.
- */ -DEFINE_INTERFACE_EVENT(gb_interface_del); - -/* - * Occurs when a registered interface has been successfully - * activated. - */ -DEFINE_INTERFACE_EVENT(gb_interface_activate); - -/* - * Occurs when an activated interface is being deactivated. - */ -DEFINE_INTERFACE_EVENT(gb_interface_deactivate); - -/* - * Occurs when an interface has been successfully enabled. - */ -DEFINE_INTERFACE_EVENT(gb_interface_enable); - -/* - * Occurs when an enabled interface is being disabled. - */ -DEFINE_INTERFACE_EVENT(gb_interface_disable); - -#undef DEFINE_INTERFACE_EVENT - -DECLARE_EVENT_CLASS(gb_module, - - TP_PROTO(struct gb_module *module), - - TP_ARGS(module), - - TP_STRUCT__entry( - __field(int, hd_bus_id) - __field(u8, module_id) - __field(size_t, num_interfaces) - __field(int, disconnected) /* bool */ - ), - - TP_fast_assign( - __entry->hd_bus_id = module->hd->bus_id; - __entry->module_id = module->module_id; - __entry->num_interfaces = module->num_interfaces; - __entry->disconnected = module->disconnected; - ), - - TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d", - __entry->hd_bus_id, __entry->module_id, - __entry->num_interfaces, __entry->disconnected) -); - -#define DEFINE_MODULE_EVENT(name) \ - DEFINE_EVENT(gb_module, name, \ - TP_PROTO(struct gb_module *module), \ - TP_ARGS(module)) - -/* - * Occurs after a new module is successfully created, before - * creating any of its interfaces. - */ -DEFINE_MODULE_EVENT(gb_module_create); - -/* - * Occurs after the last reference to a module has been dropped. - */ -DEFINE_MODULE_EVENT(gb_module_release); - -/* - * Occurs after a module is successfully created, before registering - * any of its interfaces. - */ -DEFINE_MODULE_EVENT(gb_module_add); - -/* - * Occurs when a module is deleted, before deregistering its - * interfaces. - */ -DEFINE_MODULE_EVENT(gb_module_del); - -#undef DEFINE_MODULE_EVENT - -DECLARE_EVENT_CLASS(gb_host_device, - - TP_PROTO(struct gb_host_device *hd), - - TP_ARGS(hd), - - TP_STRUCT__entry( - __field(int, bus_id) - __field(size_t, num_cports) - __field(size_t, buffer_size_max) - ), - - TP_fast_assign( - __entry->bus_id = hd->bus_id; - __entry->num_cports = hd->num_cports; - __entry->buffer_size_max = hd->buffer_size_max; - ), - - TP_printk("bus_id=%d num_cports=%zu mtu=%zu", - __entry->bus_id, __entry->num_cports, - __entry->buffer_size_max) -); - -#define DEFINE_HD_EVENT(name) \ - DEFINE_EVENT(gb_host_device, name, \ - TP_PROTO(struct gb_host_device *hd), \ - TP_ARGS(hd)) - -/* - * Occurs after a new host device is successfully created, before - * its SVC has been set up. - */ -DEFINE_HD_EVENT(gb_hd_create); - -/* - * Occurs after the last reference to a host device has been - * dropped. - */ -DEFINE_HD_EVENT(gb_hd_release); - -/* - * Occurs after a new host device has been added, after the - * connection to its SVC has been enabled. - */ -DEFINE_HD_EVENT(gb_hd_add); - -/* - * Occurs when a host device is being disconnected from the AP USB - * host controller. - */ -DEFINE_HD_EVENT(gb_hd_del); - -/* - * Occurs when a host device has passed received data to the Greybus - * core, after it has been determined it is destined for a valid - * CPort. - */ -DEFINE_HD_EVENT(gb_hd_in); - -#undef DEFINE_HD_EVENT - -#endif /* _TRACE_GREYBUS_H */ - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . 
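/*
 * A short sketch of the ftrace wiring this header relies on: exactly one
 * translation unit defines CREATE_TRACE_POINTS before including the header,
 * so that the event bodies are instantiated once (core.c does this); every
 * other user includes the header plainly and calls the generated helpers:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "greybus_trace.h"
 *
 * while, for instance, hd.c just does:
 *
 *	#include "greybus_trace.h"
 *	...
 *	trace_gb_hd_create(hd);
 */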
- -/* - * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal - */ -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE greybus_trace -#include - diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c deleted file mode 100644 index 72b21bf2d7d3..000000000000 --- a/drivers/staging/greybus/hd.c +++ /dev/null @@ -1,256 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus Host Device - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. - */ - -#include -#include -#include - -#include "greybus_trace.h" - -EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create); -EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release); -EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add); -EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del); -EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in); -EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit); - -static struct ida gb_hd_bus_id_map; - -int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, - bool async) -{ - if (!hd || !hd->driver || !hd->driver->output) - return -EINVAL; - return hd->driver->output(hd, req, size, cmd, async); -} -EXPORT_SYMBOL_GPL(gb_hd_output); - -static ssize_t bus_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_host_device *hd = to_gb_host_device(dev); - - return sprintf(buf, "%d\n", hd->bus_id); -} -static DEVICE_ATTR_RO(bus_id); - -static struct attribute *bus_attrs[] = { - &dev_attr_bus_id.attr, - NULL -}; -ATTRIBUTE_GROUPS(bus); - -int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id) -{ - struct ida *id_map = &hd->cport_id_map; - int ret; - - ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL); - if (ret < 0) { - dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id); - return ret; - } - - return 0; -} -EXPORT_SYMBOL_GPL(gb_hd_cport_reserve); - -void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id) -{ - struct ida *id_map = &hd->cport_id_map; - - ida_simple_remove(id_map, cport_id); -} -EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved); - -/* Locking: Caller guarantees serialisation */ -int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, - unsigned long flags) -{ - struct ida *id_map = &hd->cport_id_map; - int ida_start, ida_end; - - if (hd->driver->cport_allocate) - return hd->driver->cport_allocate(hd, cport_id, flags); - - if (cport_id < 0) { - ida_start = 0; - ida_end = hd->num_cports; - } else if (cport_id < hd->num_cports) { - ida_start = cport_id; - ida_end = cport_id + 1; - } else { - dev_err(&hd->dev, "cport %d not available\n", cport_id); - return -EINVAL; - } - - return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL); -} - -/* Locking: Caller guarantees serialisation */ -void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id) -{ - if (hd->driver->cport_release) { - hd->driver->cport_release(hd, cport_id); - return; - } - - ida_simple_remove(&hd->cport_id_map, cport_id); -} - -static void gb_hd_release(struct device *dev) -{ - struct gb_host_device *hd = to_gb_host_device(dev); - - trace_gb_hd_release(hd); - - if (hd->svc) - gb_svc_put(hd->svc); - ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id); - ida_destroy(&hd->cport_id_map); - kfree(hd); -} - -struct device_type greybus_hd_type = { - .name = "greybus_host_device", - .release = gb_hd_release, -}; - -struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, - struct device *parent, - size_t buffer_size_max, - size_t num_cports) -{ - struct gb_host_device *hd; - int ret; - - /* - * Validate that the driver implements all 
of the callbacks, - * so that we don't have to check them every time we call them. - */ - if ((!driver->message_send) || (!driver->message_cancel)) { - dev_err(parent, "mandatory hd-callbacks missing\n"); - return ERR_PTR(-EINVAL); - } - - if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) { - dev_err(parent, "greybus host-device buffers too small\n"); - return ERR_PTR(-EINVAL); - } - - if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) { - dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports); - return ERR_PTR(-EINVAL); - } - - /* - * Make sure to never allocate messages larger than what the Greybus - * protocol supports. - */ - if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) { - dev_warn(parent, "limiting buffer size to %u\n", - GB_OPERATION_MESSAGE_SIZE_MAX); - buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX; - } - - hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL); - if (!hd) - return ERR_PTR(-ENOMEM); - - ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL); - if (ret < 0) { - kfree(hd); - return ERR_PTR(ret); - } - hd->bus_id = ret; - - hd->driver = driver; - INIT_LIST_HEAD(&hd->modules); - INIT_LIST_HEAD(&hd->connections); - ida_init(&hd->cport_id_map); - hd->buffer_size_max = buffer_size_max; - hd->num_cports = num_cports; - - hd->dev.parent = parent; - hd->dev.bus = &greybus_bus_type; - hd->dev.type = &greybus_hd_type; - hd->dev.groups = bus_groups; - hd->dev.dma_mask = hd->dev.parent->dma_mask; - device_initialize(&hd->dev); - dev_set_name(&hd->dev, "greybus%d", hd->bus_id); - - trace_gb_hd_create(hd); - - hd->svc = gb_svc_create(hd); - if (!hd->svc) { - dev_err(&hd->dev, "failed to create svc\n"); - put_device(&hd->dev); - return ERR_PTR(-ENOMEM); - } - - return hd; -} -EXPORT_SYMBOL_GPL(gb_hd_create); - -int gb_hd_add(struct gb_host_device *hd) -{ - int ret; - - ret = device_add(&hd->dev); - if (ret) - return ret; - - ret = gb_svc_add(hd->svc); - if (ret) { - device_del(&hd->dev); - return ret; - } - - trace_gb_hd_add(hd); - - return 0; -} -EXPORT_SYMBOL_GPL(gb_hd_add); - -void gb_hd_del(struct gb_host_device *hd) -{ - trace_gb_hd_del(hd); - - /* - * Tear down the svc and flush any on-going hotplug processing before - * removing the remaining interfaces. - */ - gb_svc_del(hd->svc); - - device_del(&hd->dev); -} -EXPORT_SYMBOL_GPL(gb_hd_del); - -void gb_hd_shutdown(struct gb_host_device *hd) -{ - gb_svc_del(hd->svc); -} -EXPORT_SYMBOL_GPL(gb_hd_shutdown); - -void gb_hd_put(struct gb_host_device *hd) -{ - put_device(&hd->dev); -} -EXPORT_SYMBOL_GPL(gb_hd_put); - -int __init gb_hd_init(void) -{ - ida_init(&gb_hd_bus_id_map); - - return 0; -} - -void gb_hd_exit(void) -{ - ida_destroy(&gb_hd_bus_id_map); -} diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c deleted file mode 100644 index 67dbe6fda9a1..000000000000 --- a/drivers/staging/greybus/interface.c +++ /dev/null @@ -1,1263 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus interface code - * - * Copyright 2014 Google Inc. - * Copyright 2014 Linaro Ltd.
- */ - -#include -#include - -#include "greybus_trace.h" - -#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000 - -#define GB_INTERFACE_DEVICE_ID_BAD 0xff - -#define GB_INTERFACE_AUTOSUSPEND_MS 3000 - -/* Time required for interface to enter standby before disabling REFCLK */ -#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20 - -/* Don't-care selector index */ -#define DME_SELECTOR_INDEX_NULL 0 - -/* DME attributes */ -/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */ -#define DME_T_TST_SRC_INCREMENT 0x4083 - -#define DME_DDBL1_MANUFACTURERID 0x5003 -#define DME_DDBL1_PRODUCTID 0x5004 - -#define DME_TOSHIBA_GMP_VID 0x6000 -#define DME_TOSHIBA_GMP_PID 0x6001 -#define DME_TOSHIBA_GMP_SN0 0x6002 -#define DME_TOSHIBA_GMP_SN1 0x6003 -#define DME_TOSHIBA_GMP_INIT_STATUS 0x6101 - -/* DDBL1 Manufacturer and Product ids */ -#define TOSHIBA_DMID 0x0126 -#define TOSHIBA_ES2_BRIDGE_DPID 0x1000 -#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001 -#define TOSHIBA_ES3_GBPHY_DPID 0x1002 - -static int gb_interface_hibernate_link(struct gb_interface *intf); -static int gb_interface_refclk_set(struct gb_interface *intf, bool enable); - -static int gb_interface_dme_attr_get(struct gb_interface *intf, - u16 attr, u32 *val) -{ - return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id, - attr, DME_SELECTOR_INDEX_NULL, val); -} - -static int gb_interface_read_ara_dme(struct gb_interface *intf) -{ - u32 sn0, sn1; - int ret; - - /* - * Unless this is a Toshiba bridge, bail out until we have defined - * standard GMP attributes. - */ - if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) { - dev_err(&intf->dev, "unknown manufacturer %08x\n", - intf->ddbl1_manufacturer_id); - return -ENODEV; - } - - ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID, - &intf->vendor_id); - if (ret) - return ret; - - ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID, - &intf->product_id); - if (ret) - return ret; - - ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0); - if (ret) - return ret; - - ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1); - if (ret) - return ret; - - intf->serial_number = (u64)sn1 << 32 | sn0; - - return 0; -} - -static int gb_interface_read_dme(struct gb_interface *intf) -{ - int ret; - - /* DME attributes have already been read */ - if (intf->dme_read) - return 0; - - ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID, - &intf->ddbl1_manufacturer_id); - if (ret) - return ret; - - ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID, - &intf->ddbl1_product_id); - if (ret) - return ret; - - if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID && - intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) { - intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS; - intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS; - } - - ret = gb_interface_read_ara_dme(intf); - if (ret) - return ret; - - intf->dme_read = true; - - return 0; -} - -static int gb_interface_route_create(struct gb_interface *intf) -{ - struct gb_svc *svc = intf->hd->svc; - u8 intf_id = intf->interface_id; - u8 device_id; - int ret; - - /* Allocate an interface device id. */ - ret = ida_simple_get(&svc->device_id_map, - GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1, - GFP_KERNEL); - if (ret < 0) { - dev_err(&intf->dev, "failed to allocate device id: %d\n", ret); - return ret; - } - device_id = ret; - - ret = gb_svc_intf_device_id(svc, intf_id, device_id); - if (ret) { - dev_err(&intf->dev, "failed to set device id %u: %d\n", - device_id, ret); - goto err_ida_remove; - } - - /* FIXME: Hard-coded AP device id. 
*/ - ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP, - intf_id, device_id); - if (ret) { - dev_err(&intf->dev, "failed to create route: %d\n", ret); - goto err_svc_id_free; - } - - intf->device_id = device_id; - - return 0; - -err_svc_id_free: - /* - * XXX Should we tell SVC that this id doesn't belong to interface - * XXX anymore. - */ -err_ida_remove: - ida_simple_remove(&svc->device_id_map, device_id); - - return ret; -} - -static void gb_interface_route_destroy(struct gb_interface *intf) -{ - struct gb_svc *svc = intf->hd->svc; - - if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD) - return; - - gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id); - ida_simple_remove(&svc->device_id_map, intf->device_id); - intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; -} - -/* Locking: Caller holds the interface mutex. */ -static int gb_interface_legacy_mode_switch(struct gb_interface *intf) -{ - int ret; - - dev_info(&intf->dev, "legacy mode switch detected\n"); - - /* Mark as disconnected to prevent I/O during disable. */ - intf->disconnected = true; - gb_interface_disable(intf); - intf->disconnected = false; - - ret = gb_interface_enable(intf); - if (ret) { - dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret); - gb_interface_deactivate(intf); - } - - return ret; -} - -void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, - u32 mailbox) -{ - mutex_lock(&intf->mutex); - - if (result) { - dev_warn(&intf->dev, - "mailbox event with UniPro error: 0x%04x\n", - result); - goto err_disable; - } - - if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) { - dev_warn(&intf->dev, - "mailbox event with unexpected value: 0x%08x\n", - mailbox); - goto err_disable; - } - - if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) { - gb_interface_legacy_mode_switch(intf); - goto out_unlock; - } - - if (!intf->mode_switch) { - dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n", - mailbox); - goto err_disable; - } - - dev_info(&intf->dev, "mode switch detected\n"); - - complete(&intf->mode_switch_completion); - -out_unlock: - mutex_unlock(&intf->mutex); - - return; - -err_disable: - gb_interface_disable(intf); - gb_interface_deactivate(intf); - mutex_unlock(&intf->mutex); -} - -static void gb_interface_mode_switch_work(struct work_struct *work) -{ - struct gb_interface *intf; - struct gb_control *control; - unsigned long timeout; - int ret; - - intf = container_of(work, struct gb_interface, mode_switch_work); - - mutex_lock(&intf->mutex); - /* Make sure interface is still enabled. */ - if (!intf->enabled) { - dev_dbg(&intf->dev, "mode switch aborted\n"); - intf->mode_switch = false; - mutex_unlock(&intf->mutex); - goto out_interface_put; - } - - /* - * Prepare the control device for mode switch and make sure to get an - * extra reference before it goes away during interface disable. - */ - control = gb_control_get(intf->control); - gb_control_mode_switch_prepare(control); - gb_interface_disable(intf); - mutex_unlock(&intf->mutex); - - timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT); - ret = wait_for_completion_interruptible_timeout( - &intf->mode_switch_completion, timeout); - - /* Finalise control-connection mode switch. */ - gb_control_mode_switch_complete(control); - gb_control_put(control); - - if (ret < 0) { - dev_err(&intf->dev, "mode switch interrupted\n"); - goto err_deactivate; - } else if (ret == 0) { - dev_err(&intf->dev, "mode switch timed out\n"); - goto err_deactivate; - } - - /* Re-enable (re-enumerate) interface if still active. 
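
The mode-switch machinery here is a completion handshake: gb_interface_request_mode_switch() (below) queues the worker, the worker tears the interface down and blocks on mode_switch_completion, and gb_interface_mailbox_event() releases it when the module pokes the mailbox. Reduced to its skeleton, with hypothetical names and the same 2000 ms timeout, the pattern looks like this:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct switch_ctx {             /* hypothetical */
        struct work_struct work;
        struct completion done;
    };

    /* Event side (e.g. the mailbox handler) wakes the worker. */
    static void switch_event(struct switch_ctx *ctx)
    {
        complete(&ctx->done);
    }

    static void switch_work(struct work_struct *work)
    {
        struct switch_ctx *ctx = container_of(work, struct switch_ctx, work);
        long ret;

        /* tear down the device, then wait for the peer's signal */
        ret = wait_for_completion_interruptible_timeout(&ctx->done,
                                                msecs_to_jiffies(2000));
        if (ret < 0)
            return;     /* interrupted */
        if (ret == 0)
            return;     /* timed out */
        /* re-enable (re-enumerate) here */
    }
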
*/ - mutex_lock(&intf->mutex); - intf->mode_switch = false; - if (intf->active) { - ret = gb_interface_enable(intf); - if (ret) { - dev_err(&intf->dev, "failed to re-enable interface: %d\n", - ret); - gb_interface_deactivate(intf); - } - } - mutex_unlock(&intf->mutex); - -out_interface_put: - gb_interface_put(intf); - - return; - -err_deactivate: - mutex_lock(&intf->mutex); - intf->mode_switch = false; - gb_interface_deactivate(intf); - mutex_unlock(&intf->mutex); - - gb_interface_put(intf); -} - -int gb_interface_request_mode_switch(struct gb_interface *intf) -{ - int ret = 0; - - mutex_lock(&intf->mutex); - if (intf->mode_switch) { - ret = -EBUSY; - goto out_unlock; - } - - intf->mode_switch = true; - reinit_completion(&intf->mode_switch_completion); - - /* - * Get a reference to the interface device, which will be put once the - * mode switch is complete. - */ - get_device(&intf->dev); - - if (!queue_work(system_long_wq, &intf->mode_switch_work)) { - put_device(&intf->dev); - ret = -EBUSY; - goto out_unlock; - } - -out_unlock: - mutex_unlock(&intf->mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch); - -/* - * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the - * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and - * clear it after reading a non-zero value from it. - * - * FIXME: This is module-hardware dependent and needs to be extended for every - * type of module we want to support. - */ -static int gb_interface_read_and_clear_init_status(struct gb_interface *intf) -{ - struct gb_host_device *hd = intf->hd; - unsigned long bootrom_quirks; - unsigned long s2l_quirks; - int ret; - u32 value; - u16 attr; - u8 init_status; - - /* - * ES2 bridges use T_TstSrcIncrement for the init status. - * - * FIXME: Remove ES2 support - */ - if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS) - attr = DME_T_TST_SRC_INCREMENT; - else - attr = DME_TOSHIBA_GMP_INIT_STATUS; - - ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr, - DME_SELECTOR_INDEX_NULL, &value); - if (ret) - return ret; - - /* - * A nonzero init status indicates the module has finished - * initializing. - */ - if (!value) { - dev_err(&intf->dev, "invalid init status\n"); - return -ENODEV; - } - - /* - * Extract the init status. - * - * For ES2: We need to check lowest 8 bits of 'value'. - * For ES3: We need to check highest 8 bits out of 32 of 'value'. - * - * FIXME: Remove ES2 support - */ - if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS) - init_status = value & 0xff; - else - init_status = value >> 24; - - /* - * Check if the interface is executing the quirky ES3 bootrom that, - * for example, requires E2EFC, CSD and CSV to be disabled. - */ - bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES | - GB_INTERFACE_QUIRK_FORCED_DISABLE | - GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH | - GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE; - - s2l_quirks = GB_INTERFACE_QUIRK_NO_PM; - - switch (init_status) { - case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED: - case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED: - intf->quirks |= bootrom_quirks; - break; - case GB_INIT_S2_LOADER_BOOT_STARTED: - /* S2 Loader doesn't support runtime PM */ - intf->quirks &= ~bootrom_quirks; - intf->quirks |= s2l_quirks; - break; - default: - intf->quirks &= ~bootrom_quirks; - intf->quirks &= ~s2l_quirks; - } - - /* Clear the init status. 
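
The bit of decoding buried in the middle of the function above is easy to miss: the same 32-bit DME attribute value carries the init status in different places depending on silicon revision. As a one-line sketch of exactly that extraction:

    /* ES2 (quirk set): status is the low byte; ES3: the top byte. */
    static u8 init_status_of(u32 value, bool es2_quirk)
    {
        return es2_quirk ? (value & 0xff) : (value >> 24);
    }
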
*/ - return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr, - DME_SELECTOR_INDEX_NULL, 0); -} - -/* interface sysfs attributes */ -#define gb_interface_attr(field, type) \ -static ssize_t field##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ -{ \ - struct gb_interface *intf = to_gb_interface(dev); \ - return scnprintf(buf, PAGE_SIZE, type"\n", intf->field); \ -} \ -static DEVICE_ATTR_RO(field) - -gb_interface_attr(ddbl1_manufacturer_id, "0x%08x"); -gb_interface_attr(ddbl1_product_id, "0x%08x"); -gb_interface_attr(interface_id, "%u"); -gb_interface_attr(vendor_id, "0x%08x"); -gb_interface_attr(product_id, "0x%08x"); -gb_interface_attr(serial_number, "0x%016llx"); - -static ssize_t voltage_now_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_interface *intf = to_gb_interface(dev); - int ret; - u32 measurement; - - ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, - GB_SVC_PWRMON_TYPE_VOL, - &measurement); - if (ret) { - dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret); - return ret; - } - - return sprintf(buf, "%u\n", measurement); -} -static DEVICE_ATTR_RO(voltage_now); - -static ssize_t current_now_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_interface *intf = to_gb_interface(dev); - int ret; - u32 measurement; - - ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, - GB_SVC_PWRMON_TYPE_CURR, - &measurement); - if (ret) { - dev_err(&intf->dev, "failed to get current sample (%d)\n", ret); - return ret; - } - - return sprintf(buf, "%u\n", measurement); -} -static DEVICE_ATTR_RO(current_now); - -static ssize_t power_now_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_interface *intf = to_gb_interface(dev); - int ret; - u32 measurement; - - ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, - GB_SVC_PWRMON_TYPE_PWR, - &measurement); - if (ret) { - dev_err(&intf->dev, "failed to get power sample (%d)\n", ret); - return ret; - } - - return sprintf(buf, "%u\n", measurement); -} -static DEVICE_ATTR_RO(power_now); - -static ssize_t power_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_interface *intf = to_gb_interface(dev); - - if (intf->active) - return scnprintf(buf, PAGE_SIZE, "on\n"); - else - return scnprintf(buf, PAGE_SIZE, "off\n"); -} - -static ssize_t power_state_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t len) -{ - struct gb_interface *intf = to_gb_interface(dev); - bool activate; - int ret = 0; - - if (kstrtobool(buf, &activate)) - return -EINVAL; - - mutex_lock(&intf->mutex); - - if (activate == intf->active) - goto unlock; - - if (activate) { - ret = gb_interface_activate(intf); - if (ret) { - dev_err(&intf->dev, - "failed to activate interface: %d\n", ret); - goto unlock; - } - - ret = gb_interface_enable(intf); - if (ret) { - dev_err(&intf->dev, - "failed to enable interface: %d\n", ret); - gb_interface_deactivate(intf); - goto unlock; - } - } else { - gb_interface_disable(intf); - gb_interface_deactivate(intf); - } - -unlock: - mutex_unlock(&intf->mutex); - - if (ret) - return ret; - - return len; -} -static DEVICE_ATTR_RW(power_state); - -static const char *gb_interface_type_string(struct gb_interface *intf) -{ - static const char * const types[] = { - [GB_INTERFACE_TYPE_INVALID] = "invalid", - [GB_INTERFACE_TYPE_UNKNOWN] = "unknown", - [GB_INTERFACE_TYPE_DUMMY] = "dummy", - 
[GB_INTERFACE_TYPE_UNIPRO] = "unipro", - [GB_INTERFACE_TYPE_GREYBUS] = "greybus", - }; - - return types[intf->type]; -} - -static ssize_t interface_type_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_interface *intf = to_gb_interface(dev); - - return sprintf(buf, "%s\n", gb_interface_type_string(intf)); -} -static DEVICE_ATTR_RO(interface_type); - -static struct attribute *interface_unipro_attrs[] = { - &dev_attr_ddbl1_manufacturer_id.attr, - &dev_attr_ddbl1_product_id.attr, - NULL -}; - -static struct attribute *interface_greybus_attrs[] = { - &dev_attr_vendor_id.attr, - &dev_attr_product_id.attr, - &dev_attr_serial_number.attr, - NULL -}; - -static struct attribute *interface_power_attrs[] = { - &dev_attr_voltage_now.attr, - &dev_attr_current_now.attr, - &dev_attr_power_now.attr, - &dev_attr_power_state.attr, - NULL -}; - -static struct attribute *interface_common_attrs[] = { - &dev_attr_interface_id.attr, - &dev_attr_interface_type.attr, - NULL -}; - -static umode_t interface_unipro_is_visible(struct kobject *kobj, - struct attribute *attr, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct gb_interface *intf = to_gb_interface(dev); - - switch (intf->type) { - case GB_INTERFACE_TYPE_UNIPRO: - case GB_INTERFACE_TYPE_GREYBUS: - return attr->mode; - default: - return 0; - } -} - -static umode_t interface_greybus_is_visible(struct kobject *kobj, - struct attribute *attr, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct gb_interface *intf = to_gb_interface(dev); - - switch (intf->type) { - case GB_INTERFACE_TYPE_GREYBUS: - return attr->mode; - default: - return 0; - } -} - -static umode_t interface_power_is_visible(struct kobject *kobj, - struct attribute *attr, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct gb_interface *intf = to_gb_interface(dev); - - switch (intf->type) { - case GB_INTERFACE_TYPE_UNIPRO: - case GB_INTERFACE_TYPE_GREYBUS: - return attr->mode; - default: - return 0; - } -} - -static const struct attribute_group interface_unipro_group = { - .is_visible = interface_unipro_is_visible, - .attrs = interface_unipro_attrs, -}; - -static const struct attribute_group interface_greybus_group = { - .is_visible = interface_greybus_is_visible, - .attrs = interface_greybus_attrs, -}; - -static const struct attribute_group interface_power_group = { - .is_visible = interface_power_is_visible, - .attrs = interface_power_attrs, -}; - -static const struct attribute_group interface_common_group = { - .attrs = interface_common_attrs, -}; - -static const struct attribute_group *interface_groups[] = { - &interface_unipro_group, - &interface_greybus_group, - &interface_power_group, - &interface_common_group, - NULL -}; - -static void gb_interface_release(struct device *dev) -{ - struct gb_interface *intf = to_gb_interface(dev); - - trace_gb_interface_release(intf); - - kfree(intf); -} - -#ifdef CONFIG_PM -static int gb_interface_suspend(struct device *dev) -{ - struct gb_interface *intf = to_gb_interface(dev); - int ret; - - ret = gb_control_interface_suspend_prepare(intf->control); - if (ret) - return ret; - - ret = gb_control_suspend(intf->control); - if (ret) - goto err_hibernate_abort; - - ret = gb_interface_hibernate_link(intf); - if (ret) - return ret; - - /* Delay to allow interface to enter standby before disabling refclk */ - msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS); - - ret = gb_interface_refclk_set(intf, false); - if (ret) - return ret; - - 
return 0; - -err_hibernate_abort: - gb_control_interface_hibernate_abort(intf->control); - - return ret; -} - -static int gb_interface_resume(struct device *dev) -{ - struct gb_interface *intf = to_gb_interface(dev); - struct gb_svc *svc = intf->hd->svc; - int ret; - - ret = gb_interface_refclk_set(intf, true); - if (ret) - return ret; - - ret = gb_svc_intf_resume(svc, intf->interface_id); - if (ret) - return ret; - - ret = gb_control_resume(intf->control); - if (ret) - return ret; - - return 0; -} - -static int gb_interface_runtime_idle(struct device *dev) -{ - pm_runtime_mark_last_busy(dev); - pm_request_autosuspend(dev); - - return 0; -} -#endif - -static const struct dev_pm_ops gb_interface_pm_ops = { - SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume, - gb_interface_runtime_idle) -}; - -struct device_type greybus_interface_type = { - .name = "greybus_interface", - .release = gb_interface_release, - .pm = &gb_interface_pm_ops, -}; - -/* - * A Greybus module represents a user-replaceable component on a GMP - * phone. An interface is the physical connection on that module. A - * module may have more than one interface. - * - * Create a gb_interface structure to represent a discovered interface. - * The position of interface within the Endo is encoded in "interface_id" - * argument. - * - * Returns a pointer to the new interfce or a null pointer if a - * failure occurs due to memory exhaustion. - */ -struct gb_interface *gb_interface_create(struct gb_module *module, - u8 interface_id) -{ - struct gb_host_device *hd = module->hd; - struct gb_interface *intf; - - intf = kzalloc(sizeof(*intf), GFP_KERNEL); - if (!intf) - return NULL; - - intf->hd = hd; /* XXX refcount? */ - intf->module = module; - intf->interface_id = interface_id; - INIT_LIST_HEAD(&intf->bundles); - INIT_LIST_HEAD(&intf->manifest_descs); - mutex_init(&intf->mutex); - INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work); - init_completion(&intf->mode_switch_completion); - - /* Invalid device id to start with */ - intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; - - intf->dev.parent = &module->dev; - intf->dev.bus = &greybus_bus_type; - intf->dev.type = &greybus_interface_type; - intf->dev.groups = interface_groups; - intf->dev.dma_mask = module->dev.dma_mask; - device_initialize(&intf->dev); - dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev), - interface_id); - - pm_runtime_set_autosuspend_delay(&intf->dev, - GB_INTERFACE_AUTOSUSPEND_MS); - - trace_gb_interface_create(intf); - - return intf; -} - -static int gb_interface_vsys_set(struct gb_interface *intf, bool enable) -{ - struct gb_svc *svc = intf->hd->svc; - int ret; - - dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); - - ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable); - if (ret) { - dev_err(&intf->dev, "failed to set v_sys: %d\n", ret); - return ret; - } - - return 0; -} - -static int gb_interface_refclk_set(struct gb_interface *intf, bool enable) -{ - struct gb_svc *svc = intf->hd->svc; - int ret; - - dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); - - ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable); - if (ret) { - dev_err(&intf->dev, "failed to set refclk: %d\n", ret); - return ret; - } - - return 0; -} - -static int gb_interface_unipro_set(struct gb_interface *intf, bool enable) -{ - struct gb_svc *svc = intf->hd->svc; - int ret; - - dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); - - ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable); - if (ret) { - dev_err(&intf->dev, "failed to set 
UniPro: %d\n", ret); - return ret; - } - - return 0; -} - -static int gb_interface_activate_operation(struct gb_interface *intf, - enum gb_interface_type *intf_type) -{ - struct gb_svc *svc = intf->hd->svc; - u8 type; - int ret; - - dev_dbg(&intf->dev, "%s\n", __func__); - - ret = gb_svc_intf_activate(svc, intf->interface_id, &type); - if (ret) { - dev_err(&intf->dev, "failed to activate: %d\n", ret); - return ret; - } - - switch (type) { - case GB_SVC_INTF_TYPE_DUMMY: - *intf_type = GB_INTERFACE_TYPE_DUMMY; - /* FIXME: handle as an error for now */ - return -ENODEV; - case GB_SVC_INTF_TYPE_UNIPRO: - *intf_type = GB_INTERFACE_TYPE_UNIPRO; - dev_err(&intf->dev, "interface type UniPro not supported\n"); - /* FIXME: handle as an error for now */ - return -ENODEV; - case GB_SVC_INTF_TYPE_GREYBUS: - *intf_type = GB_INTERFACE_TYPE_GREYBUS; - break; - default: - dev_err(&intf->dev, "unknown interface type: %u\n", type); - *intf_type = GB_INTERFACE_TYPE_UNKNOWN; - return -ENODEV; - } - - return 0; -} - -static int gb_interface_hibernate_link(struct gb_interface *intf) -{ - struct gb_svc *svc = intf->hd->svc; - - return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id); -} - -static int _gb_interface_activate(struct gb_interface *intf, - enum gb_interface_type *type) -{ - int ret; - - *type = GB_INTERFACE_TYPE_UNKNOWN; - - if (intf->ejected || intf->removed) - return -ENODEV; - - ret = gb_interface_vsys_set(intf, true); - if (ret) - return ret; - - ret = gb_interface_refclk_set(intf, true); - if (ret) - goto err_vsys_disable; - - ret = gb_interface_unipro_set(intf, true); - if (ret) - goto err_refclk_disable; - - ret = gb_interface_activate_operation(intf, type); - if (ret) { - switch (*type) { - case GB_INTERFACE_TYPE_UNIPRO: - case GB_INTERFACE_TYPE_GREYBUS: - goto err_hibernate_link; - default: - goto err_unipro_disable; - } - } - - ret = gb_interface_read_dme(intf); - if (ret) - goto err_hibernate_link; - - ret = gb_interface_route_create(intf); - if (ret) - goto err_hibernate_link; - - intf->active = true; - - trace_gb_interface_activate(intf); - - return 0; - -err_hibernate_link: - gb_interface_hibernate_link(intf); -err_unipro_disable: - gb_interface_unipro_set(intf, false); -err_refclk_disable: - gb_interface_refclk_set(intf, false); -err_vsys_disable: - gb_interface_vsys_set(intf, false); - - return ret; -} - -/* - * At present, we assume a UniPro-only module to be a Greybus module that - * failed to send its mailbox poke. There is some reason to believe that this - * is because of a bug in the ES3 bootrom. - * - * FIXME: Check if this is a Toshiba bridge before retrying? - */ -static int _gb_interface_activate_es3_hack(struct gb_interface *intf, - enum gb_interface_type *type) -{ - int retries = 3; - int ret; - - while (retries--) { - ret = _gb_interface_activate(intf, type); - if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO) - continue; - - break; - } - - return ret; -} - -/* - * Activate an interface. - * - * Locking: Caller holds the interface mutex. - */ -int gb_interface_activate(struct gb_interface *intf) -{ - enum gb_interface_type type; - int ret; - - switch (intf->type) { - case GB_INTERFACE_TYPE_INVALID: - case GB_INTERFACE_TYPE_GREYBUS: - ret = _gb_interface_activate_es3_hack(intf, &type); - break; - default: - ret = _gb_interface_activate(intf, &type); - } - - /* Make sure type is detected correctly during reactivation. 
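
Note the calling convention gb_interface_activate() shares with power_state_store() earlier: the caller holds intf->mutex, pairs activate with enable, and deactivates again if enable fails. A condensed usage sketch under those assumptions (the wrapper name is hypothetical, error logging trimmed):

    static int interface_power_on(struct gb_interface *intf)
    {
        int ret;

        mutex_lock(&intf->mutex);
        ret = gb_interface_activate(intf);
        if (!ret) {
            ret = gb_interface_enable(intf);
            if (ret)
                gb_interface_deactivate(intf);  /* roll back */
        }
        mutex_unlock(&intf->mutex);

        return ret;
    }
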
*/ - if (intf->type != GB_INTERFACE_TYPE_INVALID) { - if (type != intf->type) { - dev_err(&intf->dev, "failed to detect interface type\n"); - - if (!ret) - gb_interface_deactivate(intf); - - return -EIO; - } - } else { - intf->type = type; - } - - return ret; -} - -/* - * Deactivate an interface. - * - * Locking: Caller holds the interface mutex. - */ -void gb_interface_deactivate(struct gb_interface *intf) -{ - if (!intf->active) - return; - - trace_gb_interface_deactivate(intf); - - /* Abort any ongoing mode switch. */ - if (intf->mode_switch) - complete(&intf->mode_switch_completion); - - gb_interface_route_destroy(intf); - gb_interface_hibernate_link(intf); - gb_interface_unipro_set(intf, false); - gb_interface_refclk_set(intf, false); - gb_interface_vsys_set(intf, false); - - intf->active = false; -} - -/* - * Enable an interface by enabling its control connection, fetching the - * manifest and other information over it, and finally registering its child - * devices. - * - * Locking: Caller holds the interface mutex. - */ -int gb_interface_enable(struct gb_interface *intf) -{ - struct gb_control *control; - struct gb_bundle *bundle, *tmp; - int ret, size; - void *manifest; - - ret = gb_interface_read_and_clear_init_status(intf); - if (ret) { - dev_err(&intf->dev, "failed to clear init status: %d\n", ret); - return ret; - } - - /* Establish control connection */ - control = gb_control_create(intf); - if (IS_ERR(control)) { - dev_err(&intf->dev, "failed to create control device: %ld\n", - PTR_ERR(control)); - return PTR_ERR(control); - } - intf->control = control; - - ret = gb_control_enable(intf->control); - if (ret) - goto err_put_control; - - /* Get manifest size using control protocol on CPort */ - size = gb_control_get_manifest_size_operation(intf); - if (size <= 0) { - dev_err(&intf->dev, "failed to get manifest size: %d\n", size); - - if (size) - ret = size; - else - ret = -EINVAL; - - goto err_disable_control; - } - - manifest = kmalloc(size, GFP_KERNEL); - if (!manifest) { - ret = -ENOMEM; - goto err_disable_control; - } - - /* Get manifest using control protocol on CPort */ - ret = gb_control_get_manifest_operation(intf, manifest, size); - if (ret) { - dev_err(&intf->dev, "failed to get manifest: %d\n", ret); - goto err_free_manifest; - } - - /* - * Parse the manifest and build up our data structures representing - * what's in it. 
- */ - if (!gb_manifest_parse(intf, manifest, size)) { - dev_err(&intf->dev, "failed to parse manifest\n"); - ret = -EINVAL; - goto err_destroy_bundles; - } - - ret = gb_control_get_bundle_versions(intf->control); - if (ret) - goto err_destroy_bundles; - - /* Register the control device and any bundles */ - ret = gb_control_add(intf->control); - if (ret) - goto err_destroy_bundles; - - pm_runtime_use_autosuspend(&intf->dev); - pm_runtime_get_noresume(&intf->dev); - pm_runtime_set_active(&intf->dev); - pm_runtime_enable(&intf->dev); - - list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) { - ret = gb_bundle_add(bundle); - if (ret) { - gb_bundle_destroy(bundle); - continue; - } - } - - kfree(manifest); - - intf->enabled = true; - - pm_runtime_put(&intf->dev); - - trace_gb_interface_enable(intf); - - return 0; - -err_destroy_bundles: - list_for_each_entry_safe(bundle, tmp, &intf->bundles, links) - gb_bundle_destroy(bundle); -err_free_manifest: - kfree(manifest); -err_disable_control: - gb_control_disable(intf->control); -err_put_control: - gb_control_put(intf->control); - intf->control = NULL; - - return ret; -} - -/* - * Disable an interface and destroy its bundles. - * - * Locking: Caller holds the interface mutex. - */ -void gb_interface_disable(struct gb_interface *intf) -{ - struct gb_bundle *bundle; - struct gb_bundle *next; - - if (!intf->enabled) - return; - - trace_gb_interface_disable(intf); - - pm_runtime_get_sync(&intf->dev); - - /* Set disconnected flag to avoid I/O during connection tear down. */ - if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE) - intf->disconnected = true; - - list_for_each_entry_safe(bundle, next, &intf->bundles, links) - gb_bundle_destroy(bundle); - - if (!intf->mode_switch && !intf->disconnected) - gb_control_interface_deactivate_prepare(intf->control); - - gb_control_del(intf->control); - gb_control_disable(intf->control); - gb_control_put(intf->control); - intf->control = NULL; - - intf->enabled = false; - - pm_runtime_disable(&intf->dev); - pm_runtime_set_suspended(&intf->dev); - pm_runtime_dont_use_autosuspend(&intf->dev); - pm_runtime_put_noidle(&intf->dev); -} - -/* Register an interface. */ -int gb_interface_add(struct gb_interface *intf) -{ - int ret; - - ret = device_add(&intf->dev); - if (ret) { - dev_err(&intf->dev, "failed to register interface: %d\n", ret); - return ret; - } - - trace_gb_interface_add(intf); - - dev_info(&intf->dev, "Interface added (%s)\n", - gb_interface_type_string(intf)); - - switch (intf->type) { - case GB_INTERFACE_TYPE_GREYBUS: - dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n", - intf->vendor_id, intf->product_id); - /* fall-through */ - case GB_INTERFACE_TYPE_UNIPRO: - dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n", - intf->ddbl1_manufacturer_id, - intf->ddbl1_product_id); - break; - default: - break; - } - - return 0; -} - -/* Deregister an interface. */ -void gb_interface_del(struct gb_interface *intf) -{ - if (device_is_registered(&intf->dev)) { - trace_gb_interface_del(intf); - - device_del(&intf->dev); - dev_info(&intf->dev, "Interface removed\n"); - } -} - -void gb_interface_put(struct gb_interface *intf) -{ - put_device(&intf->dev); -} diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c deleted file mode 100644 index dd7040697bde..000000000000 --- a/drivers/staging/greybus/manifest.c +++ /dev/null @@ -1,533 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus manifest parsing - * - * Copyright 2014-2015 Google Inc. 
- * Copyright 2014-2015 Linaro Ltd. - */ - -#include - -static const char *get_descriptor_type_string(u8 type) -{ - switch (type) { - case GREYBUS_TYPE_INVALID: - return "invalid"; - case GREYBUS_TYPE_STRING: - return "string"; - case GREYBUS_TYPE_INTERFACE: - return "interface"; - case GREYBUS_TYPE_CPORT: - return "cport"; - case GREYBUS_TYPE_BUNDLE: - return "bundle"; - default: - WARN_ON(1); - return "unknown"; - } -} - -/* - * We scan the manifest once to identify where all the descriptors - * are. The result is a list of these manifest_desc structures. We - * then pick through them for what we're looking for (starting with - * the interface descriptor). As each is processed we remove it from - * the list. When we're done the list should (probably) be empty. - */ -struct manifest_desc { - struct list_head links; - - size_t size; - void *data; - enum greybus_descriptor_type type; -}; - -static void release_manifest_descriptor(struct manifest_desc *descriptor) -{ - list_del(&descriptor->links); - kfree(descriptor); -} - -static void release_manifest_descriptors(struct gb_interface *intf) -{ - struct manifest_desc *descriptor; - struct manifest_desc *next; - - list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links) - release_manifest_descriptor(descriptor); -} - -static void release_cport_descriptors(struct list_head *head, u8 bundle_id) -{ - struct manifest_desc *desc, *tmp; - struct greybus_descriptor_cport *desc_cport; - - list_for_each_entry_safe(desc, tmp, head, links) { - desc_cport = desc->data; - - if (desc->type != GREYBUS_TYPE_CPORT) - continue; - - if (desc_cport->bundle == bundle_id) - release_manifest_descriptor(desc); - } -} - -static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf) -{ - struct manifest_desc *descriptor; - struct manifest_desc *next; - - list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links) - if (descriptor->type == GREYBUS_TYPE_BUNDLE) - return descriptor; - - return NULL; -} - -/* - * Validate the given descriptor. Its reported size must fit within - * the number of bytes remaining, and it must have a recognized - * type. Check that the reported size is at least as big as what - * we expect to see. (It could be bigger, perhaps for a new version - * of the format.) - * - * Returns the (non-zero) number of bytes consumed by the descriptor, - * or a negative errno. 
- */ -static int identify_descriptor(struct gb_interface *intf, - struct greybus_descriptor *desc, size_t size) -{ - struct greybus_descriptor_header *desc_header = &desc->header; - struct manifest_desc *descriptor; - size_t desc_size; - size_t expected_size; - - if (size < sizeof(*desc_header)) { - dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size, - sizeof(*desc_header)); - return -EINVAL; /* Must at least have header */ - } - - desc_size = le16_to_cpu(desc_header->size); - if (desc_size > size) { - dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n", - desc_size, size); - return -EINVAL; - } - - /* Descriptor needs to at least have a header */ - expected_size = sizeof(*desc_header); - - switch (desc_header->type) { - case GREYBUS_TYPE_STRING: - expected_size += sizeof(struct greybus_descriptor_string); - expected_size += desc->string.length; - - /* String descriptors are padded to 4 byte boundaries */ - expected_size = ALIGN(expected_size, 4); - break; - case GREYBUS_TYPE_INTERFACE: - expected_size += sizeof(struct greybus_descriptor_interface); - break; - case GREYBUS_TYPE_BUNDLE: - expected_size += sizeof(struct greybus_descriptor_bundle); - break; - case GREYBUS_TYPE_CPORT: - expected_size += sizeof(struct greybus_descriptor_cport); - break; - case GREYBUS_TYPE_INVALID: - default: - dev_err(&intf->dev, "invalid descriptor type (%u)\n", - desc_header->type); - return -EINVAL; - } - - if (desc_size < expected_size) { - dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n", - get_descriptor_type_string(desc_header->type), - desc_size, expected_size); - return -EINVAL; - } - - /* Descriptor bigger than what we expect */ - if (desc_size > expected_size) { - dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n", - get_descriptor_type_string(desc_header->type), - expected_size, desc_size); - } - - descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL); - if (!descriptor) - return -ENOMEM; - - descriptor->size = desc_size; - descriptor->data = (char *)desc + sizeof(*desc_header); - descriptor->type = desc_header->type; - list_add_tail(&descriptor->links, &intf->manifest_descs); - - /* desc_size is positive and is known to fit in a signed int */ - - return desc_size; -} - -/* - * Find the string descriptor having the given id, validate it, and - * allocate a duplicate copy of it. The duplicate has an extra byte - * which guarantees the returned string is NUL-terminated. - * - * String index 0 is valid (it represents "no string"), and for - * that a null pointer is returned. - * - * Otherwise returns a pointer to a newly-allocated copy of the - * descriptor string, or an error-coded pointer on failure. 
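
identify_descriptor() above enforces a two-sided size check worth spelling out: the size a descriptor claims must fit within the bytes remaining in the manifest, and must be at least as large as the structure the parser knows how to interpret; anything larger is tolerated (with a warning) for forward compatibility. A hypothetical distillation:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int desc_size_ok(size_t claimed, size_t remaining, size_t expected)
    {
        if (claimed > remaining)    /* would run past the manifest */
            return -EINVAL;
        if (claimed < expected)     /* too small to parse */
            return -EINVAL;
        return 0;                   /* claimed >= expected is fine */
    }
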
- */ -static char *gb_string_get(struct gb_interface *intf, u8 string_id) -{ - struct greybus_descriptor_string *desc_string; - struct manifest_desc *descriptor; - bool found = false; - char *string; - - /* A zero string id means no string (but no error) */ - if (!string_id) - return NULL; - - list_for_each_entry(descriptor, &intf->manifest_descs, links) { - if (descriptor->type != GREYBUS_TYPE_STRING) - continue; - - desc_string = descriptor->data; - if (desc_string->id == string_id) { - found = true; - break; - } - } - if (!found) - return ERR_PTR(-ENOENT); - - /* Allocate an extra byte so we can guarantee it's NUL-terminated */ - string = kmemdup(&desc_string->string, desc_string->length + 1, - GFP_KERNEL); - if (!string) - return ERR_PTR(-ENOMEM); - string[desc_string->length] = '\0'; - - /* Ok we've used this string, so we're done with it */ - release_manifest_descriptor(descriptor); - - return string; -} - -/* - * Find cport descriptors in the manifest associated with the given - * bundle, and set up data structures for the functions that use - * them. Returns the number of cports set up for the bundle, or 0 - * if there is an error. - */ -static u32 gb_manifest_parse_cports(struct gb_bundle *bundle) -{ - struct gb_interface *intf = bundle->intf; - struct greybus_descriptor_cport *desc_cport; - struct manifest_desc *desc, *next, *tmp; - LIST_HEAD(list); - u8 bundle_id = bundle->id; - u16 cport_id; - u32 count = 0; - int i; - - /* Set up all cport descriptors associated with this bundle */ - list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) { - if (desc->type != GREYBUS_TYPE_CPORT) - continue; - - desc_cport = desc->data; - if (desc_cport->bundle != bundle_id) - continue; - - cport_id = le16_to_cpu(desc_cport->id); - if (cport_id > CPORT_ID_MAX) - goto exit; - - /* Nothing else should have its cport_id as control cport id */ - if (cport_id == GB_CONTROL_CPORT_ID) { - dev_err(&bundle->dev, "invalid cport id found (%02u)\n", - cport_id); - goto exit; - } - - /* - * Found one, move it to our temporary list after checking for - * duplicates. - */ - list_for_each_entry(tmp, &list, links) { - desc_cport = tmp->data; - if (cport_id == le16_to_cpu(desc_cport->id)) { - dev_err(&bundle->dev, - "duplicate CPort %u found\n", cport_id); - goto exit; - } - } - list_move_tail(&desc->links, &list); - count++; - } - - if (!count) - return 0; - - bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc), - GFP_KERNEL); - if (!bundle->cport_desc) - goto exit; - - bundle->num_cports = count; - - i = 0; - list_for_each_entry_safe(desc, next, &list, links) { - desc_cport = desc->data; - memcpy(&bundle->cport_desc[i++], desc_cport, - sizeof(*desc_cport)); - - /* Release the cport descriptor */ - release_manifest_descriptor(desc); - } - - return count; -exit: - release_cport_descriptors(&list, bundle_id); - /* - * Free all cports for this bundle to avoid 'excess descriptors' - * warnings. - */ - release_cport_descriptors(&intf->manifest_descs, bundle_id); - - return 0; /* Error; count should also be 0 */ -} - -/* - * Find bundle descriptors in the manifest and set up their data - * structures. Returns the number of bundles set up for the - * given interface. - */ -static u32 gb_manifest_parse_bundles(struct gb_interface *intf) -{ - struct manifest_desc *desc; - struct gb_bundle *bundle; - struct gb_bundle *bundle_next; - u32 count = 0; - u8 bundle_id; - u8 class; - - while ((desc = get_next_bundle_desc(intf))) { - struct greybus_descriptor_bundle *desc_bundle; - - /* Found one. 
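
gb_string_get() above uses a compact allocation trick: kmemdup() copies length + 1 bytes, one more than the string content, so the final byte can be overwritten with a terminating NUL. The same result, spelled out with an explicit allocate-copy-terminate (this variant does not read past the source data, unlike the kmemdup shortcut):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Copy 'len' bytes of unterminated data into a NUL-terminated string. */
    static char *dup_terminated(const void *src, size_t len, gfp_t gfp)
    {
        char *s = kmalloc(len + 1, gfp);

        if (s) {
            memcpy(s, src, len);
            s[len] = '\0';
        }
        return s;
    }
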
Set up its bundle structure*/ - desc_bundle = desc->data; - bundle_id = desc_bundle->id; - class = desc_bundle->class; - - /* Done with this bundle descriptor */ - release_manifest_descriptor(desc); - - /* Ignore any legacy control bundles */ - if (bundle_id == GB_CONTROL_BUNDLE_ID) { - dev_dbg(&intf->dev, "%s - ignoring control bundle\n", - __func__); - release_cport_descriptors(&intf->manifest_descs, - bundle_id); - continue; - } - - /* Nothing else should have its class set to control class */ - if (class == GREYBUS_CLASS_CONTROL) { - dev_err(&intf->dev, - "bundle %u cannot use control class\n", - bundle_id); - goto cleanup; - } - - bundle = gb_bundle_create(intf, bundle_id, class); - if (!bundle) - goto cleanup; - - /* - * Now go set up this bundle's functions and cports. - * - * A 'bundle' represents a device in greybus. It may require - * multiple cports for its functioning. If we fail to setup any - * cport of a bundle, we better reject the complete bundle as - * the device may not be able to function properly then. - * - * But, failing to setup a cport of bundle X doesn't mean that - * the device corresponding to bundle Y will not work properly. - * Bundles should be treated as separate independent devices. - * - * While parsing manifest for an interface, treat bundles as - * separate entities and don't reject entire interface and its - * bundles on failing to initialize a cport. But make sure the - * bundle which needs the cport, gets destroyed properly. - */ - if (!gb_manifest_parse_cports(bundle)) { - gb_bundle_destroy(bundle); - continue; - } - - count++; - } - - return count; -cleanup: - /* An error occurred; undo any changes we've made */ - list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) { - gb_bundle_destroy(bundle); - count--; - } - return 0; /* Error; count should also be 0 */ -} - -static bool gb_manifest_parse_interface(struct gb_interface *intf, - struct manifest_desc *interface_desc) -{ - struct greybus_descriptor_interface *desc_intf = interface_desc->data; - struct gb_control *control = intf->control; - char *str; - - /* Handle the strings first--they can fail */ - str = gb_string_get(intf, desc_intf->vendor_stringid); - if (IS_ERR(str)) - return false; - control->vendor_string = str; - - str = gb_string_get(intf, desc_intf->product_stringid); - if (IS_ERR(str)) - goto out_free_vendor_string; - control->product_string = str; - - /* Assign feature flags communicated via manifest */ - intf->features = desc_intf->features; - - /* Release the interface descriptor, now that we're done with it */ - release_manifest_descriptor(interface_desc); - - /* An interface must have at least one bundle descriptor */ - if (!gb_manifest_parse_bundles(intf)) { - dev_err(&intf->dev, "manifest bundle descriptors not valid\n"); - goto out_err; - } - - return true; -out_err: - kfree(control->product_string); - control->product_string = NULL; -out_free_vendor_string: - kfree(control->vendor_string); - control->vendor_string = NULL; - - return false; -} - -/* - * Parse a buffer containing an interface manifest. - * - * If we find anything wrong with the content/format of the buffer - * we reject it. - * - * The first requirement is that the manifest's version is - * one we can parse. - * - * We make an initial pass through the buffer and identify all of - * the descriptors it contains, keeping track for each its type - * and the location size of its data in the buffer. 
- * - * Next we scan the descriptors, looking for an interface descriptor; - * there must be exactly one of those. When found, we record the - * information it contains, and then remove that descriptor (and any - * string descriptors it refers to) from further consideration. - * - * After that we look for the interface's bundles--there must be at - * least one of those. - * - * Returns true if parsing was successful, false otherwise. - */ -bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size) -{ - struct greybus_manifest *manifest; - struct greybus_manifest_header *header; - struct greybus_descriptor *desc; - struct manifest_desc *descriptor; - struct manifest_desc *interface_desc = NULL; - u16 manifest_size; - u32 found = 0; - bool result; - - /* Manifest descriptor list should be empty here */ - if (WARN_ON(!list_empty(&intf->manifest_descs))) - return false; - - /* we have to have at _least_ the manifest header */ - if (size < sizeof(*header)) { - dev_err(&intf->dev, "short manifest (%zu < %zu)\n", - size, sizeof(*header)); - return false; - } - - /* Make sure the size is right */ - manifest = data; - header = &manifest->header; - manifest_size = le16_to_cpu(header->size); - if (manifest_size != size) { - dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n", - size, manifest_size); - return false; - } - - /* Validate major/minor number */ - if (header->version_major > GREYBUS_VERSION_MAJOR) { - dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n", - header->version_major, header->version_minor, - GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR); - return false; - } - - /* OK, find all the descriptors */ - desc = manifest->descriptors; - size -= sizeof(*header); - while (size) { - int desc_size; - - desc_size = identify_descriptor(intf, desc, size); - if (desc_size < 0) { - result = false; - goto out; - } - desc = (struct greybus_descriptor *)((char *)desc + desc_size); - size -= desc_size; - } - - /* There must be a single interface descriptor */ - list_for_each_entry(descriptor, &intf->manifest_descs, links) { - if (descriptor->type == GREYBUS_TYPE_INTERFACE) - if (!found++) - interface_desc = descriptor; - } - if (found != 1) { - dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n", - found); - result = false; - goto out; - } - - /* Parse the manifest, starting with the interface descriptor */ - result = gb_manifest_parse_interface(intf, interface_desc); - - /* - * We really should have no remaining descriptors, but we - * don't know what newer format manifests might leave. - */ - if (result && !list_empty(&intf->manifest_descs)) - dev_info(&intf->dev, "excess descriptors in interface manifest\n"); -out: - release_manifest_descriptors(intf); - - return result; -} diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c deleted file mode 100644 index 36f77f9e1d74..000000000000 --- a/drivers/staging/greybus/module.c +++ /dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus Module code - * - * Copyright 2016 Google Inc. - * Copyright 2016 Linaro Ltd. 
- */ - -#include -#include "greybus_trace.h" - -static ssize_t eject_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct gb_module *module = to_gb_module(dev); - struct gb_interface *intf; - size_t i; - long val; - int ret; - - ret = kstrtol(buf, 0, &val); - if (ret) - return ret; - - if (!val) - return len; - - for (i = 0; i < module->num_interfaces; ++i) { - intf = module->interfaces[i]; - - mutex_lock(&intf->mutex); - /* Set flag to prevent concurrent activation. */ - intf->ejected = true; - gb_interface_disable(intf); - gb_interface_deactivate(intf); - mutex_unlock(&intf->mutex); - } - - /* Tell the SVC to eject the primary interface. */ - ret = gb_svc_intf_eject(module->hd->svc, module->module_id); - if (ret) - return ret; - - return len; -} -static DEVICE_ATTR_WO(eject); - -static ssize_t module_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_module *module = to_gb_module(dev); - - return sprintf(buf, "%u\n", module->module_id); -} -static DEVICE_ATTR_RO(module_id); - -static ssize_t num_interfaces_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_module *module = to_gb_module(dev); - - return sprintf(buf, "%zu\n", module->num_interfaces); -} -static DEVICE_ATTR_RO(num_interfaces); - -static struct attribute *module_attrs[] = { - &dev_attr_eject.attr, - &dev_attr_module_id.attr, - &dev_attr_num_interfaces.attr, - NULL, -}; -ATTRIBUTE_GROUPS(module); - -static void gb_module_release(struct device *dev) -{ - struct gb_module *module = to_gb_module(dev); - - trace_gb_module_release(module); - - kfree(module); -} - -struct device_type greybus_module_type = { - .name = "greybus_module", - .release = gb_module_release, -}; - -struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, - size_t num_interfaces) -{ - struct gb_interface *intf; - struct gb_module *module; - int i; - - module = kzalloc(struct_size(module, interfaces, num_interfaces), - GFP_KERNEL); - if (!module) - return NULL; - - module->hd = hd; - module->module_id = module_id; - module->num_interfaces = num_interfaces; - - module->dev.parent = &hd->dev; - module->dev.bus = &greybus_bus_type; - module->dev.type = &greybus_module_type; - module->dev.groups = module_groups; - module->dev.dma_mask = hd->dev.dma_mask; - device_initialize(&module->dev); - dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id); - - trace_gb_module_create(module); - - for (i = 0; i < num_interfaces; ++i) { - intf = gb_interface_create(module, module_id + i); - if (!intf) { - dev_err(&module->dev, "failed to create interface %u\n", - module_id + i); - goto err_put_interfaces; - } - module->interfaces[i] = intf; - } - - return module; - -err_put_interfaces: - for (--i; i >= 0; --i) - gb_interface_put(module->interfaces[i]); - - put_device(&module->dev); - - return NULL; -} - -/* - * Register and enable an interface after first attempting to activate it. 
- */ -static void gb_module_register_interface(struct gb_interface *intf) -{ - struct gb_module *module = intf->module; - u8 intf_id = intf->interface_id; - int ret; - - mutex_lock(&intf->mutex); - - ret = gb_interface_activate(intf); - if (ret) { - if (intf->type != GB_INTERFACE_TYPE_DUMMY) { - dev_err(&module->dev, - "failed to activate interface %u: %d\n", - intf_id, ret); - } - - gb_interface_add(intf); - goto err_unlock; - } - - ret = gb_interface_add(intf); - if (ret) - goto err_interface_deactivate; - - ret = gb_interface_enable(intf); - if (ret) { - dev_err(&module->dev, "failed to enable interface %u: %d\n", - intf_id, ret); - goto err_interface_deactivate; - } - - mutex_unlock(&intf->mutex); - - return; - -err_interface_deactivate: - gb_interface_deactivate(intf); -err_unlock: - mutex_unlock(&intf->mutex); -} - -static void gb_module_deregister_interface(struct gb_interface *intf) -{ - /* Mark as disconnected to prevent I/O during disable. */ - if (intf->module->disconnected) - intf->disconnected = true; - - mutex_lock(&intf->mutex); - intf->removed = true; - gb_interface_disable(intf); - gb_interface_deactivate(intf); - mutex_unlock(&intf->mutex); - - gb_interface_del(intf); -} - -/* Register a module and its interfaces. */ -int gb_module_add(struct gb_module *module) -{ - size_t i; - int ret; - - ret = device_add(&module->dev); - if (ret) { - dev_err(&module->dev, "failed to register module: %d\n", ret); - return ret; - } - - trace_gb_module_add(module); - - for (i = 0; i < module->num_interfaces; ++i) - gb_module_register_interface(module->interfaces[i]); - - return 0; -} - -/* Deregister a module and its interfaces. */ -void gb_module_del(struct gb_module *module) -{ - size_t i; - - for (i = 0; i < module->num_interfaces; ++i) - gb_module_deregister_interface(module->interfaces[i]); - - trace_gb_module_del(module); - - device_del(&module->dev); -} - -void gb_module_put(struct gb_module *module) -{ - size_t i; - - for (i = 0; i < module->num_interfaces; ++i) - gb_interface_put(module->interfaces[i]); - - put_device(&module->dev); -} diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c deleted file mode 100644 index 8459e9bc0749..000000000000 --- a/drivers/staging/greybus/operation.c +++ /dev/null @@ -1,1264 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Greybus operations - * - * Copyright 2014-2015 Google Inc. - * Copyright 2014-2015 Linaro Ltd. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "greybus_trace.h" - -static struct kmem_cache *gb_operation_cache; -static struct kmem_cache *gb_message_cache; - -/* Workqueue to handle Greybus operation completions. */ -static struct workqueue_struct *gb_operation_completion_wq; - -/* Wait queue for synchronous cancellations. */ -static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue); - -/* - * Protects updates to operation->errno. - */ -static DEFINE_SPINLOCK(gb_operations_lock); - -static int gb_operation_response_send(struct gb_operation *operation, - int errno); - -/* - * Increment operation active count and add to connection list unless the - * connection is going away. - * - * Caller holds operation reference. 
- */ -static int gb_operation_get_active(struct gb_operation *operation) -{ - struct gb_connection *connection = operation->connection; - unsigned long flags; - - spin_lock_irqsave(&connection->lock, flags); - switch (connection->state) { - case GB_CONNECTION_STATE_ENABLED: - break; - case GB_CONNECTION_STATE_ENABLED_TX: - if (gb_operation_is_incoming(operation)) - goto err_unlock; - break; - case GB_CONNECTION_STATE_DISCONNECTING: - if (!gb_operation_is_core(operation)) - goto err_unlock; - break; - default: - goto err_unlock; - } - - if (operation->active++ == 0) - list_add_tail(&operation->links, &connection->operations); - - trace_gb_operation_get_active(operation); - - spin_unlock_irqrestore(&connection->lock, flags); - - return 0; - -err_unlock: - spin_unlock_irqrestore(&connection->lock, flags); - - return -ENOTCONN; -} - -/* Caller holds operation reference. */ -static void gb_operation_put_active(struct gb_operation *operation) -{ - struct gb_connection *connection = operation->connection; - unsigned long flags; - - spin_lock_irqsave(&connection->lock, flags); - - trace_gb_operation_put_active(operation); - - if (--operation->active == 0) { - list_del(&operation->links); - if (atomic_read(&operation->waiters)) - wake_up(&gb_operation_cancellation_queue); - } - spin_unlock_irqrestore(&connection->lock, flags); -} - -static bool gb_operation_is_active(struct gb_operation *operation) -{ - struct gb_connection *connection = operation->connection; - unsigned long flags; - bool ret; - - spin_lock_irqsave(&connection->lock, flags); - ret = operation->active; - spin_unlock_irqrestore(&connection->lock, flags); - - return ret; -} - -/* - * Set an operation's result. - * - * Initially an outgoing operation's errno value is -EBADR. - * If no error occurs before sending the request message the only - * valid value operation->errno can be set to is -EINPROGRESS, - * indicating the request has been (or rather is about to be) sent. - * At that point nobody should be looking at the result until the - * response arrives. - * - * The first time the result gets set after the request has been - * sent, that result "sticks." That is, if two concurrent threads - * race to set the result, the first one wins. The return value - * tells the caller whether its result was recorded; if not the - * caller has nothing more to do. - * - * The result value -EILSEQ is reserved to signal an implementation - * error; if it's ever observed, the code performing the request has - * done something fundamentally wrong. It is an error to try to set - * the result to -EBADR, and attempts to do so result in a warning, - * and -EILSEQ is used instead. Similarly, the only valid result - * value to set for an operation in initial state is -EINPROGRESS. - * Attempts to do otherwise will also record a (successful) -EILSEQ - * operation result. - */ -static bool gb_operation_result_set(struct gb_operation *operation, int result) -{ - unsigned long flags; - int prev; - - if (result == -EINPROGRESS) { - /* - * -EINPROGRESS is used to indicate the request is - * in flight. It should be the first result value - * set after the initial -EBADR. Issue a warning - * and record an implementation error if it's - * set at any other time. 
- */ - spin_lock_irqsave(&gb_operations_lock, flags); - prev = operation->errno; - if (prev == -EBADR) - operation->errno = result; - else - operation->errno = -EILSEQ; - spin_unlock_irqrestore(&gb_operations_lock, flags); - WARN_ON(prev != -EBADR); - - return true; - } - - /* - * The first result value set after a request has been sent - * will be the final result of the operation. Subsequent - * attempts to set the result are ignored. - * - * Note that -EBADR is a reserved "initial state" result - * value. Attempts to set this value result in a warning, - * and the result code is set to -EILSEQ instead. - */ - if (WARN_ON(result == -EBADR)) - result = -EILSEQ; /* Nobody should be setting -EBADR */ - - spin_lock_irqsave(&gb_operations_lock, flags); - prev = operation->errno; - if (prev == -EINPROGRESS) - operation->errno = result; /* First and final result */ - spin_unlock_irqrestore(&gb_operations_lock, flags); - - return prev == -EINPROGRESS; -} - -int gb_operation_result(struct gb_operation *operation) -{ - int result = operation->errno; - - WARN_ON(result == -EBADR); - WARN_ON(result == -EINPROGRESS); - - return result; -} -EXPORT_SYMBOL_GPL(gb_operation_result); - -/* - * Looks up an outgoing operation on a connection and returns a refcounted - * pointer if found, or NULL otherwise. - */ -static struct gb_operation * -gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id) -{ - struct gb_operation *operation; - unsigned long flags; - bool found = false; - - spin_lock_irqsave(&connection->lock, flags); - list_for_each_entry(operation, &connection->operations, links) - if (operation->id == operation_id && - !gb_operation_is_incoming(operation)) { - gb_operation_get(operation); - found = true; - break; - } - spin_unlock_irqrestore(&connection->lock, flags); - - return found ? operation : NULL; -} - -static int gb_message_send(struct gb_message *message, gfp_t gfp) -{ - struct gb_connection *connection = message->operation->connection; - - trace_gb_message_send(message); - return connection->hd->driver->message_send(connection->hd, - connection->hd_cport_id, - message, - gfp); -} - -/* - * Cancel a message we have passed to the host device layer to be sent. - */ -static void gb_message_cancel(struct gb_message *message) -{ - struct gb_host_device *hd = message->operation->connection->hd; - - hd->driver->message_cancel(message); -} - -static void gb_operation_request_handle(struct gb_operation *operation) -{ - struct gb_connection *connection = operation->connection; - int status; - int ret; - - if (connection->handler) { - status = connection->handler(operation); - } else { - dev_err(&connection->hd->dev, - "%s: unexpected incoming request of type 0x%02x\n", - connection->name, operation->type); - - status = -EPROTONOSUPPORT; - } - - ret = gb_operation_response_send(operation, status); - if (ret) { - dev_err(&connection->hd->dev, - "%s: failed to send response %d for type 0x%02x: %d\n", - connection->name, status, operation->type, ret); - return; - } -} - -/* - * Process operation work. - * - * For incoming requests, call the protocol request handler. The operation - * result should be -EINPROGRESS at this point. - * - * For outgoing requests, the operation result value should have - * been set before queueing this. The operation callback function - * allows the original requester to know the request has completed - * and its result is available. 
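
The long comment above describes a small state machine for operation->errno, which the code enforces as a first-writer-wins compare under a spinlock. Reduced to a hypothetical latch helper:

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    /*
     * -EBADR        : never sent (initial state)
     * -EINPROGRESS  : request in flight, no result yet
     * anything else : final result; only the first writer wins
     */
    static bool result_latch(spinlock_t *lock, int *slot, int result)
    {
        unsigned long flags;
        bool first;

        spin_lock_irqsave(lock, flags);
        first = (*slot == -EINPROGRESS);
        if (first)
            *slot = result;
        spin_unlock_irqrestore(lock, flags);

        return first;   /* caller acts only if it won the race */
    }
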
- */ -static void gb_operation_work(struct work_struct *work) -{ - struct gb_operation *operation; - int ret; - - operation = container_of(work, struct gb_operation, work); - - if (gb_operation_is_incoming(operation)) { - gb_operation_request_handle(operation); - } else { - ret = del_timer_sync(&operation->timer); - if (!ret) { - /* Cancel request message if scheduled by timeout. */ - if (gb_operation_result(operation) == -ETIMEDOUT) - gb_message_cancel(operation->request); - } - - operation->callback(operation); - } - - gb_operation_put_active(operation); - gb_operation_put(operation); -} - -static void gb_operation_timeout(struct timer_list *t) -{ - struct gb_operation *operation = from_timer(operation, t, timer); - - if (gb_operation_result_set(operation, -ETIMEDOUT)) { - /* - * A stuck request message will be cancelled from the - * workqueue. - */ - queue_work(gb_operation_completion_wq, &operation->work); - } -} - -static void gb_operation_message_init(struct gb_host_device *hd, - struct gb_message *message, - u16 operation_id, - size_t payload_size, u8 type) -{ - struct gb_operation_msg_hdr *header; - - header = message->buffer; - - message->header = header; - message->payload = payload_size ? header + 1 : NULL; - message->payload_size = payload_size; - - /* - * The type supplied for incoming message buffers will be - * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by - * arriving data so there's no need to initialize the message header. - */ - if (type != GB_REQUEST_TYPE_INVALID) { - u16 message_size = (u16)(sizeof(*header) + payload_size); - - /* - * For a request, the operation id gets filled in - * when the message is sent. For a response, it - * will be copied from the request by the caller. - * - * The result field in a request message must be - * zero. It will be set just prior to sending for - * a response. - */ - header->size = cpu_to_le16(message_size); - header->operation_id = 0; - header->type = type; - header->result = 0; - } -} - -/* - * Allocate a message to be used for an operation request or response. - * Both types of message contain a common header. The request message - * for an outgoing operation is outbound, as is the response message - * for an incoming operation. The message header for an outbound - * message is partially initialized here. - * - * The headers for inbound messages don't need to be initialized; - * they'll be filled in by arriving data. - * - * Our message buffers have the following layout: - * message header \_ these combined are - * message payload / the message size - */ -static struct gb_message * -gb_operation_message_alloc(struct gb_host_device *hd, u8 type, - size_t payload_size, gfp_t gfp_flags) -{ - struct gb_message *message; - struct gb_operation_msg_hdr *header; - size_t message_size = payload_size + sizeof(*header); - - if (message_size > hd->buffer_size_max) { - dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n", - message_size, hd->buffer_size_max); - return NULL; - } - - /* Allocate the message structure and buffer. */ - message = kmem_cache_zalloc(gb_message_cache, gfp_flags); - if (!message) - return NULL; - - message->buffer = kzalloc(message_size, gfp_flags); - if (!message->buffer) - goto err_free_message; - - /* Initialize the message. Operation id is filled in later. 
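
One consequence of the framing set up in gb_operation_message_init() above: header->size holds the total message size in little-endian order, so the payload length of a received message can always be recovered from its header. A hypothetical inverse helper, assuming the header has already been size-validated:

    static size_t msg_payload_size(const struct gb_operation_msg_hdr *hdr)
    {
        /* total wire size minus the fixed header */
        return le16_to_cpu(hdr->size) - sizeof(*hdr);
    }
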
*/ - gb_operation_message_init(hd, message, 0, payload_size, type); - - return message; - -err_free_message: - kmem_cache_free(gb_message_cache, message); - - return NULL; -} - -static void gb_operation_message_free(struct gb_message *message) -{ - kfree(message->buffer); - kmem_cache_free(gb_message_cache, message); -} - -/* - * Map an enum gb_operation_status value (which is represented in a - * message as a single byte) to an appropriate Linux negative errno. - */ -static int gb_operation_status_map(u8 status) -{ - switch (status) { - case GB_OP_SUCCESS: - return 0; - case GB_OP_INTERRUPTED: - return -EINTR; - case GB_OP_TIMEOUT: - return -ETIMEDOUT; - case GB_OP_NO_MEMORY: - return -ENOMEM; - case GB_OP_PROTOCOL_BAD: - return -EPROTONOSUPPORT; - case GB_OP_OVERFLOW: - return -EMSGSIZE; - case GB_OP_INVALID: - return -EINVAL; - case GB_OP_RETRY: - return -EAGAIN; - case GB_OP_NONEXISTENT: - return -ENODEV; - case GB_OP_MALFUNCTION: - return -EILSEQ; - case GB_OP_UNKNOWN_ERROR: - default: - return -EIO; - } -} - -/* - * Map a Linux errno value (from operation->errno) into the value - * that should represent it in a response message status sent - * over the wire. Returns an enum gb_operation_status value (which - * is represented in a message as a single byte). - */ -static u8 gb_operation_errno_map(int errno) -{ - switch (errno) { - case 0: - return GB_OP_SUCCESS; - case -EINTR: - return GB_OP_INTERRUPTED; - case -ETIMEDOUT: - return GB_OP_TIMEOUT; - case -ENOMEM: - return GB_OP_NO_MEMORY; - case -EPROTONOSUPPORT: - return GB_OP_PROTOCOL_BAD; - case -EMSGSIZE: - return GB_OP_OVERFLOW; /* Could be underflow too */ - case -EINVAL: - return GB_OP_INVALID; - case -EAGAIN: - return GB_OP_RETRY; - case -EILSEQ: - return GB_OP_MALFUNCTION; - case -ENODEV: - return GB_OP_NONEXISTENT; - case -EIO: - default: - return GB_OP_UNKNOWN_ERROR; - } -} - -bool gb_operation_response_alloc(struct gb_operation *operation, - size_t response_size, gfp_t gfp) -{ - struct gb_host_device *hd = operation->connection->hd; - struct gb_operation_msg_hdr *request_header; - struct gb_message *response; - u8 type; - - type = operation->type | GB_MESSAGE_TYPE_RESPONSE; - response = gb_operation_message_alloc(hd, type, response_size, gfp); - if (!response) - return false; - response->operation = operation; - - /* - * Size and type get initialized when the message is - * allocated. The errno will be set before sending. All - * that's left is the operation id, which we copy from the - * request message header (as-is, in little-endian order). - */ - request_header = operation->request->header; - response->header->operation_id = request_header->operation_id; - operation->response = response; - - return true; -} -EXPORT_SYMBOL_GPL(gb_operation_response_alloc); - -/* - * Create a Greybus operation to be sent over the given connection. - * The request buffer will be big enough for a payload of the given - * size. - * - * For outgoing requests, the request message's header will be - * initialized with the type of the request and the message size. - * Outgoing operations must also specify the response buffer size, - * which must be sufficient to hold all expected response data. The - * response message header will eventually be overwritten, so there's - * no need to initialize it here. - * - * Request messages for incoming operations can arrive in interrupt - * context, so they must be allocated with GFP_ATOMIC. 
In this case - * the request buffer will be immediately overwritten, so there is - * no need to initialize the message header. Responsibility for - * allocating a response buffer lies with the incoming request - * handler for a protocol. So we don't allocate that here. - * - * Returns a pointer to the new operation or a null pointer if an - * error occurs. - */ -static struct gb_operation * -gb_operation_create_common(struct gb_connection *connection, u8 type, - size_t request_size, size_t response_size, - unsigned long op_flags, gfp_t gfp_flags) -{ - struct gb_host_device *hd = connection->hd; - struct gb_operation *operation; - - operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags); - if (!operation) - return NULL; - operation->connection = connection; - - operation->request = gb_operation_message_alloc(hd, type, request_size, - gfp_flags); - if (!operation->request) - goto err_cache; - operation->request->operation = operation; - - /* Allocate the response buffer for outgoing operations */ - if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) { - if (!gb_operation_response_alloc(operation, response_size, - gfp_flags)) { - goto err_request; - } - - timer_setup(&operation->timer, gb_operation_timeout, 0); - } - - operation->flags = op_flags; - operation->type = type; - operation->errno = -EBADR; /* Initial value--means "never set" */ - - INIT_WORK(&operation->work, gb_operation_work); - init_completion(&operation->completion); - kref_init(&operation->kref); - atomic_set(&operation->waiters, 0); - - return operation; - -err_request: - gb_operation_message_free(operation->request); -err_cache: - kmem_cache_free(gb_operation_cache, operation); - - return NULL; -} - -/* - * Create a new operation associated with the given connection. The - * request and response sizes provided are the number of bytes - * required to hold the request/response payload only. Both of - * these are allowed to be 0. Note that 0x00 is reserved as an - * invalid operation type for all protocols, and this is enforced - * here. - */ -struct gb_operation * -gb_operation_create_flags(struct gb_connection *connection, - u8 type, size_t request_size, - size_t response_size, unsigned long flags, - gfp_t gfp) -{ - struct gb_operation *operation; - - if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID)) - return NULL; - if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE)) - type &= ~GB_MESSAGE_TYPE_RESPONSE; - - if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK)) - flags &= GB_OPERATION_FLAG_USER_MASK; - - operation = gb_operation_create_common(connection, type, - request_size, response_size, - flags, gfp); - if (operation) - trace_gb_operation_create(operation); - - return operation; -} -EXPORT_SYMBOL_GPL(gb_operation_create_flags); - -struct gb_operation * -gb_operation_create_core(struct gb_connection *connection, - u8 type, size_t request_size, - size_t response_size, unsigned long flags, - gfp_t gfp) -{ - struct gb_operation *operation; - - flags |= GB_OPERATION_FLAG_CORE; - - operation = gb_operation_create_common(connection, type, - request_size, response_size, - flags, gfp); - if (operation) - trace_gb_operation_create_core(operation); - - return operation; -} - -/* Do not export this function. 
*/ - -size_t gb_operation_get_payload_size_max(struct gb_connection *connection) -{ - struct gb_host_device *hd = connection->hd; - - return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr); -} -EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max); - -static struct gb_operation * -gb_operation_create_incoming(struct gb_connection *connection, u16 id, - u8 type, void *data, size_t size) -{ - struct gb_operation *operation; - size_t request_size; - unsigned long flags = GB_OPERATION_FLAG_INCOMING; - - /* Caller has made sure we at least have a message header. */ - request_size = size - sizeof(struct gb_operation_msg_hdr); - - if (!id) - flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL; - - operation = gb_operation_create_common(connection, type, - request_size, - GB_REQUEST_TYPE_INVALID, - flags, GFP_ATOMIC); - if (!operation) - return NULL; - - operation->id = id; - memcpy(operation->request->header, data, size); - trace_gb_operation_create_incoming(operation); - - return operation; -} - -/* - * Get an additional reference on an operation. - */ -void gb_operation_get(struct gb_operation *operation) -{ - kref_get(&operation->kref); -} -EXPORT_SYMBOL_GPL(gb_operation_get); - -/* - * Destroy a previously created operation. - */ -static void _gb_operation_destroy(struct kref *kref) -{ - struct gb_operation *operation; - - operation = container_of(kref, struct gb_operation, kref); - - trace_gb_operation_destroy(operation); - - if (operation->response) - gb_operation_message_free(operation->response); - gb_operation_message_free(operation->request); - - kmem_cache_free(gb_operation_cache, operation); -} - -/* - * Drop a reference on an operation, and destroy it when the last - * one is gone. - */ -void gb_operation_put(struct gb_operation *operation) -{ - if (WARN_ON(!operation)) - return; - - kref_put(&operation->kref, _gb_operation_destroy); -} -EXPORT_SYMBOL_GPL(gb_operation_put); - -/* Tell the requester we're done */ -static void gb_operation_sync_callback(struct gb_operation *operation) -{ - complete(&operation->completion); -} - -/** - * gb_operation_request_send() - send an operation request message - * @operation: the operation to initiate - * @callback: the operation completion callback - * @timeout: operation timeout in milliseconds, or zero for no timeout - * @gfp: the memory flags to use for any allocations - * - * The caller has filled in any payload so the request message is ready to go. - * The callback function supplied will be called when the response message has - * arrived, a unidirectional request has been sent, or the operation is - * cancelled, indicating that the operation is complete. The callback function - * can fetch the result of the operation using gb_operation_result() if - * desired. - * - * Return: 0 if the request was successfully queued in the host-driver queues, - * or a negative errno. - */ -int gb_operation_request_send(struct gb_operation *operation, - gb_operation_callback callback, - unsigned int timeout, - gfp_t gfp) -{ - struct gb_connection *connection = operation->connection; - struct gb_operation_msg_hdr *header; - unsigned int cycle; - int ret; - - if (gb_connection_is_offloaded(connection)) - return -EBUSY; - - if (!callback) - return -EINVAL; - - /* - * Record the callback function, which is executed in - * non-atomic (workqueue) context when the final result - * of an operation has been set. - */ - operation->callback = callback; - - /* - * Assign the operation's id, and store it in the request header. 
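- * The id comes from the per-connection cycle counter below: since
- * cycle % U16_MAX lands in [0, U16_MAX - 1], the +1 maps it into
- * [1, U16_MAX], so a bidirectional operation never gets id 0 even
- * after the counter wraps (e.g. cycle 1 -> id 2, cycle 65535 -> id 1).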
- * Zero is a reserved operation id for unidirectional operations. - */ - if (gb_operation_is_unidirectional(operation)) { - operation->id = 0; - } else { - cycle = (unsigned int)atomic_inc_return(&connection->op_cycle); - operation->id = (u16)(cycle % U16_MAX + 1); - } - - header = operation->request->header; - header->operation_id = cpu_to_le16(operation->id); - - gb_operation_result_set(operation, -EINPROGRESS); - - /* - * Get an extra reference on the operation. It'll be dropped when the - * operation completes. - */ - gb_operation_get(operation); - ret = gb_operation_get_active(operation); - if (ret) - goto err_put; - - ret = gb_message_send(operation->request, gfp); - if (ret) - goto err_put_active; - - if (timeout) { - operation->timer.expires = jiffies + msecs_to_jiffies(timeout); - add_timer(&operation->timer); - } - - return 0; - -err_put_active: - gb_operation_put_active(operation); -err_put: - gb_operation_put(operation); - - return ret; -} -EXPORT_SYMBOL_GPL(gb_operation_request_send); - -/* - * Send a synchronous operation. This function is expected to - * block, returning only when the response has arrived (or an - * error is detected). The return value is the result of the - * operation. - */ -int gb_operation_request_send_sync_timeout(struct gb_operation *operation, - unsigned int timeout) -{ - int ret; - - ret = gb_operation_request_send(operation, gb_operation_sync_callback, - timeout, GFP_KERNEL); - if (ret) - return ret; - - ret = wait_for_completion_interruptible(&operation->completion); - if (ret < 0) { - /* Cancel the operation if interrupted */ - gb_operation_cancel(operation, -ECANCELED); - } - - return gb_operation_result(operation); -} -EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout); - -/* - * Send a response for an incoming operation request. A non-zero - * errno indicates a failed operation. - * - * If there is any response payload, the incoming request handler is - * responsible for allocating the response message. Otherwise it - * can simply supply the result errno; this function will - * allocate the response message if necessary. - */ -static int gb_operation_response_send(struct gb_operation *operation, - int errno) -{ - struct gb_connection *connection = operation->connection; - int ret; - - if (!operation->response && - !gb_operation_is_unidirectional(operation)) { - if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL)) - return -ENOMEM; - } - - /* Record the result */ - if (!gb_operation_result_set(operation, errno)) { - dev_err(&connection->hd->dev, "request result already set\n"); - return -EIO; /* Shouldn't happen */ - } - - /* Sender of request does not care about response. */ - if (gb_operation_is_unidirectional(operation)) - return 0; - - /* Reference will be dropped when message has been sent. */ - gb_operation_get(operation); - ret = gb_operation_get_active(operation); - if (ret) - goto err_put; - - /* Fill in the response header and send it */ - operation->response->header->result = gb_operation_errno_map(errno); - - ret = gb_message_send(operation->response, GFP_KERNEL); - if (ret) - goto err_put_active; - - return 0; - -err_put_active: - gb_operation_put_active(operation); -err_put: - gb_operation_put(operation); - - return ret; -} - -/* - * This function is called when a message send request has completed.
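- * A host-device driver would typically call it from its transmit
- * completion path, along the lines of this sketch (the URB-style
- * completion and its use of urb->context are hypothetical, not part
- * of this patch):
- *
- *	static void gb_example_out_complete(struct urb *urb)
- *	{
- *		struct gb_message *message = urb->context;	// hypothetical
- *
- *		greybus_message_sent(message->operation->connection->hd,
- *				     message, urb->status);
- *	}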
- */ -void greybus_message_sent(struct gb_host_device *hd, - struct gb_message *message, int status) -{ - struct gb_operation *operation = message->operation; - struct gb_connection *connection = operation->connection; - - /* - * If the message was a response, we just need to drop our - * reference to the operation. If an error occurred, report - * it. - * - * For requests, if there's no error and the operation is not - * unidirectional, there's nothing more to do until the response - * arrives. If an error occurred attempting to send it, or if the - * operation is unidirectional, record the result of the operation and - * schedule its completion. - */ - if (message == operation->response) { - if (status) { - dev_err(&connection->hd->dev, - "%s: error sending response 0x%02x: %d\n", - connection->name, operation->type, status); - } - - gb_operation_put_active(operation); - gb_operation_put(operation); - } else if (status || gb_operation_is_unidirectional(operation)) { - if (gb_operation_result_set(operation, status)) { - queue_work(gb_operation_completion_wq, - &operation->work); - } - } -} -EXPORT_SYMBOL_GPL(greybus_message_sent); - -/* - * We've received data on a connection, and it doesn't look like a - * response, so we assume it's a request. - * - * This is called in interrupt context, so just copy the incoming - * data into the request buffer and handle the rest via workqueue. - */ -static void gb_connection_recv_request(struct gb_connection *connection, - const struct gb_operation_msg_hdr *header, - void *data, size_t size) -{ - struct gb_operation *operation; - u16 operation_id; - u8 type; - int ret; - - operation_id = le16_to_cpu(header->operation_id); - type = header->type; - - operation = gb_operation_create_incoming(connection, operation_id, - type, data, size); - if (!operation) { - dev_err(&connection->hd->dev, - "%s: can't create incoming operation\n", - connection->name); - return; - } - - ret = gb_operation_get_active(operation); - if (ret) { - gb_operation_put(operation); - return; - } - trace_gb_message_recv_request(operation->request); - - /* - * The initial reference to the operation will be dropped when the - * request handler returns. - */ - if (gb_operation_result_set(operation, -EINPROGRESS)) - queue_work(connection->wq, &operation->work); -} - -/* - * We've received data that appears to be an operation response - * message. Look up the operation, and record that we've received - * its response. - * - * This is called in interrupt context, so just copy the incoming - * data into the response buffer and handle the rest via workqueue.
- */ -static void gb_connection_recv_response(struct gb_connection *connection, - const struct gb_operation_msg_hdr *header, - void *data, size_t size) -{ - struct gb_operation *operation; - struct gb_message *message; - size_t message_size; - u16 operation_id; - int errno; - - operation_id = le16_to_cpu(header->operation_id); - - if (!operation_id) { - dev_err_ratelimited(&connection->hd->dev, - "%s: invalid response id 0 received\n", - connection->name); - return; - } - - operation = gb_operation_find_outgoing(connection, operation_id); - if (!operation) { - dev_err_ratelimited(&connection->hd->dev, - "%s: unexpected response id 0x%04x received\n", - connection->name, operation_id); - return; - } - - errno = gb_operation_status_map(header->result); - message = operation->response; - message_size = sizeof(*header) + message->payload_size; - if (!errno && size > message_size) { - dev_err_ratelimited(&connection->hd->dev, - "%s: malformed response 0x%02x received (%zu > %zu)\n", - connection->name, header->type, - size, message_size); - errno = -EMSGSIZE; - } else if (!errno && size < message_size) { - if (gb_operation_short_response_allowed(operation)) { - message->payload_size = size - sizeof(*header); - } else { - dev_err_ratelimited(&connection->hd->dev, - "%s: short response 0x%02x received (%zu < %zu)\n", - connection->name, header->type, - size, message_size); - errno = -EMSGSIZE; - } - } - - /* We must ignore the payload if a bad status is returned */ - if (errno) - size = sizeof(*header); - - /* The rest will be handled in work queue context */ - if (gb_operation_result_set(operation, errno)) { - memcpy(message->buffer, data, size); - - trace_gb_message_recv_response(message); - - queue_work(gb_operation_completion_wq, &operation->work); - } - - gb_operation_put(operation); -} - -/* - * Handle data arriving on a connection. As soon as we return, the - * supplied data buffer will be reused (so unless we do something - * with it, it's effectively dropped). - */ -void gb_connection_recv(struct gb_connection *connection, - void *data, size_t size) -{ - struct gb_operation_msg_hdr header; - struct device *dev = &connection->hd->dev; - size_t msg_size; - - if (connection->state == GB_CONNECTION_STATE_DISABLED || - gb_connection_is_offloaded(connection)) { - dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n", - connection->name, size); - return; - } - - if (size < sizeof(header)) { - dev_err_ratelimited(dev, "%s: short message received\n", - connection->name); - return; - } - - /* Use memcpy as data may be unaligned */ - memcpy(&header, data, sizeof(header)); - msg_size = le16_to_cpu(header.size); - if (size < msg_size) { - dev_err_ratelimited(dev, - "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n", - connection->name, - le16_to_cpu(header.operation_id), - header.type, size, msg_size); - return; /* XXX Should still complete operation */ - } - - if (header.type & GB_MESSAGE_TYPE_RESPONSE) { - gb_connection_recv_response(connection, &header, data, - msg_size); - } else { - gb_connection_recv_request(connection, &header, data, - msg_size); - } -} - -/* - * Cancel an outgoing operation synchronously, and record the given error to - * indicate why.
- */ -void gb_operation_cancel(struct gb_operation *operation, int errno) -{ - if (WARN_ON(gb_operation_is_incoming(operation))) - return; - - if (gb_operation_result_set(operation, errno)) { - gb_message_cancel(operation->request); - queue_work(gb_operation_completion_wq, &operation->work); - } - trace_gb_message_cancel_outgoing(operation->request); - - atomic_inc(&operation->waiters); - wait_event(gb_operation_cancellation_queue, - !gb_operation_is_active(operation)); - atomic_dec(&operation->waiters); -} -EXPORT_SYMBOL_GPL(gb_operation_cancel); - -/* - * Cancel an incoming operation synchronously. Called during connection tear - * down. - */ -void gb_operation_cancel_incoming(struct gb_operation *operation, int errno) -{ - if (WARN_ON(!gb_operation_is_incoming(operation))) - return; - - if (!gb_operation_is_unidirectional(operation)) { - /* - * Make sure the request handler has submitted the response - * before cancelling it. - */ - flush_work(&operation->work); - if (!gb_operation_result_set(operation, errno)) - gb_message_cancel(operation->response); - } - trace_gb_message_cancel_incoming(operation->response); - - atomic_inc(&operation->waiters); - wait_event(gb_operation_cancellation_queue, - !gb_operation_is_active(operation)); - atomic_dec(&operation->waiters); -} - -/** - * gb_operation_sync_timeout() - implement a "simple" synchronous operation - * @connection: the Greybus connection to send this to - * @type: the type of operation to send - * @request: pointer to a memory buffer to copy the request from - * @request_size: size of @request - * @response: pointer to a memory buffer to copy the response to - * @response_size: the size of @response. - * @timeout: operation timeout in milliseconds - * - * This function implements a simple synchronous Greybus operation. It sends - * the provided operation request and waits (sleeps) until the corresponding - * operation response message has been successfully received, or an error - * occurs. @request and @response are buffers to hold the request and response - * data respectively, and if they are not NULL, their size must be specified in - * @request_size and @response_size. - * - * If a response payload is to come back, and @response is not NULL, - * @response_size number of bytes will be copied into @response if the operation - * is successful. - * - * If there is an error, the response buffer is left alone. 
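- *
- * A typical caller looks like the sketch below (the request/response
- * structs and EXAMPLE_TYPE_GET are hypothetical; the call pattern
- * mirrors the SVC helpers later in this patch, via the
- * gb_operation_sync() convenience wrapper):
- *
- *	struct example_get_request req = { .which = which };	// hypothetical
- *	struct example_get_response rsp;			// hypothetical
- *	int ret;
- *
- *	ret = gb_operation_sync(connection, EXAMPLE_TYPE_GET,
- *				&req, sizeof(req), &rsp, sizeof(rsp));
- *	if (ret)
- *		return ret;	// rsp was left untouched
- *	value = le32_to_cpu(rsp.value);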
- */ -int gb_operation_sync_timeout(struct gb_connection *connection, int type, - void *request, int request_size, - void *response, int response_size, - unsigned int timeout) -{ - struct gb_operation *operation; - int ret; - - if ((response_size && !response) || - (request_size && !request)) - return -EINVAL; - - operation = gb_operation_create(connection, type, - request_size, response_size, - GFP_KERNEL); - if (!operation) - return -ENOMEM; - - if (request_size) - memcpy(operation->request->payload, request, request_size); - - ret = gb_operation_request_send_sync_timeout(operation, timeout); - if (ret) { - dev_err(&connection->hd->dev, - "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n", - connection->name, operation->id, type, ret); - } else { - if (response_size) { - memcpy(response, operation->response->payload, - response_size); - } - } - - gb_operation_put(operation); - - return ret; -} -EXPORT_SYMBOL_GPL(gb_operation_sync_timeout); - -/** - * gb_operation_unidirectional_timeout() - initiate a unidirectional operation - * @connection: connection to use - * @type: type of operation to send - * @request: memory buffer to copy the request from - * @request_size: size of @request - * @timeout: send timeout in milliseconds - * - * Initiate a unidirectional operation by sending a request message and - * waiting for it to be acknowledged as sent by the host device. - * - * Note that successful send of a unidirectional operation does not imply that - * the request has actually reached the remote end of the connection. - */ -int gb_operation_unidirectional_timeout(struct gb_connection *connection, - int type, void *request, - int request_size, - unsigned int timeout) -{ - struct gb_operation *operation; - int ret; - - if (request_size && !request) - return -EINVAL; - - operation = gb_operation_create_flags(connection, type, - request_size, 0, - GB_OPERATION_FLAG_UNIDIRECTIONAL, - GFP_KERNEL); - if (!operation) - return -ENOMEM; - - if (request_size) - memcpy(operation->request->payload, request, request_size); - - ret = gb_operation_request_send_sync_timeout(operation, timeout); - if (ret) { - dev_err(&connection->hd->dev, - "%s: unidirectional operation of type 0x%02x failed: %d\n", - connection->name, type, ret); - } - - gb_operation_put(operation); - - return ret; -} -EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout); - -int __init gb_operation_init(void) -{ - gb_message_cache = kmem_cache_create("gb_message_cache", - sizeof(struct gb_message), 0, 0, - NULL); - if (!gb_message_cache) - return -ENOMEM; - - gb_operation_cache = kmem_cache_create("gb_operation_cache", - sizeof(struct gb_operation), 0, - 0, NULL); - if (!gb_operation_cache) - goto err_destroy_message_cache; - - gb_operation_completion_wq = alloc_workqueue("greybus_completion", - 0, 0); - if (!gb_operation_completion_wq) - goto err_destroy_operation_cache; - - return 0; - -err_destroy_operation_cache: - kmem_cache_destroy(gb_operation_cache); - gb_operation_cache = NULL; -err_destroy_message_cache: - kmem_cache_destroy(gb_message_cache); - gb_message_cache = NULL; - - return -ENOMEM; -} - -void gb_operation_exit(void) -{ - destroy_workqueue(gb_operation_completion_wq); - gb_operation_completion_wq = NULL; - kmem_cache_destroy(gb_operation_cache); - gb_operation_cache = NULL; - kmem_cache_destroy(gb_message_cache); - gb_message_cache = NULL; -} diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c deleted file mode 100644 index ce7740ef449b..000000000000 ---
a/drivers/staging/greybus/svc.c +++ /dev/null @@ -1,1397 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * SVC Greybus driver. - * - * Copyright 2015 Google Inc. - * Copyright 2015 Linaro Ltd. - */ - -#include -#include -#include - -#define SVC_INTF_EJECT_TIMEOUT 9000 -#define SVC_INTF_ACTIVATE_TIMEOUT 6000 -#define SVC_INTF_RESUME_TIMEOUT 3000 - -struct gb_svc_deferred_request { - struct work_struct work; - struct gb_operation *operation; -}; - -static int gb_svc_queue_deferred_request(struct gb_operation *operation); - -static ssize_t endo_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_svc *svc = to_gb_svc(dev); - - return sprintf(buf, "0x%04x\n", svc->endo_id); -} -static DEVICE_ATTR_RO(endo_id); - -static ssize_t ap_intf_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_svc *svc = to_gb_svc(dev); - - return sprintf(buf, "%u\n", svc->ap_intf_id); -} -static DEVICE_ATTR_RO(ap_intf_id); - -// FIXME -// This is a hack, we need to do this "right" and clean the interface up -// properly, not just forcibly yank the thing out of the system and hope for the -// best. But for now, people want their modules to come out without having to -// throw the thing to the ground or get out a screwdriver. -static ssize_t intf_eject_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t len) -{ - struct gb_svc *svc = to_gb_svc(dev); - unsigned short intf_id; - int ret; - - ret = kstrtou16(buf, 10, &intf_id); - if (ret < 0) - return ret; - - dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); - - ret = gb_svc_intf_eject(svc, intf_id); - if (ret < 0) - return ret; - - return len; -} -static DEVICE_ATTR_WO(intf_eject); - -static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct gb_svc *svc = to_gb_svc(dev); - - return sprintf(buf, "%s\n", - gb_svc_watchdog_enabled(svc) ? 
"enabled" : "disabled"); -} - -static ssize_t watchdog_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t len) -{ - struct gb_svc *svc = to_gb_svc(dev); - int retval; - bool user_request; - - retval = strtobool(buf, &user_request); - if (retval) - return retval; - - if (user_request) - retval = gb_svc_watchdog_enable(svc); - else - retval = gb_svc_watchdog_disable(svc); - if (retval) - return retval; - return len; -} -static DEVICE_ATTR_RW(watchdog); - -static ssize_t watchdog_action_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gb_svc *svc = to_gb_svc(dev); - - if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) - return sprintf(buf, "panic\n"); - else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) - return sprintf(buf, "reset\n"); - - return -EINVAL; -} - -static ssize_t watchdog_action_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct gb_svc *svc = to_gb_svc(dev); - - if (sysfs_streq(buf, "panic")) - svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; - else if (sysfs_streq(buf, "reset")) - svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; - else - return -EINVAL; - - return len; -} -static DEVICE_ATTR_RW(watchdog_action); - -static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) -{ - struct gb_svc_pwrmon_rail_count_get_response response; - int ret; - - ret = gb_operation_sync(svc->connection, - GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, - &response, sizeof(response)); - if (ret) { - dev_err(&svc->dev, "failed to get rail count: %d\n", ret); - return ret; - } - - *value = response.rail_count; - - return 0; -} - -static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, - struct gb_svc_pwrmon_rail_names_get_response *response, - size_t bufsize) -{ - int ret; - - ret = gb_operation_sync(svc->connection, - GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, - response, bufsize); - if (ret) { - dev_err(&svc->dev, "failed to get rail names: %d\n", ret); - return ret; - } - - if (response->status != GB_SVC_OP_SUCCESS) { - dev_err(&svc->dev, - "SVC error while getting rail names: %u\n", - response->status); - return -EREMOTEIO; - } - - return 0; -} - -static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, - u8 measurement_type, u32 *value) -{ - struct gb_svc_pwrmon_sample_get_request request; - struct gb_svc_pwrmon_sample_get_response response; - int ret; - - request.rail_id = rail_id; - request.measurement_type = measurement_type; - - ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, - &request, sizeof(request), - &response, sizeof(response)); - if (ret) { - dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); - return ret; - } - - if (response.result) { - dev_err(&svc->dev, - "UniPro error while getting rail power sample (%d %d): %d\n", - rail_id, measurement_type, response.result); - switch (response.result) { - case GB_SVC_PWRMON_GET_SAMPLE_INVAL: - return -EINVAL; - case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: - return -ENOMSG; - default: - return -EREMOTEIO; - } - } - - *value = le32_to_cpu(response.measurement); - - return 0; -} - -int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, - u8 measurement_type, u32 *value) -{ - struct gb_svc_pwrmon_intf_sample_get_request request; - struct gb_svc_pwrmon_intf_sample_get_response response; - int ret; - - request.intf_id = intf_id; - request.measurement_type = measurement_type; - - ret = gb_operation_sync(svc->connection, - GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, - 
&request, sizeof(request), - &response, sizeof(response)); - if (ret) { - dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); - return ret; - } - - if (response.result) { - dev_err(&svc->dev, - "UniPro error while getting intf power sample (%d %d): %d\n", - intf_id, measurement_type, response.result); - switch (response.result) { - case GB_SVC_PWRMON_GET_SAMPLE_INVAL: - return -EINVAL; - case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: - return -ENOMSG; - default: - return -EREMOTEIO; - } - } - - *value = le32_to_cpu(response.measurement); - - return 0; -} - -static struct attribute *svc_attrs[] = { - &dev_attr_endo_id.attr, - &dev_attr_ap_intf_id.attr, - &dev_attr_intf_eject.attr, - &dev_attr_watchdog.attr, - &dev_attr_watchdog_action.attr, - NULL, -}; -ATTRIBUTE_GROUPS(svc); - -int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) -{ - struct gb_svc_intf_device_id_request request; - - request.intf_id = intf_id; - request.device_id = device_id; - - return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, - &request, sizeof(request), NULL, 0); -} - -int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) -{ - struct gb_svc_intf_eject_request request; - int ret; - - request.intf_id = intf_id; - - /* - * The pulse width for module release in svc is long so we need to - * increase the timeout so the operation will not return too soon. - */ - ret = gb_operation_sync_timeout(svc->connection, - GB_SVC_TYPE_INTF_EJECT, &request, - sizeof(request), NULL, 0, - SVC_INTF_EJECT_TIMEOUT); - if (ret) { - dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); - return ret; - } - - return 0; -} - -int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) -{ - struct gb_svc_intf_vsys_request request; - struct gb_svc_intf_vsys_response response; - int type, ret; - - request.intf_id = intf_id; - - if (enable) - type = GB_SVC_TYPE_INTF_VSYS_ENABLE; - else - type = GB_SVC_TYPE_INTF_VSYS_DISABLE; - - ret = gb_operation_sync(svc->connection, type, - &request, sizeof(request), - &response, sizeof(response)); - if (ret < 0) - return ret; - if (response.result_code != GB_SVC_INTF_VSYS_OK) - return -EREMOTEIO; - return 0; -} - -int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) -{ - struct gb_svc_intf_refclk_request request; - struct gb_svc_intf_refclk_response response; - int type, ret; - - request.intf_id = intf_id; - - if (enable) - type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; - else - type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; - - ret = gb_operation_sync(svc->connection, type, - &request, sizeof(request), - &response, sizeof(response)); - if (ret < 0) - return ret; - if (response.result_code != GB_SVC_INTF_REFCLK_OK) - return -EREMOTEIO; - return 0; -} - -int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) -{ - struct gb_svc_intf_unipro_request request; - struct gb_svc_intf_unipro_response response; - int type, ret; - - request.intf_id = intf_id; - - if (enable) - type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; - else - type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; - - ret = gb_operation_sync(svc->connection, type, - &request, sizeof(request), - &response, sizeof(response)); - if (ret < 0) - return ret; - if (response.result_code != GB_SVC_INTF_UNIPRO_OK) - return -EREMOTEIO; - return 0; -} - -int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type) -{ - struct gb_svc_intf_activate_request request; - struct gb_svc_intf_activate_response response; - int ret; - - request.intf_id = intf_id; - - ret =
gb_operation_sync_timeout(svc->connection, - GB_SVC_TYPE_INTF_ACTIVATE, - &request, sizeof(request), - &response, sizeof(response), - SVC_INTF_ACTIVATE_TIMEOUT); - if (ret < 0) - return ret; - if (response.status != GB_SVC_OP_SUCCESS) { - dev_err(&svc->dev, "failed to activate interface %u: %u\n", - intf_id, response.status); - return -EREMOTEIO; - } - - *intf_type = response.intf_type; - - return 0; -} - -int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) -{ - struct gb_svc_intf_resume_request request; - struct gb_svc_intf_resume_response response; - int ret; - - request.intf_id = intf_id; - - ret = gb_operation_sync_timeout(svc->connection, - GB_SVC_TYPE_INTF_RESUME, - &request, sizeof(request), - &response, sizeof(response), - SVC_INTF_RESUME_TIMEOUT); - if (ret < 0) { - dev_err(&svc->dev, "failed to send interface resume %u: %d\n", - intf_id, ret); - return ret; - } - - if (response.status != GB_SVC_OP_SUCCESS) { - dev_err(&svc->dev, "failed to resume interface %u: %u\n", - intf_id, response.status); - return -EREMOTEIO; - } - - return 0; -} - -int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, - u32 *value) -{ - struct gb_svc_dme_peer_get_request request; - struct gb_svc_dme_peer_get_response response; - u16 result; - int ret; - - request.intf_id = intf_id; - request.attr = cpu_to_le16(attr); - request.selector = cpu_to_le16(selector); - - ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, - &request, sizeof(request), - &response, sizeof(response)); - if (ret) { - dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", - intf_id, attr, selector, ret); - return ret; - } - - result = le16_to_cpu(response.result_code); - if (result) { - dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", - intf_id, attr, selector, result); - return -EREMOTEIO; - } - - if (value) - *value = le32_to_cpu(response.attr_value); - - return 0; -} - -int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, - u32 value) -{ - struct gb_svc_dme_peer_set_request request; - struct gb_svc_dme_peer_set_response response; - u16 result; - int ret; - - request.intf_id = intf_id; - request.attr = cpu_to_le16(attr); - request.selector = cpu_to_le16(selector); - request.value = cpu_to_le32(value); - - ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, - &request, sizeof(request), - &response, sizeof(response)); - if (ret) { - dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", - intf_id, attr, selector, value, ret); - return ret; - } - - result = le16_to_cpu(response.result_code); - if (result) { - dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", - intf_id, attr, selector, value, result); - return -EREMOTEIO; - } - - return 0; -} - -int gb_svc_connection_create(struct gb_svc *svc, - u8 intf1_id, u16 cport1_id, - u8 intf2_id, u16 cport2_id, - u8 cport_flags) -{ - struct gb_svc_conn_create_request request; - - request.intf1_id = intf1_id; - request.cport1_id = cpu_to_le16(cport1_id); - request.intf2_id = intf2_id; - request.cport2_id = cpu_to_le16(cport2_id); - request.tc = 0; /* TC0 */ - request.flags = cport_flags; - - return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, - &request, sizeof(request), NULL, 0); -} - -void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, - u8 intf2_id, u16 cport2_id) -{ - struct gb_svc_conn_destroy_request request; - struct gb_connection *connection = 
svc->connection; - int ret; - - request.intf1_id = intf1_id; - request.cport1_id = cpu_to_le16(cport1_id); - request.intf2_id = intf2_id; - request.cport2_id = cpu_to_le16(cport2_id); - - ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, - &request, sizeof(request), NULL, 0); - if (ret) { - dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", - intf1_id, cport1_id, intf2_id, cport2_id, ret); - } -} - -/* Creates bi-directional routes between the devices */ -int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, - u8 intf2_id, u8 dev2_id) -{ - struct gb_svc_route_create_request request; - - request.intf1_id = intf1_id; - request.dev1_id = dev1_id; - request.intf2_id = intf2_id; - request.dev2_id = dev2_id; - - return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, - &request, sizeof(request), NULL, 0); -} - -/* Destroys bi-directional routes between the devices */ -void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) -{ - struct gb_svc_route_destroy_request request; - int ret; - - request.intf1_id = intf1_id; - request.intf2_id = intf2_id; - - ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, - &request, sizeof(request), NULL, 0); - if (ret) { - dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", - intf1_id, intf2_id, ret); - } -} - -int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, - u8 tx_mode, u8 tx_gear, u8 tx_nlanes, - u8 tx_amplitude, u8 tx_hs_equalizer, - u8 rx_mode, u8 rx_gear, u8 rx_nlanes, - u8 flags, u32 quirks, - struct gb_svc_l2_timer_cfg *local, - struct gb_svc_l2_timer_cfg *remote) -{ - struct gb_svc_intf_set_pwrm_request request; - struct gb_svc_intf_set_pwrm_response response; - int ret; - u16 result_code; - - memset(&request, 0, sizeof(request)); - - request.intf_id = intf_id; - request.hs_series = hs_series; - request.tx_mode = tx_mode; - request.tx_gear = tx_gear; - request.tx_nlanes = tx_nlanes; - request.tx_amplitude = tx_amplitude; - request.tx_hs_equalizer = tx_hs_equalizer; - request.rx_mode = rx_mode; - request.rx_gear = rx_gear; - request.rx_nlanes = rx_nlanes; - request.flags = flags; - request.quirks = cpu_to_le32(quirks); - if (local) - request.local_l2timerdata = *local; - if (remote) - request.remote_l2timerdata = *remote; - - ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, - &request, sizeof(request), - &response, sizeof(response)); - if (ret < 0) - return ret; - - result_code = response.result_code; - if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { - dev_err(&svc->dev, "set power mode = %d\n", result_code); - return -EIO; - } - - return 0; -} -EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); - -int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) -{ - struct gb_svc_intf_set_pwrm_request request; - struct gb_svc_intf_set_pwrm_response response; - int ret; - u16 result_code; - - memset(&request, 0, sizeof(request)); - - request.intf_id = intf_id; - request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; - request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; - request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; - - ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, - &request, sizeof(request), - &response, sizeof(response)); - if (ret < 0) { - dev_err(&svc->dev, - "failed to send set power mode operation to interface %u: %d\n", - intf_id, ret); - return ret; - } - - result_code = response.result_code; - if (result_code != GB_SVC_SETPWRM_PWR_OK) { - dev_err(&svc->dev, - "failed to hibernate the link 
for interface %u: %u\n", - intf_id, result_code); - return -EIO; - } - - return 0; -} - -int gb_svc_ping(struct gb_svc *svc) -{ - return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, - NULL, 0, NULL, 0, - GB_OPERATION_TIMEOUT_DEFAULT * 2); -} - -static int gb_svc_version_request(struct gb_operation *op) -{ - struct gb_connection *connection = op->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - struct gb_svc_version_request *request; - struct gb_svc_version_response *response; - - if (op->request->payload_size < sizeof(*request)) { - dev_err(&svc->dev, "short version request (%zu < %zu)\n", - op->request->payload_size, - sizeof(*request)); - return -EINVAL; - } - - request = op->request->payload; - - if (request->major > GB_SVC_VERSION_MAJOR) { - dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", - request->major, GB_SVC_VERSION_MAJOR); - return -ENOTSUPP; - } - - svc->protocol_major = request->major; - svc->protocol_minor = request->minor; - - if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) - return -ENOMEM; - - response = op->response->payload; - response->major = svc->protocol_major; - response->minor = svc->protocol_minor; - - return 0; -} - -static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, - size_t len, loff_t *offset) -{ - struct svc_debugfs_pwrmon_rail *pwrmon_rails = - file_inode(file)->i_private; - struct gb_svc *svc = pwrmon_rails->svc; - int ret, desc; - u32 value; - char buff[16]; - - ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, - GB_SVC_PWRMON_TYPE_VOL, &value); - if (ret) { - dev_err(&svc->dev, - "failed to get voltage sample %u: %d\n", - pwrmon_rails->id, ret); - return ret; - } - - desc = scnprintf(buff, sizeof(buff), "%u\n", value); - - return simple_read_from_buffer(buf, len, offset, buff, desc); -} - -static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, - size_t len, loff_t *offset) -{ - struct svc_debugfs_pwrmon_rail *pwrmon_rails = - file_inode(file)->i_private; - struct gb_svc *svc = pwrmon_rails->svc; - int ret, desc; - u32 value; - char buff[16]; - - ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, - GB_SVC_PWRMON_TYPE_CURR, &value); - if (ret) { - dev_err(&svc->dev, - "failed to get current sample %u: %d\n", - pwrmon_rails->id, ret); - return ret; - } - - desc = scnprintf(buff, sizeof(buff), "%u\n", value); - - return simple_read_from_buffer(buf, len, offset, buff, desc); -} - -static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, - size_t len, loff_t *offset) -{ - struct svc_debugfs_pwrmon_rail *pwrmon_rails = - file_inode(file)->i_private; - struct gb_svc *svc = pwrmon_rails->svc; - int ret, desc; - u32 value; - char buff[16]; - - ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, - GB_SVC_PWRMON_TYPE_PWR, &value); - if (ret) { - dev_err(&svc->dev, "failed to get power sample %u: %d\n", - pwrmon_rails->id, ret); - return ret; - } - - desc = scnprintf(buff, sizeof(buff), "%u\n", value); - - return simple_read_from_buffer(buf, len, offset, buff, desc); -} - -static const struct file_operations pwrmon_debugfs_voltage_fops = { - .read = pwr_debugfs_voltage_read, -}; - -static const struct file_operations pwrmon_debugfs_current_fops = { - .read = pwr_debugfs_current_read, -}; - -static const struct file_operations pwrmon_debugfs_power_fops = { - .read = pwr_debugfs_power_read, -}; - -static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc) -{ - int i; - size_t bufsize; - struct dentry *dent; - struct 
gb_svc_pwrmon_rail_names_get_response *rail_names; - u8 rail_count; - - dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry); - if (IS_ERR_OR_NULL(dent)) - return; - - if (gb_svc_pwrmon_rail_count_get(svc, &rail_count)) - goto err_pwrmon_debugfs; - - if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT) - goto err_pwrmon_debugfs; - - bufsize = sizeof(*rail_names) + - GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count; - - rail_names = kzalloc(bufsize, GFP_KERNEL); - if (!rail_names) - goto err_pwrmon_debugfs; - - svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails), - GFP_KERNEL); - if (!svc->pwrmon_rails) - goto err_pwrmon_debugfs_free; - - if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize)) - goto err_pwrmon_debugfs_free; - - for (i = 0; i < rail_count; i++) { - struct dentry *dir; - struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i]; - char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; - - snprintf(fname, sizeof(fname), "%s", - (char *)&rail_names->name[i]); - - rail->id = i; - rail->svc = svc; - - dir = debugfs_create_dir(fname, dent); - debugfs_create_file("voltage_now", 0444, dir, rail, - &pwrmon_debugfs_voltage_fops); - debugfs_create_file("current_now", 0444, dir, rail, - &pwrmon_debugfs_current_fops); - debugfs_create_file("power_now", 0444, dir, rail, - &pwrmon_debugfs_power_fops); - } - - kfree(rail_names); - return; - -err_pwrmon_debugfs_free: - kfree(rail_names); - kfree(svc->pwrmon_rails); - svc->pwrmon_rails = NULL; - -err_pwrmon_debugfs: - debugfs_remove(dent); -} - -static void gb_svc_debugfs_init(struct gb_svc *svc) -{ - svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), - gb_debugfs_get()); - gb_svc_pwrmon_debugfs_init(svc); -} - -static void gb_svc_debugfs_exit(struct gb_svc *svc) -{ - debugfs_remove_recursive(svc->debugfs_dentry); - kfree(svc->pwrmon_rails); - svc->pwrmon_rails = NULL; -} - -static int gb_svc_hello(struct gb_operation *op) -{ - struct gb_connection *connection = op->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - struct gb_svc_hello_request *hello_request; - int ret; - - if (op->request->payload_size < sizeof(*hello_request)) { - dev_warn(&svc->dev, "short hello request (%zu < %zu)\n", - op->request->payload_size, - sizeof(*hello_request)); - return -EINVAL; - } - - hello_request = op->request->payload; - svc->endo_id = le16_to_cpu(hello_request->endo_id); - svc->ap_intf_id = hello_request->interface_id; - - ret = device_add(&svc->dev); - if (ret) { - dev_err(&svc->dev, "failed to register svc device: %d\n", ret); - return ret; - } - - ret = gb_svc_watchdog_create(svc); - if (ret) { - dev_err(&svc->dev, "failed to create watchdog: %d\n", ret); - goto err_unregister_device; - } - - gb_svc_debugfs_init(svc); - - return gb_svc_queue_deferred_request(op); - -err_unregister_device: - gb_svc_watchdog_destroy(svc); - device_del(&svc->dev); - return ret; -} - -static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, - u8 intf_id) -{ - struct gb_host_device *hd = svc->hd; - struct gb_module *module; - size_t num_interfaces; - u8 module_id; - - list_for_each_entry(module, &hd->modules, hd_node) { - module_id = module->module_id; - num_interfaces = module->num_interfaces; - - if (intf_id >= module_id && - intf_id < module_id + num_interfaces) { - return module->interfaces[intf_id - module_id]; - } - } - - return NULL; -} - -static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) -{ - struct gb_host_device *hd = svc->hd; - struct gb_module *module; - - 
list_for_each_entry(module, &hd->modules, hd_node) { - if (module->module_id == module_id) - return module; - } - - return NULL; -} - -static void gb_svc_process_hello_deferred(struct gb_operation *operation) -{ - struct gb_connection *connection = operation->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - int ret; - - /* - * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch - * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient - * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged - * module. - * - * The code should be removed once SW-2217, Heuristic for UniPro - * Power Mode Changes is resolved. - */ - ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id, - GB_SVC_UNIPRO_HS_SERIES_A, - GB_SVC_UNIPRO_SLOW_AUTO_MODE, - 2, 1, - GB_SVC_SMALL_AMPLITUDE, - GB_SVC_NO_DE_EMPHASIS, - GB_SVC_UNIPRO_SLOW_AUTO_MODE, - 2, 1, - 0, 0, - NULL, NULL); - - if (ret) - dev_warn(&svc->dev, - "power mode change failed on AP to switch link: %d\n", - ret); -} - -static void gb_svc_process_module_inserted(struct gb_operation *operation) -{ - struct gb_svc_module_inserted_request *request; - struct gb_connection *connection = operation->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - struct gb_host_device *hd = svc->hd; - struct gb_module *module; - size_t num_interfaces; - u8 module_id; - u16 flags; - int ret; - - /* The request message size has already been verified. */ - request = operation->request->payload; - module_id = request->primary_intf_id; - num_interfaces = request->intf_count; - flags = le16_to_cpu(request->flags); - - dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n", - __func__, module_id, num_interfaces, flags); - - if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) { - dev_warn(&svc->dev, "no primary interface detected on module %u\n", - module_id); - } - - module = gb_svc_module_lookup(svc, module_id); - if (module) { - dev_warn(&svc->dev, "unexpected module-inserted event %u\n", - module_id); - return; - } - - module = gb_module_create(hd, module_id, num_interfaces); - if (!module) { - dev_err(&svc->dev, "failed to create module\n"); - return; - } - - ret = gb_module_add(module); - if (ret) { - gb_module_put(module); - return; - } - - list_add(&module->hd_node, &hd->modules); -} - -static void gb_svc_process_module_removed(struct gb_operation *operation) -{ - struct gb_svc_module_removed_request *request; - struct gb_connection *connection = operation->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - struct gb_module *module; - u8 module_id; - - /* The request message size has already been verified. */ - request = operation->request->payload; - module_id = request->primary_intf_id; - - dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id); - - module = gb_svc_module_lookup(svc, module_id); - if (!module) { - dev_warn(&svc->dev, "unexpected module-removed event %u\n", - module_id); - return; - } - - module->disconnected = true; - - gb_module_del(module); - list_del(&module->hd_node); - gb_module_put(module); -} - -static void gb_svc_process_intf_oops(struct gb_operation *operation) -{ - struct gb_svc_intf_oops_request *request; - struct gb_connection *connection = operation->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - struct gb_interface *intf; - u8 intf_id; - u8 reason; - - /* The request message size has already been verified. 
*/ - request = operation->request->payload; - intf_id = request->intf_id; - reason = request->reason; - - intf = gb_svc_interface_lookup(svc, intf_id); - if (!intf) { - dev_warn(&svc->dev, "unexpected interface-oops event %u\n", - intf_id); - return; - } - - dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n", - intf_id, reason); - - mutex_lock(&intf->mutex); - intf->disconnected = true; - gb_interface_disable(intf); - gb_interface_deactivate(intf); - mutex_unlock(&intf->mutex); -} - -static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) -{ - struct gb_svc_intf_mailbox_event_request *request; - struct gb_connection *connection = operation->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - struct gb_interface *intf; - u8 intf_id; - u16 result_code; - u32 mailbox; - - /* The request message size has already been verified. */ - request = operation->request->payload; - intf_id = request->intf_id; - result_code = le16_to_cpu(request->result_code); - mailbox = le32_to_cpu(request->mailbox); - - dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n", - __func__, intf_id, result_code, mailbox); - - intf = gb_svc_interface_lookup(svc, intf_id); - if (!intf) { - dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id); - return; - } - - gb_interface_mailbox_event(intf, result_code, mailbox); -} - -static void gb_svc_process_deferred_request(struct work_struct *work) -{ - struct gb_svc_deferred_request *dr; - struct gb_operation *operation; - struct gb_svc *svc; - u8 type; - - dr = container_of(work, struct gb_svc_deferred_request, work); - operation = dr->operation; - svc = gb_connection_get_data(operation->connection); - type = operation->request->header->type; - - switch (type) { - case GB_SVC_TYPE_SVC_HELLO: - gb_svc_process_hello_deferred(operation); - break; - case GB_SVC_TYPE_MODULE_INSERTED: - gb_svc_process_module_inserted(operation); - break; - case GB_SVC_TYPE_MODULE_REMOVED: - gb_svc_process_module_removed(operation); - break; - case GB_SVC_TYPE_INTF_MAILBOX_EVENT: - gb_svc_process_intf_mailbox_event(operation); - break; - case GB_SVC_TYPE_INTF_OOPS: - gb_svc_process_intf_oops(operation); - break; - default: - dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type); - } - - gb_operation_put(operation); - kfree(dr); -} - -static int gb_svc_queue_deferred_request(struct gb_operation *operation) -{ - struct gb_svc *svc = gb_connection_get_data(operation->connection); - struct gb_svc_deferred_request *dr; - - dr = kmalloc(sizeof(*dr), GFP_KERNEL); - if (!dr) - return -ENOMEM; - - gb_operation_get(operation); - - dr->operation = operation; - INIT_WORK(&dr->work, gb_svc_process_deferred_request); - - queue_work(svc->wq, &dr->work); - - return 0; -} - -static int gb_svc_intf_reset_recv(struct gb_operation *op) -{ - struct gb_svc *svc = gb_connection_get_data(op->connection); - struct gb_message *request = op->request; - struct gb_svc_intf_reset_request *reset; - - if (request->payload_size < sizeof(*reset)) { - dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n", - request->payload_size, sizeof(*reset)); - return -EINVAL; - } - reset = request->payload; - - /* FIXME Reset the interface here */ - - return 0; -} - -static int gb_svc_module_inserted_recv(struct gb_operation *op) -{ - struct gb_svc *svc = gb_connection_get_data(op->connection); - struct gb_svc_module_inserted_request *request; - - if (op->request->payload_size < sizeof(*request)) { - dev_warn(&svc->dev, "short 
module-inserted request received (%zu < %zu)\n", - op->request->payload_size, sizeof(*request)); - return -EINVAL; - } - - request = op->request->payload; - - dev_dbg(&svc->dev, "%s - id = %u\n", __func__, - request->primary_intf_id); - - return gb_svc_queue_deferred_request(op); -} - -static int gb_svc_module_removed_recv(struct gb_operation *op) -{ - struct gb_svc *svc = gb_connection_get_data(op->connection); - struct gb_svc_module_removed_request *request; - - if (op->request->payload_size < sizeof(*request)) { - dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n", - op->request->payload_size, sizeof(*request)); - return -EINVAL; - } - - request = op->request->payload; - - dev_dbg(&svc->dev, "%s - id = %u\n", __func__, - request->primary_intf_id); - - return gb_svc_queue_deferred_request(op); -} - -static int gb_svc_intf_oops_recv(struct gb_operation *op) -{ - struct gb_svc *svc = gb_connection_get_data(op->connection); - struct gb_svc_intf_oops_request *request; - - if (op->request->payload_size < sizeof(*request)) { - dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n", - op->request->payload_size, sizeof(*request)); - return -EINVAL; - } - - return gb_svc_queue_deferred_request(op); -} - -static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op) -{ - struct gb_svc *svc = gb_connection_get_data(op->connection); - struct gb_svc_intf_mailbox_event_request *request; - - if (op->request->payload_size < sizeof(*request)) { - dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n", - op->request->payload_size, sizeof(*request)); - return -EINVAL; - } - - request = op->request->payload; - - dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id); - - return gb_svc_queue_deferred_request(op); -} - -static int gb_svc_request_handler(struct gb_operation *op) -{ - struct gb_connection *connection = op->connection; - struct gb_svc *svc = gb_connection_get_data(connection); - u8 type = op->type; - int ret = 0; - - /* - * SVC requests need to follow a specific order (at least initially) and - * the code below takes care of enforcing that. The expected order is: - * - PROTOCOL_VERSION - * - SVC_HELLO - * - Any other request, except the earlier two. - * - * Incoming requests are guaranteed to be serialized and so we don't - * need to protect 'state' against races.
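- *
- * The resulting acceptance matrix (a sketch of the switch below,
- * states down, request types across) is:
- *
- *	state \ request		VERSION		HELLO		other
- *	RESET			accept		-EINVAL		-EINVAL
- *	PROTOCOL_VERSION	-EINVAL		accept		-EINVAL
- *	SVC_HELLO		-EINVAL		-EINVAL		accept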
- */ - switch (type) { - case GB_SVC_TYPE_PROTOCOL_VERSION: - if (svc->state != GB_SVC_STATE_RESET) - ret = -EINVAL; - break; - case GB_SVC_TYPE_SVC_HELLO: - if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION) - ret = -EINVAL; - break; - default: - if (svc->state != GB_SVC_STATE_SVC_HELLO) - ret = -EINVAL; - break; - } - - if (ret) { - dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n", - type, svc->state); - return ret; - } - - switch (type) { - case GB_SVC_TYPE_PROTOCOL_VERSION: - ret = gb_svc_version_request(op); - if (!ret) - svc->state = GB_SVC_STATE_PROTOCOL_VERSION; - return ret; - case GB_SVC_TYPE_SVC_HELLO: - ret = gb_svc_hello(op); - if (!ret) - svc->state = GB_SVC_STATE_SVC_HELLO; - return ret; - case GB_SVC_TYPE_INTF_RESET: - return gb_svc_intf_reset_recv(op); - case GB_SVC_TYPE_MODULE_INSERTED: - return gb_svc_module_inserted_recv(op); - case GB_SVC_TYPE_MODULE_REMOVED: - return gb_svc_module_removed_recv(op); - case GB_SVC_TYPE_INTF_MAILBOX_EVENT: - return gb_svc_intf_mailbox_event_recv(op); - case GB_SVC_TYPE_INTF_OOPS: - return gb_svc_intf_oops_recv(op); - default: - dev_warn(&svc->dev, "unsupported request 0x%02x\n", type); - return -EINVAL; - } -} - -static void gb_svc_release(struct device *dev) -{ - struct gb_svc *svc = to_gb_svc(dev); - - if (svc->connection) - gb_connection_destroy(svc->connection); - ida_destroy(&svc->device_id_map); - destroy_workqueue(svc->wq); - kfree(svc); -} - -struct device_type greybus_svc_type = { - .name = "greybus_svc", - .release = gb_svc_release, -}; - -struct gb_svc *gb_svc_create(struct gb_host_device *hd) -{ - struct gb_svc *svc; - - svc = kzalloc(sizeof(*svc), GFP_KERNEL); - if (!svc) - return NULL; - - svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); - if (!svc->wq) { - kfree(svc); - return NULL; - } - - svc->dev.parent = &hd->dev; - svc->dev.bus = &greybus_bus_type; - svc->dev.type = &greybus_svc_type; - svc->dev.groups = svc_groups; - svc->dev.dma_mask = svc->dev.parent->dma_mask; - device_initialize(&svc->dev); - - dev_set_name(&svc->dev, "%d-svc", hd->bus_id); - - ida_init(&svc->device_id_map); - svc->state = GB_SVC_STATE_RESET; - svc->hd = hd; - - svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, - gb_svc_request_handler); - if (IS_ERR(svc->connection)) { - dev_err(&svc->dev, "failed to create connection: %ld\n", - PTR_ERR(svc->connection)); - goto err_put_device; - } - - gb_connection_set_data(svc->connection, svc); - - return svc; - -err_put_device: - put_device(&svc->dev); - return NULL; -} - -int gb_svc_add(struct gb_svc *svc) -{ - int ret; - - /* - * The SVC protocol is currently driven by the SVC, so the SVC device - * is added from the connection request handler when enough - * information has been received. - */ - ret = gb_connection_enable(svc->connection); - if (ret) - return ret; - - return 0; -} - -static void gb_svc_remove_modules(struct gb_svc *svc) -{ - struct gb_host_device *hd = svc->hd; - struct gb_module *module, *tmp; - - list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { - gb_module_del(module); - list_del(&module->hd_node); - gb_module_put(module); - } -} - -void gb_svc_del(struct gb_svc *svc) -{ - gb_connection_disable_rx(svc->connection); - - /* - * The SVC device may have been registered from the request handler. 
- */ - if (device_is_registered(&svc->dev)) { - gb_svc_debugfs_exit(svc); - gb_svc_watchdog_destroy(svc); - device_del(&svc->dev); - } - - flush_workqueue(svc->wq); - - gb_svc_remove_modules(svc); - - gb_connection_disable(svc->connection); -} - -void gb_svc_put(struct gb_svc *svc) -{ - put_device(&svc->dev); -} diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c deleted file mode 100644 index b6b1682c19c4..000000000000 --- a/drivers/staging/greybus/svc_watchdog.c +++ /dev/null @@ -1,197 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * SVC Greybus "watchdog" driver. - * - * Copyright 2016 Google Inc. - */ - -#include -#include -#include -#include - -#define SVC_WATCHDOG_PERIOD (2 * HZ) - -struct gb_svc_watchdog { - struct delayed_work work; - struct gb_svc *svc; - bool enabled; - struct notifier_block pm_notifier; -}; - -static struct delayed_work reset_work; - -static int svc_watchdog_pm_notifier(struct notifier_block *notifier, - unsigned long pm_event, void *unused) -{ - struct gb_svc_watchdog *watchdog = - container_of(notifier, struct gb_svc_watchdog, pm_notifier); - - switch (pm_event) { - case PM_SUSPEND_PREPARE: - gb_svc_watchdog_disable(watchdog->svc); - break; - case PM_POST_SUSPEND: - gb_svc_watchdog_enable(watchdog->svc); - break; - default: - break; - } - - return NOTIFY_DONE; -} - -static void greybus_reset(struct work_struct *work) -{ - static char const start_path[] = "/system/bin/start"; - static char *envp[] = { - "HOME=/", - "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin", - NULL, - }; - static char *argv[] = { - (char *)start_path, - "unipro_reset", - NULL, - }; - - pr_err("svc_watchdog: calling \"%s %s\" to reset greybus network!\n", - argv[0], argv[1]); - call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC); -} - -static void do_work(struct work_struct *work) -{ - struct gb_svc_watchdog *watchdog; - struct gb_svc *svc; - int retval; - - watchdog = container_of(work, struct gb_svc_watchdog, work.work); - svc = watchdog->svc; - - dev_dbg(&svc->dev, "%s: ping.\n", __func__); - retval = gb_svc_ping(svc); - if (retval) { - /* - * Something went really wrong, let's warn userspace and then - * pull the plug and reset the whole greybus network. - * We need to do this outside of this workqueue as we will be - * tearing down the svc device itself. So queue up - * yet-another-callback to do that. - */ - dev_err(&svc->dev, - "SVC ping has returned %d, something is wrong!!!\n", - retval); - - if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) { - panic("SVC is not responding\n"); - } else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) { - dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n"); - - INIT_DELAYED_WORK(&reset_work, greybus_reset); - schedule_delayed_work(&reset_work, HZ / 2); - - /* - * Disable ourselves, we don't want to trip again unless - * userspace wants us to. 
- */ - watchdog->enabled = false; - } - } - - /* resubmit our work to happen again, if we are still "alive" */ - if (watchdog->enabled) - schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); -} - -int gb_svc_watchdog_create(struct gb_svc *svc) -{ - struct gb_svc_watchdog *watchdog; - int retval; - - if (svc->watchdog) - return 0; - - watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL); - if (!watchdog) - return -ENOMEM; - - watchdog->enabled = false; - watchdog->svc = svc; - INIT_DELAYED_WORK(&watchdog->work, do_work); - svc->watchdog = watchdog; - - watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier; - retval = register_pm_notifier(&watchdog->pm_notifier); - if (retval) { - dev_err(&svc->dev, "error registering pm notifier(%d)\n", - retval); - goto svc_watchdog_create_err; - } - - retval = gb_svc_watchdog_enable(svc); - if (retval) { - dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval); - unregister_pm_notifier(&watchdog->pm_notifier); - goto svc_watchdog_create_err; - } - return retval; - -svc_watchdog_create_err: - svc->watchdog = NULL; - kfree(watchdog); - - return retval; -} - -void gb_svc_watchdog_destroy(struct gb_svc *svc) -{ - struct gb_svc_watchdog *watchdog = svc->watchdog; - - if (!watchdog) - return; - - unregister_pm_notifier(&watchdog->pm_notifier); - gb_svc_watchdog_disable(svc); - svc->watchdog = NULL; - kfree(watchdog); -} - -bool gb_svc_watchdog_enabled(struct gb_svc *svc) -{ - if (!svc || !svc->watchdog) - return false; - return svc->watchdog->enabled; -} - -int gb_svc_watchdog_enable(struct gb_svc *svc) -{ - struct gb_svc_watchdog *watchdog; - - if (!svc->watchdog) - return -ENODEV; - - watchdog = svc->watchdog; - if (watchdog->enabled) - return 0; - - watchdog->enabled = true; - schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); - return 0; -} - -int gb_svc_watchdog_disable(struct gb_svc *svc) -{ - struct gb_svc_watchdog *watchdog; - - if (!svc->watchdog) - return -ENODEV; - - watchdog = svc->watchdog; - if (!watchdog->enabled) - return 0; - - watchdog->enabled = false; - cancel_delayed_work_sync(&watchdog->work); - return 0; -} -- cgit v1.2.3-59-g8ed1b From c48c9f7ff32b8b3965a08e40eb6763682d905b5d Mon Sep 17 00:00:00 2001 From: Valdis Klētnieks Date: Wed, 28 Aug 2019 18:08:17 +0200 Subject: staging: exfat: add exfat filesystem code to staging The exfat code needs a lot of work to get it into "real" shape for the fs/ part of the kernel, so put it into drivers/staging/ for now so that it can be worked on by everyone in the community.
The full specification of the filesystem can be found at: https://docs.microsoft.com/en-us/windows/win32/fileio/exfat-specification Signed-off-by: Valdis Kletnieks Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20190828160817.6250-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 5 + drivers/staging/Kconfig | 2 + drivers/staging/Makefile | 1 + drivers/staging/exfat/Kconfig | 39 + drivers/staging/exfat/Makefile | 10 + drivers/staging/exfat/TODO | 12 + drivers/staging/exfat/exfat.h | 973 ++++++++ drivers/staging/exfat/exfat_blkdev.c | 136 ++ drivers/staging/exfat/exfat_cache.c | 722 ++++++ drivers/staging/exfat/exfat_core.c | 3704 ++++++++++++++++++++++++++++++ drivers/staging/exfat/exfat_nls.c | 404 ++++ drivers/staging/exfat/exfat_super.c | 4137 ++++++++++++++++++++++++++++++++++ drivers/staging/exfat/exfat_upcase.c | 740 ++++++ 13 files changed, 10885 insertions(+) create mode 100644 drivers/staging/exfat/Kconfig create mode 100644 drivers/staging/exfat/Makefile create mode 100644 drivers/staging/exfat/TODO create mode 100644 drivers/staging/exfat/exfat.h create mode 100644 drivers/staging/exfat/exfat_blkdev.c create mode 100644 drivers/staging/exfat/exfat_cache.c create mode 100644 drivers/staging/exfat/exfat_core.c create mode 100644 drivers/staging/exfat/exfat_nls.c create mode 100644 drivers/staging/exfat/exfat_super.c create mode 100644 drivers/staging/exfat/exfat_upcase.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index e3242687cd19..a484b36e5117 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6097,6 +6097,11 @@ F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h +EXFAT FILE SYSTEM +M: Valdis Kletnieks +S: Maintained +F: fs/exfat/ + EXT2 FILE SYSTEM M: Jan Kara L: linux-ext4@vger.kernel.org diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d972ec8e71fb..fbdc33874780 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -118,4 +118,6 @@ source "drivers/staging/kpc2000/Kconfig" source "drivers/staging/isdn/Kconfig" +source "drivers/staging/exfat/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 6018b9a4a077..ca13f87b1e1b 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -49,3 +49,4 @@ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_KPC2000) += kpc2000/ obj-$(CONFIG_ISDN_CAPI) += isdn/ +obj-$(CONFIG_EXFAT_FS) += exfat/ diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig new file mode 100644 index 000000000000..78b32aa2ca19 --- /dev/null +++ b/drivers/staging/exfat/Kconfig @@ -0,0 +1,39 @@ +config EXFAT_FS + tristate "exFAT fs support" + select NLS + help + This adds support for the exFAT file system. + +config EXFAT_DISCARD + bool "enable discard support" + depends on EXFAT_FS + default y + +config EXFAT_DELAYED_SYNC + bool "enable delayed sync" + depends on EXFAT_FS + default n + +config EXFAT_KERNEL_DEBUG + bool "enable kernel debug features via ioctl" + depends on EXFAT_FS + default n + +config EXFAT_DEBUG_MSG + bool "print debug messages" + depends on EXFAT_FS + default n + +config EXFAT_DEFAULT_CODEPAGE + int "Default codepage for exFAT" + default 437 + depends on EXFAT_FS + help + This option should be set to the codepage of your exFAT filesystems. 
+ +config EXFAT_DEFAULT_IOCHARSET + string "Default iocharset for exFAT" + default "utf8" + depends on EXFAT_FS + help + Set this to the default input/output character set you'd like exFAT to use. diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile new file mode 100644 index 000000000000..84944dfbae28 --- /dev/null +++ b/drivers/staging/exfat/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_EXFAT_FS) += exfat.o + +exfat-y := exfat_core.o \ + exfat_super.o \ + exfat_blkdev.o \ + exfat_cache.o \ + exfat_nls.o \ + exfat_upcase.o diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO new file mode 100644 index 000000000000..a3eb282f9efc --- /dev/null +++ b/drivers/staging/exfat/TODO @@ -0,0 +1,12 @@ +exfat_core.c - ffsReadFile - the goto err_out seems to leak a brelse(). +same for ffsWriteFile. + +exfat_core.c - fs_sync(sb,0) all over the place looks fishy as hell. +There's only one place that calls it with a non-zero argument. + +ffsTruncateFile - if (old_size <= new_size) { +That doesn't look right. How did it ever work? Are they relying on lazy +block allocation when actual writes happen? If nothing else, it never +does the 'fid->size = new_size' or the inode update.... + +ffsSetAttr() is just dangling in the breeze, not wired up at all... diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h new file mode 100644 index 000000000000..bae180e10609 --- /dev/null +++ b/drivers/staging/exfat/exfat.h @@ -0,0 +1,973 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + */ + +#ifndef _EXFAT_H +#define _EXFAT_H + +#include <linux/types.h> +#include <linux/buffer_head.h> + +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + /* For Debugging Purpose */ + /* IOCTL code 'f' used by + * - file systems typically #0~0x1F + * - embedded terminal devices #128~ + * - exts for debugging purpose #99 + * numbers 100 and 101 are available now but have possible conflicts + */ +#define EXFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long) +#define EXFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long) + +#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01 +#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02 +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + +#ifdef CONFIG_EXFAT_DEBUG_MSG +#define DEBUG 1 +#else +#undef DEBUG +#endif + +#define DENTRY_SIZE 32 /* dir entry size */ +#define DENTRY_SIZE_BITS 5 + +/* PBR entries */ +#define PBR_SIGNATURE 0xAA55 +#define EXT_SIGNATURE 0xAA550000 +#define VOL_LABEL "NO NAME " /* size should be 11 */ +#define OEM_NAME "MSWIN4.1" /* size should be 8 */ +#define STR_FAT12 "FAT12 " /* size should be 8 */ +#define STR_FAT16 "FAT16 " /* size should be 8 */ +#define STR_FAT32 "FAT32 " /* size should be 8 */ +#define STR_EXFAT "EXFAT " /* size should be 8 */ +#define VOL_CLEAN 0x0000 +#define VOL_DIRTY 0x0002 + +/* max number of clusters */ +#define FAT12_THRESHOLD 4087 /* 2^12 - 1 + 2 (clu 0 & 1) */ +#define FAT16_THRESHOLD 65527 /* 2^16 - 1 + 2 */ +#define FAT32_THRESHOLD 268435457 /* 2^28 - 1 + 2 */ +#define EXFAT_THRESHOLD 268435457 /* 2^28 - 1 + 2 */ + +/* file types */ +#define TYPE_UNUSED 0x0000 +#define TYPE_DELETED 0x0001 +#define TYPE_INVALID 0x0002 +#define TYPE_CRITICAL_PRI 0x0100 +#define TYPE_BITMAP 0x0101 +#define TYPE_UPCASE 0x0102 +#define TYPE_VOLUME 0x0103 +#define TYPE_DIR 0x0104 +#define TYPE_FILE 0x011F +#define TYPE_SYMLINK 0x015F +#define TYPE_CRITICAL_SEC 0x0200 +#define TYPE_STREAM 0x0201 +#define TYPE_EXTEND 0x0202 +#define TYPE_ACL 0x0203 +#define TYPE_BENIGN_PRI 0x0400 +#define TYPE_GUID 0x0401 +#define
TYPE_PADDING 0x0402 +#define TYPE_ACLTAB 0x0403 +#define TYPE_BENIGN_SEC 0x0800 +#define TYPE_ALL 0x0FFF + +/* time modes */ +#define TM_CREATE 0 +#define TM_MODIFY 1 +#define TM_ACCESS 2 + +/* checksum types */ +#define CS_DIR_ENTRY 0 +#define CS_PBR_SECTOR 1 +#define CS_DEFAULT 2 + +#define CLUSTER_16(x) ((u16)(x)) +#define CLUSTER_32(x) ((u32)(x)) + +#define FALSE 0 +#define TRUE 1 + +#define START_SECTOR(x) \ + ((((sector_t)((x) - 2)) << p_fs->sectors_per_clu_bits) + \ + p_fs->data_start_sector) + +#define IS_LAST_SECTOR_IN_CLUSTER(sec) \ + ((((sec) - p_fs->data_start_sector + 1) & \ + ((1 << p_fs->sectors_per_clu_bits) - 1)) == 0) + +#define GET_CLUSTER_FROM_SECTOR(sec) \ + ((u32)((((sec) - p_fs->data_start_sector) >> \ + p_fs->sectors_per_clu_bits) + 2)) + +#define GET16(p_src) \ + (((u16)(p_src)[0]) | (((u16)(p_src)[1]) << 8)) +#define GET32(p_src) \ + (((u32)(p_src)[0]) | (((u32)(p_src)[1]) << 8) | \ + (((u32)(p_src)[2]) << 16) | (((u32)(p_src)[3]) << 24)) +#define GET64(p_src) \ + (((u64)(p_src)[0]) | (((u64)(p_src)[1]) << 8) | \ + (((u64)(p_src)[2]) << 16) | (((u64)(p_src)[3]) << 24) | \ + (((u64)(p_src)[4]) << 32) | (((u64)(p_src)[5]) << 40) | \ + (((u64)(p_src)[6]) << 48) | (((u64)(p_src)[7]) << 56)) + +#define SET16(p_dst, src) \ + do { \ + (p_dst)[0] = (u8)(src); \ + (p_dst)[1] = (u8)(((u16)(src)) >> 8); \ + } while (0) +#define SET32(p_dst, src) \ + do { \ + (p_dst)[0] = (u8)(src); \ + (p_dst)[1] = (u8)(((u32)(src)) >> 8); \ + (p_dst)[2] = (u8)(((u32)(src)) >> 16); \ + (p_dst)[3] = (u8)(((u32)(src)) >> 24); \ + } while (0) +#define SET64(p_dst, src) \ + do { \ + (p_dst)[0] = (u8)(src); \ + (p_dst)[1] = (u8)(((u64)(src)) >> 8); \ + (p_dst)[2] = (u8)(((u64)(src)) >> 16); \ + (p_dst)[3] = (u8)(((u64)(src)) >> 24); \ + (p_dst)[4] = (u8)(((u64)(src)) >> 32); \ + (p_dst)[5] = (u8)(((u64)(src)) >> 40); \ + (p_dst)[6] = (u8)(((u64)(src)) >> 48); \ + (p_dst)[7] = (u8)(((u64)(src)) >> 56); \ + } while (0) + +#ifdef __LITTLE_ENDIAN +#define GET16_A(p_src) (*((u16 *)(p_src))) +#define GET32_A(p_src) (*((u32 *)(p_src))) +#define GET64_A(p_src) (*((u64 *)(p_src))) +#define SET16_A(p_dst, src) (*((u16 *)(p_dst)) = (u16)(src)) +#define SET32_A(p_dst, src) (*((u32 *)(p_dst)) = (u32)(src)) +#define SET64_A(p_dst, src) (*((u64 *)(p_dst)) = (u64)(src)) +#else /* BIG_ENDIAN */ +#define GET16_A(p_src) GET16(p_src) +#define GET32_A(p_src) GET32(p_src) +#define GET64_A(p_src) GET64(p_src) +#define SET16_A(p_dst, src) SET16(p_dst, src) +#define SET32_A(p_dst, src) SET32(p_dst, src) +#define SET64_A(p_dst, src) SET64(p_dst, src) +#endif + +/* cache size (in number of sectors) */ +/* (should be a power of 2) */ +#define FAT_CACHE_SIZE 128 +#define FAT_CACHE_HASH_SIZE 64 +#define BUF_CACHE_SIZE 256 +#define BUF_CACHE_HASH_SIZE 64 + +/* Upcase table macro */ +#define HIGH_INDEX_BIT (8) +#define HIGH_INDEX_MASK (0xFF00) +#define LOW_INDEX_BIT (16-HIGH_INDEX_BIT) +#define UTBL_ROW_COUNT (1 << LOW_INDEX_BIT) +#define UTBL_COL_COUNT (1 << HIGH_INDEX_BIT) + +static inline u16 get_col_index(u16 i) +{ + return i >> LOW_INDEX_BIT; +} +static inline u16 get_row_index(u16 i) +{ + return i & ~HIGH_INDEX_MASK; +} + +#define EXFAT_SUPER_MAGIC (0x2011BAB0L) +#define EXFAT_ROOT_INO 1 + +/* FAT types */ +#define FAT12 0x01 /* FAT12 */ +#define FAT16 0x0E /* Win95 FAT16 (LBA) */ +#define FAT32 0x0C /* Win95 FAT32 (LBA) */ +#define EXFAT 0x07 /* exFAT */ + +/* file name lengths */ +#define MAX_CHARSET_SIZE 3 /* max size of multi-byte character */ +#define MAX_PATH_DEPTH 15 /* max depth of path name */ +#define MAX_NAME_LENGTH 256 /* max len of filename including NULL */ +#define MAX_PATH_LENGTH 260 /* max len of pathname
including NULL */ +#define DOS_NAME_LENGTH 11 /* DOS filename length excluding NULL */ +#define DOS_PATH_LENGTH 80 /* DOS pathname length excluding NULL */ + +/* file attributes */ +#define ATTR_NORMAL 0x0000 +#define ATTR_READONLY 0x0001 +#define ATTR_HIDDEN 0x0002 +#define ATTR_SYSTEM 0x0004 +#define ATTR_VOLUME 0x0008 +#define ATTR_SUBDIR 0x0010 +#define ATTR_ARCHIVE 0x0020 +#define ATTR_SYMLINK 0x0040 +#define ATTR_EXTEND 0x000F +#define ATTR_RWMASK 0x007E + +/* file creation modes */ +#define FM_REGULAR 0x00 +#define FM_SYMLINK 0x40 + +/* return values */ +#define FFS_SUCCESS 0 +#define FFS_MEDIAERR 1 +#define FFS_FORMATERR 2 +#define FFS_MOUNTED 3 +#define FFS_NOTMOUNTED 4 +#define FFS_ALIGNMENTERR 5 +#define FFS_SEMAPHOREERR 6 +#define FFS_INVALIDPATH 7 +#define FFS_INVALIDFID 8 +#define FFS_NOTFOUND 9 +#define FFS_FILEEXIST 10 +#define FFS_PERMISSIONERR 11 +#define FFS_NOTOPENED 12 +#define FFS_MAXOPENED 13 +#define FFS_FULL 14 +#define FFS_EOF 15 +#define FFS_DIRBUSY 16 +#define FFS_MEMORYERR 17 +#define FFS_NAMETOOLONG 18 +#define FFS_ERROR 19 + +#define NUM_UPCASE 2918 + +#define DOS_CUR_DIR_NAME ". " +#define DOS_PAR_DIR_NAME ".. " + +#ifdef __LITTLE_ENDIAN +#define UNI_CUR_DIR_NAME ".\0" +#define UNI_PAR_DIR_NAME ".\0.\0" +#else +#define UNI_CUR_DIR_NAME "\0." +#define UNI_PAR_DIR_NAME "\0.\0." +#endif + +struct date_time_t { + u16 Year; + u16 Month; + u16 Day; + u16 Hour; + u16 Minute; + u16 Second; + u16 MilliSecond; +}; + +struct part_info_t { + u32 Offset; /* start sector number of the partition */ + u32 Size; /* in sectors */ +}; + +struct dev_info_t { + u32 SecSize; /* sector size in bytes */ + u32 DevSize; /* block device size in sectors */ +}; + +struct vol_info_t { + u32 FatType; + u32 ClusterSize; + u32 NumClusters; + u32 FreeClusters; + u32 UsedClusters; +}; + +/* directory structure */ +struct chain_t { + u32 dir; + s32 size; + u8 flags; +}; + +struct file_id_t { + struct chain_t dir; + s32 entry; + u32 type; + u32 attr; + u32 start_clu; + u64 size; + u8 flags; + s64 rwoffset; + s32 hint_last_off; + u32 hint_last_clu; +}; + +struct dir_entry_t { + char Name[MAX_NAME_LENGTH * MAX_CHARSET_SIZE]; + + /* used only for FAT12/16/32, not used for exFAT */ + char ShortName[DOS_NAME_LENGTH + 2]; + + u32 Attr; + u64 Size; + u32 NumSubdirs; + struct date_time_t CreateTimestamp; + struct date_time_t ModifyTimestamp; + struct date_time_t AccessTimestamp; +}; + +struct timestamp_t { + u16 sec; /* 0 ~ 59 */ + u16 min; /* 0 ~ 59 */ + u16 hour; /* 0 ~ 23 */ + u16 day; /* 1 ~ 31 */ + u16 mon; /* 1 ~ 12 */ + u16 year; /* 0 ~ 127 (since 1980) */ +}; + +/* MS_DOS FAT partition boot record (512 bytes) */ +struct pbr_sector_t { + u8 jmp_boot[3]; + u8 oem_name[8]; + u8 bpb[109]; + u8 boot_code[390]; + u8 signature[2]; +}; + +/* MS-DOS FAT12/16 BIOS parameter block (51 bytes) */ +struct bpb16_t { + u8 sector_size[2]; + u8 sectors_per_clu; + u8 num_reserved[2]; + u8 num_fats; + u8 num_root_entries[2]; + u8 num_sectors[2]; + u8 media_type; + u8 num_fat_sectors[2]; + u8 sectors_in_track[2]; + u8 num_heads[2]; + u8 num_hid_sectors[4]; + u8 num_huge_sectors[4]; + + u8 phy_drv_no; + u8 reserved; + u8 ext_signature; + u8 vol_serial[4]; + u8 vol_label[11]; + u8 vol_type[8]; +}; + +/* MS-DOS FAT32 BIOS parameter block (79 bytes) */ +struct bpb32_t { + u8 sector_size[2]; + u8 sectors_per_clu; + u8 num_reserved[2]; + u8 num_fats; + u8 num_root_entries[2]; + u8 num_sectors[2]; + u8 media_type; + u8 num_fat_sectors[2]; + u8 sectors_in_track[2]; + u8 num_heads[2]; + u8 num_hid_sectors[4]; + u8 
num_huge_sectors[4]; + u8 num_fat32_sectors[4]; + u8 ext_flags[2]; + u8 fs_version[2]; + u8 root_cluster[4]; + u8 fsinfo_sector[2]; + u8 backup_sector[2]; + u8 reserved[12]; + + u8 phy_drv_no; + u8 ext_reserved; + u8 ext_signature; + u8 vol_serial[4]; + u8 vol_label[11]; + u8 vol_type[8]; +}; + +/* MS-DOS EXFAT BIOS parameter block (109 bytes) */ +struct bpbex_t { + u8 reserved1[53]; + u8 vol_offset[8]; + u8 vol_length[8]; + u8 fat_offset[4]; + u8 fat_length[4]; + u8 clu_offset[4]; + u8 clu_count[4]; + u8 root_cluster[4]; + u8 vol_serial[4]; + u8 fs_version[2]; + u8 vol_flags[2]; + u8 sector_size_bits; + u8 sectors_per_clu_bits; + u8 num_fats; + u8 phy_drv_no; + u8 perc_in_use; + u8 reserved2[7]; +}; + +/* MS-DOS FAT file system information sector (512 bytes) */ +struct fsi_sector_t { + u8 signature1[4]; + u8 reserved1[480]; + u8 signature2[4]; + u8 free_cluster[4]; + u8 next_cluster[4]; + u8 reserved2[14]; + u8 signature3[2]; +}; + +/* MS-DOS FAT directory entry (32 bytes) */ +struct dentry_t { + u8 dummy[32]; +}; + +struct dos_dentry_t { + u8 name[DOS_NAME_LENGTH]; + u8 attr; + u8 lcase; + u8 create_time_ms; + u8 create_time[2]; + u8 create_date[2]; + u8 access_date[2]; + u8 start_clu_hi[2]; + u8 modify_time[2]; + u8 modify_date[2]; + u8 start_clu_lo[2]; + u8 size[4]; +}; + +/* MS-DOS FAT extended directory entry (32 bytes) */ +struct ext_dentry_t { + u8 order; + u8 unicode_0_4[10]; + u8 attr; + u8 sysid; + u8 checksum; + u8 unicode_5_10[12]; + u8 start_clu[2]; + u8 unicode_11_12[4]; +}; + +/* MS-DOS EXFAT file directory entry (32 bytes) */ +struct file_dentry_t { + u8 type; + u8 num_ext; + u8 checksum[2]; + u8 attr[2]; + u8 reserved1[2]; + u8 create_time[2]; + u8 create_date[2]; + u8 modify_time[2]; + u8 modify_date[2]; + u8 access_time[2]; + u8 access_date[2]; + u8 create_time_ms; + u8 modify_time_ms; + u8 access_time_ms; + u8 reserved2[9]; +}; + +/* MS-DOS EXFAT stream extension directory entry (32 bytes) */ +struct strm_dentry_t { + u8 type; + u8 flags; + u8 reserved1; + u8 name_len; + u8 name_hash[2]; + u8 reserved2[2]; + u8 valid_size[8]; + u8 reserved3[4]; + u8 start_clu[4]; + u8 size[8]; +}; + +/* MS-DOS EXFAT file name directory entry (32 bytes) */ +struct name_dentry_t { + u8 type; + u8 flags; + u8 unicode_0_14[30]; +}; + +/* MS-DOS EXFAT allocation bitmap directory entry (32 bytes) */ +struct bmap_dentry_t { + u8 type; + u8 flags; + u8 reserved[18]; + u8 start_clu[4]; + u8 size[8]; +}; + +/* MS-DOS EXFAT up-case table directory entry (32 bytes) */ +struct case_dentry_t { + u8 type; + u8 reserved1[3]; + u8 checksum[4]; + u8 reserved2[12]; + u8 start_clu[4]; + u8 size[8]; +}; + +/* MS-DOS EXFAT volume label directory entry (32 bytes) */ +struct volm_dentry_t { + u8 type; + u8 label_len; + u8 unicode_0_10[22]; + u8 reserved[8]; +}; + +/* unused entry hint information */ +struct uentry_t { + u32 dir; + s32 entry; + struct chain_t clu; +}; + +/* DOS name structure */ +struct dos_name_t { + u8 name[DOS_NAME_LENGTH]; + u8 name_case; +}; + +/* unicode name structure */ +struct uni_name_t { + u16 name[MAX_NAME_LENGTH]; + u16 name_hash; + u8 name_len; +}; + +struct buf_cache_t { + struct buf_cache_t *next; + struct buf_cache_t *prev; + struct buf_cache_t *hash_next; + struct buf_cache_t *hash_prev; + s32 drv; + sector_t sec; + u32 flag; + struct buffer_head *buf_bh; +}; + +struct fs_func { + s32 (*alloc_cluster)(struct super_block *sb, s32 num_alloc, + struct chain_t *p_chain); + void (*free_cluster)(struct super_block *sb, struct chain_t *p_chain, + s32 do_relse); + s32 
(*count_used_clusters)(struct super_block *sb); + + s32 (*init_dir_entry)(struct super_block *sb, struct chain_t *p_dir, + s32 entry, u32 type, u32 start_clu, u64 size); + s32 (*init_ext_entry)(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 num_entries, + struct uni_name_t *p_uniname, + struct dos_name_t *p_dosname); + s32 (*find_dir_entry)(struct super_block *sb, struct chain_t *p_dir, + struct uni_name_t *p_uniname, s32 num_entries, + struct dos_name_t *p_dosname, u32 type); + void (*delete_dir_entry)(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + s32 offset, s32 num_entries); + void (*get_uni_name_from_ext_entry)(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + u16 *uniname); + s32 (*count_ext_entries)(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + struct dentry_t *p_entry); + s32 (*calc_num_entries)(struct uni_name_t *p_uniname); + + u32 (*get_entry_type)(struct dentry_t *p_entry); + void (*set_entry_type)(struct dentry_t *p_entry, u32 type); + u32 (*get_entry_attr)(struct dentry_t *p_entry); + void (*set_entry_attr)(struct dentry_t *p_entry, u32 attr); + u8 (*get_entry_flag)(struct dentry_t *p_entry); + void (*set_entry_flag)(struct dentry_t *p_entry, u8 flag); + u32 (*get_entry_clu0)(struct dentry_t *p_entry); + void (*set_entry_clu0)(struct dentry_t *p_entry, u32 clu0); + u64 (*get_entry_size)(struct dentry_t *p_entry); + void (*set_entry_size)(struct dentry_t *p_entry, u64 size); + void (*get_entry_time)(struct dentry_t *p_entry, + struct timestamp_t *tp, u8 mode); + void (*set_entry_time)(struct dentry_t *p_entry, + struct timestamp_t *tp, u8 mode); +}; + +struct fs_info_t { + u32 drv; /* drive ID */ + u32 vol_type; /* volume FAT type */ + u32 vol_id; /* volume serial number */ + + u64 num_sectors; /* num of sectors in volume */ + u32 num_clusters; /* num of clusters in volume */ + u32 cluster_size; /* cluster size in bytes */ + u32 cluster_size_bits; + u32 sectors_per_clu; /* cluster size in sectors */ + u32 sectors_per_clu_bits; + + u32 PBR_sector; /* PBR sector */ + u32 FAT1_start_sector; /* FAT1 start sector */ + u32 FAT2_start_sector; /* FAT2 start sector */ + u32 root_start_sector; /* root dir start sector */ + u32 data_start_sector; /* data area start sector */ + u32 num_FAT_sectors; /* num of FAT sectors */ + + u32 root_dir; /* root dir cluster */ + u32 dentries_in_root; /* num of dentries in root dir */ + u32 dentries_per_clu; /* num of dentries per cluster */ + + u32 vol_flag; /* volume dirty flag */ + struct buffer_head *pbr_bh; /* PBR sector */ + + u32 map_clu; /* allocation bitmap start cluster */ + u32 map_sectors; /* num of allocation bitmap sectors */ + struct buffer_head **vol_amap; /* allocation bitmap */ + + u16 **vol_utbl; /* upcase table */ + + u32 clu_srch_ptr; /* cluster search pointer */ + u32 used_clusters; /* number of used clusters */ + struct uentry_t hint_uentry; /* unused entry hint information */ + + u32 dev_ejected; /* block device operation error flag */ + + struct fs_func *fs_func; + struct semaphore v_sem; + + /* FAT cache */ + struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE]; + struct buf_cache_t FAT_cache_lru_list; + struct buf_cache_t FAT_cache_hash_list[FAT_CACHE_HASH_SIZE]; + + /* buf cache */ + struct buf_cache_t buf_cache_array[BUF_CACHE_SIZE]; + struct buf_cache_t buf_cache_lru_list; + struct buf_cache_t buf_cache_hash_list[BUF_CACHE_HASH_SIZE]; +}; + +#define ES_2_ENTRIES 2 +#define ES_3_ENTRIES 3 +#define ES_ALL_ENTRIES 0 + +struct entry_set_cache_t { + /* sector number that 
contains file_entry */ + sector_t sector; + + /* byte offset in the sector */ + s32 offset; + + /* + * flag in stream entry. + * 01 for cluster chain, + * 03 for contig. clusters. + */ + s32 alloc_flag; + + u32 num_entries; + + /* __buf should be the last member */ + void *__buf; +}; + +#define EXFAT_ERRORS_CONT 1 /* ignore error and continue */ +#define EXFAT_ERRORS_PANIC 2 /* panic on error */ +#define EXFAT_ERRORS_RO 3 /* remount r/o on error */ + +/* ioctl command */ +#define EXFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32) + +struct exfat_mount_options { + kuid_t fs_uid; + kgid_t fs_gid; + unsigned short fs_fmask; + unsigned short fs_dmask; + + /* permission for setting the [am]time */ + unsigned short allow_utime; + + /* codepage for shortname conversions */ + unsigned short codepage; + + /* charset for filename input/display */ + char *iocharset; + + unsigned char casesensitive; + + /* on error: continue, panic, remount-ro */ + unsigned char errors; +#ifdef CONFIG_EXFAT_DISCARD + /* flag on if -o discard specified and device supports discard() */ + unsigned char discard; +#endif /* CONFIG_EXFAT_DISCARD */ +}; + +#define EXFAT_HASH_BITS 8 +#define EXFAT_HASH_SIZE (1UL << EXFAT_HASH_BITS) + +/* + * EXFAT file system in-core superblock data + */ +struct bd_info_t { + s32 sector_size; /* in bytes */ + s32 sector_size_bits; + s32 sector_size_mask; + + /* total number of sectors in this block device */ + s32 num_sectors; + + /* opened or not */ + bool opened; +}; + +struct exfat_sb_info { + struct fs_info_t fs_info; + struct bd_info_t bd_info; + + struct exfat_mount_options options; + + int s_dirt; + struct mutex s_lock; + struct nls_table *nls_disk; /* Codepage used on disk */ + struct nls_table *nls_io; /* Charset used for input and display */ + + struct inode *fat_inode; + + spinlock_t inode_hash_lock; + struct hlist_head inode_hashtable[EXFAT_HASH_SIZE]; +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + long debug_flags; +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ +}; + +/* + * EXFAT file system inode data in memory + */ +struct exfat_inode_info { + struct file_id_t fid; + char *target; + /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */ + loff_t mmu_private; /* physically allocated size */ + loff_t i_pos; /* on-disk position of directory entry or 0 */ + struct hlist_node i_hash_fat; /* hash by i_location */ + struct rw_semaphore truncate_lock; + struct inode vfs_inode; + struct rw_semaphore i_alloc_sem; /* protect bmap against truncate */ +}; + +#define EXFAT_SB(sb) ((struct exfat_sb_info *)((sb)->s_fs_info)) + +static inline struct exfat_inode_info *EXFAT_I(struct inode *inode) +{ + return container_of(inode, struct exfat_inode_info, vfs_inode); +} + +/* NLS management function */ +u16 nls_upper(struct super_block *sb, u16 a); +int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b); +int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b); +void nls_uniname_to_dosname(struct super_block *sb, + struct dos_name_t *p_dosname, + struct uni_name_t *p_uniname, bool *p_lossy); +void nls_dosname_to_uniname(struct super_block *sb, + struct uni_name_t *p_uniname, + struct dos_name_t *p_dosname); +void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring, + struct uni_name_t *p_uniname); +void nls_cstring_to_uniname(struct super_block *sb, + struct uni_name_t *p_uniname, u8 *p_cstring, + bool *p_lossy); + +/* buffer cache management */ +void buf_init(struct super_block *sb); +void buf_shutdown(struct super_block *sb); +int FAT_read(struct super_block *sb, u32 loc, u32 *content);
+s32 FAT_write(struct super_block *sb, u32 loc, u32 content); +u8 *FAT_getblk(struct super_block *sb, sector_t sec); +void FAT_modify(struct super_block *sb, sector_t sec); +void FAT_release_all(struct super_block *sb); +void FAT_sync(struct super_block *sb); +u8 *buf_getblk(struct super_block *sb, sector_t sec); +void buf_modify(struct super_block *sb, sector_t sec); +void buf_lock(struct super_block *sb, sector_t sec); +void buf_unlock(struct super_block *sb, sector_t sec); +void buf_release(struct super_block *sb, sector_t sec); +void buf_release_all(struct super_block *sb); +void buf_sync(struct super_block *sb); + +/* fs management functions */ +void fs_set_vol_flags(struct super_block *sb, u32 new_flag); +void fs_error(struct super_block *sb); + +/* cluster management functions */ +s32 clear_cluster(struct super_block *sb, u32 clu); +s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc, + struct chain_t *p_chain); +s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc, + struct chain_t *p_chain); +void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain, + s32 do_relse); +void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain, + s32 do_relse); +u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain); +s32 count_num_clusters(struct super_block *sb, struct chain_t *dir); +s32 fat_count_used_clusters(struct super_block *sb); +s32 exfat_count_used_clusters(struct super_block *sb); +void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len); + +/* allocation bitmap management functions */ +s32 load_alloc_bitmap(struct super_block *sb); +void free_alloc_bitmap(struct super_block *sb); +s32 set_alloc_bitmap(struct super_block *sb, u32 clu); +s32 clr_alloc_bitmap(struct super_block *sb, u32 clu); +u32 test_alloc_bitmap(struct super_block *sb, u32 clu); +void sync_alloc_bitmap(struct super_block *sb); + +/* upcase table management functions */ +s32 load_upcase_table(struct super_block *sb); +void free_upcase_table(struct super_block *sb); + +/* dir entry management functions */ +u32 fat_get_entry_type(struct dentry_t *p_entry); +u32 exfat_get_entry_type(struct dentry_t *p_entry); +void fat_set_entry_type(struct dentry_t *p_entry, u32 type); +void exfat_set_entry_type(struct dentry_t *p_entry, u32 type); +u32 fat_get_entry_attr(struct dentry_t *p_entry); +u32 exfat_get_entry_attr(struct dentry_t *p_entry); +void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr); +void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr); +u8 fat_get_entry_flag(struct dentry_t *p_entry); +u8 exfat_get_entry_flag(struct dentry_t *p_entry); +void fat_set_entry_flag(struct dentry_t *p_entry, u8 flag); +void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flag); +u32 fat_get_entry_clu0(struct dentry_t *p_entry); +u32 exfat_get_entry_clu0(struct dentry_t *p_entry); +void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu); +void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu); +u64 fat_get_entry_size(struct dentry_t *p_entry); +u64 exfat_get_entry_size(struct dentry_t *p_entry); +void fat_set_entry_size(struct dentry_t *p_entry, u64 size); +void exfat_set_entry_size(struct dentry_t *p_entry, u64 size); +struct timestamp_t *tm_current(struct timestamp_t *tm); +void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode); +void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode); +void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 
mode); +void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode); +s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry, + u32 type, u32 start_clu, u64 size); +s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, u32 type, u32 start_clu, u64 size); +s32 fat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 num_entries, + struct uni_name_t *p_uniname, + struct dos_name_t *p_dosname); +s32 exfat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 num_entries, + struct uni_name_t *p_uniname, + struct dos_name_t *p_dosname); +void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu); +void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, + u16 *uniname); +void init_file_entry(struct file_dentry_t *ep, u32 type); +void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, + u64 size); +void init_name_entry(struct name_dentry_t *ep, u16 *uniname); +void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 order, s32 num_entries); +void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 order, s32 num_entries); + +s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry, + sector_t *sector, s32 *offset); +struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector, + s32 offset); +struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir, + s32 entry, sector_t *sector); +struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + u32 type, + struct dentry_t **file_ep); +void release_entry_set(struct entry_set_cache_t *es); +s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es); +s32 write_partial_entries_in_entry_set(struct super_block *sb, + struct entry_set_cache_t *es, + struct dentry_t *ep, u32 count); +s32 search_deleted_or_unused_entry(struct super_block *sb, + struct chain_t *p_dir, s32 num_entries); +s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, + s32 num_entries); +s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir, + struct uni_name_t *p_uniname, s32 num_entries, + struct dos_name_t *p_dosname, u32 type); +s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir, + struct uni_name_t *p_uniname, s32 num_entries, + struct dos_name_t *p_dosname, u32 type); +s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir, + s32 entry, struct dentry_t *p_entry); +s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir, + s32 entry, struct dentry_t *p_entry); +s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir, + u32 type); +void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir, + s32 entry); +void update_dir_checksum_with_entry_set(struct super_block *sb, + struct entry_set_cache_t *es); +bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir); + +/* name conversion functions */ +s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir, + struct uni_name_t *p_uniname, s32 *entries, + struct dos_name_t *p_dosname); +void get_uni_name_from_dos_entry(struct super_block *sb, + struct dos_dentry_t *ep, + struct uni_name_t *p_uniname, u8 mode); +void fat_get_uni_name_from_ext_entry(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + u16 *uniname); +void 
exfat_get_uni_name_from_ext_entry(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + u16 *uniname); +s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, + u16 *uniname, s32 order); +s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, + u16 *uniname, s32 order); +s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir, + struct dos_name_t *p_dosname); +void fat_attach_count_to_dos_name(u8 *dosname, s32 count); +s32 fat_calc_num_entries(struct uni_name_t *p_uniname); +s32 exfat_calc_num_entries(struct uni_name_t *p_uniname); +u8 calc_checksum_1byte(void *data, s32 len, u8 chksum); +u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type); +u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type); + +/* name resolution functions */ +s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir, + struct uni_name_t *p_uniname); +s32 resolve_name(u8 *name, u8 **arg); + +/* file operation functions */ +s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr); +s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr); +s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr); +s32 create_dir(struct inode *inode, struct chain_t *p_dir, + struct uni_name_t *p_uniname, struct file_id_t *fid); +s32 create_file(struct inode *inode, struct chain_t *p_dir, + struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid); +void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry); +s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry, + struct uni_name_t *p_uniname, struct file_id_t *fid); +s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry, + struct chain_t *p_newdir, struct uni_name_t *p_uniname, + struct file_id_t *fid); + +/* sector read/write functions */ +int sector_read(struct super_block *sb, sector_t sec, + struct buffer_head **bh, bool read); +int sector_write(struct super_block *sb, sector_t sec, + struct buffer_head *bh, bool sync); +int multi_sector_read(struct super_block *sb, sector_t sec, + struct buffer_head **bh, s32 num_secs, bool read); +int multi_sector_write(struct super_block *sb, sector_t sec, + struct buffer_head *bh, s32 num_secs, bool sync); + +void bdev_open(struct super_block *sb); +void bdev_close(struct super_block *sb); +int bdev_read(struct super_block *sb, sector_t secno, + struct buffer_head **bh, u32 num_secs, bool read); +int bdev_write(struct super_block *sb, sector_t secno, + struct buffer_head *bh, u32 num_secs, bool sync); +int bdev_sync(struct super_block *sb); + +extern const u8 uni_upcase[]; +#endif /* _EXFAT_H */ diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c new file mode 100644 index 000000000000..f086c75e7076 --- /dev/null +++ b/drivers/staging/exfat/exfat_blkdev.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
+ */ + +#include +#include +#include +#include "exfat.h" + +void bdev_open(struct super_block *sb) +{ + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bd->opened) + return; + + p_bd->sector_size = bdev_logical_block_size(sb->s_bdev); + p_bd->sector_size_bits = ilog2(p_bd->sector_size); + p_bd->sector_size_mask = p_bd->sector_size - 1; + p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >> + p_bd->sector_size_bits; + p_bd->opened = true; +} + +void bdev_close(struct super_block *sb) +{ + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + p_bd->opened = false; +} + +int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh, + u32 num_secs, bool read) +{ + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + struct exfat_sb_info *sbi = EXFAT_SB(sb); + long flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) + return FFS_MEDIAERR; +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + + if (!p_bd->opened) + return FFS_MEDIAERR; + + if (*bh) + __brelse(*bh); + + if (read) + *bh = __bread(sb->s_bdev, secno, + num_secs << p_bd->sector_size_bits); + else + *bh = __getblk(sb->s_bdev, secno, + num_secs << p_bd->sector_size_bits); + + if (*bh) + return 0; + + WARN(!p_fs->dev_ejected, + "[EXFAT] No bh, device seems wrong or to be ejected.\n"); + + return FFS_MEDIAERR; +} + +int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh, + u32 num_secs, bool sync) +{ + s32 count; + struct buffer_head *bh2; + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + struct exfat_sb_info *sbi = EXFAT_SB(sb); + long flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) + return FFS_MEDIAERR; +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + + if (!p_bd->opened) + return FFS_MEDIAERR; + + if (secno == bh->b_blocknr) { + lock_buffer(bh); + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + unlock_buffer(bh); + if (sync && (sync_dirty_buffer(bh) != 0)) + return FFS_MEDIAERR; + } else { + count = num_secs << p_bd->sector_size_bits; + + bh2 = __getblk(sb->s_bdev, secno, count); + if (!bh2) + goto no_bh; + + lock_buffer(bh2); + memcpy(bh2->b_data, bh->b_data, count); + set_buffer_uptodate(bh2); + mark_buffer_dirty(bh2); + unlock_buffer(bh2); + if (sync && (sync_dirty_buffer(bh2) != 0)) { + __brelse(bh2); + goto no_bh; + } + __brelse(bh2); + } + + return 0; + +no_bh: + WARN(!p_fs->dev_ejected, + "[EXFAT] No bh, device seems wrong or to be ejected.\n"); + + return FFS_MEDIAERR; +} + +int bdev_sync(struct super_block *sb) +{ + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + struct exfat_sb_info *sbi = EXFAT_SB(sb); + long flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) + return FFS_MEDIAERR; +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + + if (!p_bd->opened) + return FFS_MEDIAERR; + + return sync_blockdev(sb->s_bdev); +} diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c new file mode 100644 index 000000000000..f05d692c2b1e --- /dev/null +++ b/drivers/staging/exfat/exfat_cache.c @@ -0,0 +1,722 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
+ */ + +#include +#include +#include +#include "exfat.h" + +#define LOCKBIT 0x01 +#define DIRTYBIT 0x02 + +/* Local variables */ +static DEFINE_SEMAPHORE(f_sem); +static DEFINE_SEMAPHORE(b_sem); + +static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec) +{ + s32 off; + struct buf_cache_t *bp, *hp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + off = (sec + + (sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE - 1); + + hp = &p_fs->FAT_cache_hash_list[off]; + for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) { + if ((bp->drv == p_fs->drv) && (bp->sec == sec)) { + WARN(!bp->buf_bh, + "[EXFAT] FAT_cache has no bh. It will make system panic.\n"); + + touch_buffer(bp->buf_bh); + return bp; + } + } + return NULL; +} + +static void push_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list) +{ + bp->next = list->next; + bp->prev = list; + list->next->prev = bp; + list->next = bp; +} + +static void push_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list) +{ + bp->prev = list->prev; + bp->next = list; + list->prev->next = bp; + list->prev = bp; +} + +static void move_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list) +{ + bp->prev->next = bp->next; + bp->next->prev = bp->prev; + push_to_mru(bp, list); +} + +static void move_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list) +{ + bp->prev->next = bp->next; + bp->next->prev = bp->prev; + push_to_lru(bp, list); +} + +static struct buf_cache_t *FAT_cache_get(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = p_fs->FAT_cache_lru_list.prev; + + move_to_mru(bp, &p_fs->FAT_cache_lru_list); + return bp; +} + +static void FAT_cache_insert_hash(struct super_block *sb, + struct buf_cache_t *bp) +{ + s32 off; + struct buf_cache_t *hp; + struct fs_info_t *p_fs; + + p_fs = &(EXFAT_SB(sb)->fs_info); + off = (bp->sec + + (bp->sec >> p_fs->sectors_per_clu_bits)) & + (FAT_CACHE_HASH_SIZE - 1); + + hp = &p_fs->FAT_cache_hash_list[off]; + bp->hash_next = hp->hash_next; + bp->hash_prev = hp; + hp->hash_next->hash_prev = bp; + hp->hash_next = bp; +} + +static void FAT_cache_remove_hash(struct buf_cache_t *bp) +{ + (bp->hash_prev)->hash_next = bp->hash_next; + (bp->hash_next)->hash_prev = bp->hash_prev; +} + +static void buf_cache_insert_hash(struct super_block *sb, + struct buf_cache_t *bp) +{ + s32 off; + struct buf_cache_t *hp; + struct fs_info_t *p_fs; + + p_fs = &(EXFAT_SB(sb)->fs_info); + off = (bp->sec + + (bp->sec >> p_fs->sectors_per_clu_bits)) & + (BUF_CACHE_HASH_SIZE - 1); + + hp = &p_fs->buf_cache_hash_list[off]; + bp->hash_next = hp->hash_next; + bp->hash_prev = hp; + hp->hash_next->hash_prev = bp; + hp->hash_next = bp; +} + +static void buf_cache_remove_hash(struct buf_cache_t *bp) +{ + (bp->hash_prev)->hash_next = bp->hash_next; + (bp->hash_next)->hash_prev = bp->hash_prev; +} + +void buf_init(struct super_block *sb) +{ + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + int i; + + /* LRU list */ + p_fs->FAT_cache_lru_list.next = &p_fs->FAT_cache_lru_list; + p_fs->FAT_cache_lru_list.prev = &p_fs->FAT_cache_lru_list; + + for (i = 0; i < FAT_CACHE_SIZE; i++) { + p_fs->FAT_cache_array[i].drv = -1; + p_fs->FAT_cache_array[i].sec = ~0; + p_fs->FAT_cache_array[i].flag = 0; + p_fs->FAT_cache_array[i].buf_bh = NULL; + p_fs->FAT_cache_array[i].prev = NULL; + p_fs->FAT_cache_array[i].next = NULL; + push_to_mru(&p_fs->FAT_cache_array[i], + &p_fs->FAT_cache_lru_list); + } + + p_fs->buf_cache_lru_list.next = 
&p_fs->buf_cache_lru_list; + p_fs->buf_cache_lru_list.prev = &p_fs->buf_cache_lru_list; + + for (i = 0; i < BUF_CACHE_SIZE; i++) { + p_fs->buf_cache_array[i].drv = -1; + p_fs->buf_cache_array[i].sec = ~0; + p_fs->buf_cache_array[i].flag = 0; + p_fs->buf_cache_array[i].buf_bh = NULL; + p_fs->buf_cache_array[i].prev = NULL; + p_fs->buf_cache_array[i].next = NULL; + push_to_mru(&p_fs->buf_cache_array[i], + &p_fs->buf_cache_lru_list); + } + + /* HASH list */ + for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) { + p_fs->FAT_cache_hash_list[i].drv = -1; + p_fs->FAT_cache_hash_list[i].sec = ~0; + p_fs->FAT_cache_hash_list[i].hash_next = + &p_fs->FAT_cache_hash_list[i]; + p_fs->FAT_cache_hash_list[i].hash_prev = + &p_fs->FAT_cache_hash_list[i]; + } + + for (i = 0; i < FAT_CACHE_SIZE; i++) + FAT_cache_insert_hash(sb, &p_fs->FAT_cache_array[i]); + + for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) { + p_fs->buf_cache_hash_list[i].drv = -1; + p_fs->buf_cache_hash_list[i].sec = ~0; + p_fs->buf_cache_hash_list[i].hash_next = + &p_fs->buf_cache_hash_list[i]; + p_fs->buf_cache_hash_list[i].hash_prev = + &p_fs->buf_cache_hash_list[i]; + } + + for (i = 0; i < BUF_CACHE_SIZE; i++) + buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]); +} + +void buf_shutdown(struct super_block *sb) +{ +} + +static int __FAT_read(struct super_block *sb, u32 loc, u32 *content) +{ + s32 off; + u32 _content; + sector_t sec; + u8 *fat_sector, *fat_entry; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_fs->vol_type == FAT12) { + sec = p_fs->FAT1_start_sector + + ((loc + (loc >> 1)) >> p_bd->sector_size_bits); + off = (loc + (loc >> 1)) & p_bd->sector_size_mask; + + if (off == (p_bd->sector_size - 1)) { + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + _content = (u32)fat_sector[off]; + + fat_sector = FAT_getblk(sb, ++sec); + if (!fat_sector) + return -1; + + _content |= (u32)fat_sector[0] << 8; + } else { + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + _content = GET16(fat_entry); + } + + if (loc & 1) + _content >>= 4; + + _content &= 0x00000FFF; + + if (_content >= CLUSTER_16(0x0FF8)) { + *content = CLUSTER_32(~0); + return 0; + } + *content = CLUSTER_32(_content); + return 0; + } else if (p_fs->vol_type == FAT16) { + sec = p_fs->FAT1_start_sector + + (loc >> (p_bd->sector_size_bits - 1)); + off = (loc << 1) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + + _content = GET16_A(fat_entry); + + _content &= 0x0000FFFF; + + if (_content >= CLUSTER_16(0xFFF8)) { + *content = CLUSTER_32(~0); + return 0; + } + *content = CLUSTER_32(_content); + return 0; + } else if (p_fs->vol_type == FAT32) { + sec = p_fs->FAT1_start_sector + + (loc >> (p_bd->sector_size_bits - 2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + + _content = GET32_A(fat_entry); + + _content &= 0x0FFFFFFF; + + if (_content >= CLUSTER_32(0x0FFFFFF8)) { + *content = CLUSTER_32(~0); + return 0; + } + *content = CLUSTER_32(_content); + return 0; + } else if (p_fs->vol_type == EXFAT) { + sec = p_fs->FAT1_start_sector + + (loc >> (p_bd->sector_size_bits - 2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + _content = GET32_A(fat_entry); + + if (_content >= 
CLUSTER_32(0xFFFFFFF8)) { + *content = CLUSTER_32(~0); + return 0; + } + *content = CLUSTER_32(_content); + return 0; + } + + /* Unknown volume type, throw in the towel and go home */ + *content = CLUSTER_32(~0); + return 0; +} + +/* in : sb, loc + * out: content + * returns 0 on success + * -1 on error + */ +int FAT_read(struct super_block *sb, u32 loc, u32 *content) +{ + s32 ret; + + down(&f_sem); + ret = __FAT_read(sb, loc, content); + up(&f_sem); + + return ret; +} + +static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content) +{ + s32 off; + sector_t sec; + u8 *fat_sector, *fat_entry; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_fs->vol_type == FAT12) { + content &= 0x00000FFF; + + sec = p_fs->FAT1_start_sector + + ((loc + (loc >> 1)) >> p_bd->sector_size_bits); + off = (loc + (loc >> 1)) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + if (loc & 1) { /* odd */ + content <<= 4; + + if (off == (p_bd->sector_size - 1)) { + fat_sector[off] = (u8)(content | + (fat_sector[off] & + 0x0F)); + FAT_modify(sb, sec); + + fat_sector = FAT_getblk(sb, ++sec); + if (!fat_sector) + return -1; + + fat_sector[0] = (u8)(content >> 8); + } else { + fat_entry = &fat_sector[off]; + content |= GET16(fat_entry) & 0x000F; + + SET16(fat_entry, content); + } + } else { /* even */ + fat_sector[off] = (u8)(content); + + if (off == (p_bd->sector_size - 1)) { + fat_sector[off] = (u8)(content); + FAT_modify(sb, sec); + + fat_sector = FAT_getblk(sb, ++sec); + fat_sector[0] = (u8)((fat_sector[0] & 0xF0) | + (content >> 8)); + } else { + fat_entry = &fat_sector[off]; + content |= GET16(fat_entry) & 0xF000; + + SET16(fat_entry, content); + } + } + } + + else if (p_fs->vol_type == FAT16) { + content &= 0x0000FFFF; + + sec = p_fs->FAT1_start_sector + (loc >> + (p_bd->sector_size_bits - 1)); + off = (loc << 1) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + + SET16_A(fat_entry, content); + } else if (p_fs->vol_type == FAT32) { + content &= 0x0FFFFFFF; + + sec = p_fs->FAT1_start_sector + (loc >> + (p_bd->sector_size_bits - 2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + + content |= GET32_A(fat_entry) & 0xF0000000; + + SET32_A(fat_entry, content); + } else { /* p_fs->vol_type == EXFAT */ + sec = p_fs->FAT1_start_sector + (loc >> + (p_bd->sector_size_bits - 2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &fat_sector[off]; + + SET32_A(fat_entry, content); + } + + FAT_modify(sb, sec); + return 0; +} + +int FAT_write(struct super_block *sb, u32 loc, u32 content) +{ + s32 ret; + + down(&f_sem); + ret = __FAT_write(sb, loc, content); + up(&f_sem); + + return ret; +} + +u8 *FAT_getblk(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = FAT_cache_find(sb, sec); + if (bp) { + move_to_mru(bp, &p_fs->FAT_cache_lru_list); + return bp->buf_bh->b_data; + } + + bp = FAT_cache_get(sb, sec); + + FAT_cache_remove_hash(bp); + + bp->drv = p_fs->drv; + bp->sec = sec; + bp->flag = 0; + + FAT_cache_insert_hash(sb, bp); + + if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) { + FAT_cache_remove_hash(bp); + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + 
bp->buf_bh = NULL; + + move_to_lru(bp, &p_fs->FAT_cache_lru_list); + return NULL; + } + + return bp->buf_bh->b_data; +} + +void FAT_modify(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + + bp = FAT_cache_find(sb, sec); + if (bp) + sector_write(sb, sec, bp->buf_bh, 0); +} + +void FAT_release_all(struct super_block *sb) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + down(&f_sem); + + bp = p_fs->FAT_cache_lru_list.next; + while (bp != &p_fs->FAT_cache_lru_list) { + if (bp->drv == p_fs->drv) { + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + + if (bp->buf_bh) { + __brelse(bp->buf_bh); + bp->buf_bh = NULL; + } + } + bp = bp->next; + } + + up(&f_sem); +} + +void FAT_sync(struct super_block *sb) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + down(&f_sem); + + bp = p_fs->FAT_cache_lru_list.next; + while (bp != &p_fs->FAT_cache_lru_list) { + if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) { + sync_dirty_buffer(bp->buf_bh); + bp->flag &= ~(DIRTYBIT); + } + bp = bp->next; + } + + up(&f_sem); +} + +static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec) +{ + s32 off; + struct buf_cache_t *bp, *hp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + off = (sec + (sec >> p_fs->sectors_per_clu_bits)) & + (BUF_CACHE_HASH_SIZE - 1); + + hp = &p_fs->buf_cache_hash_list[off]; + for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) { + if ((bp->drv == p_fs->drv) && (bp->sec == sec)) { + touch_buffer(bp->buf_bh); + return bp; + } + } + return NULL; +} + +static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = p_fs->buf_cache_lru_list.prev; + while (bp->flag & LOCKBIT) + bp = bp->prev; + + move_to_mru(bp, &p_fs->buf_cache_lru_list); + return bp; +} + +static u8 *__buf_getblk(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = buf_cache_find(sb, sec); + if (bp) { + move_to_mru(bp, &p_fs->buf_cache_lru_list); + return bp->buf_bh->b_data; + } + + bp = buf_cache_get(sb, sec); + + buf_cache_remove_hash(bp); + + bp->drv = p_fs->drv; + bp->sec = sec; + bp->flag = 0; + + buf_cache_insert_hash(sb, bp); + + if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) { + buf_cache_remove_hash(bp); + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + bp->buf_bh = NULL; + + move_to_lru(bp, &p_fs->buf_cache_lru_list); + return NULL; + } + + return bp->buf_bh->b_data; +} + +u8 *buf_getblk(struct super_block *sb, sector_t sec) +{ + u8 *buf; + + down(&b_sem); + buf = __buf_getblk(sb, sec); + up(&b_sem); + + return buf; +} + +void buf_modify(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + + down(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp)) + sector_write(sb, sec, bp->buf_bh, 0); + + WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n", + (unsigned long long)sec); + + up(&b_sem); +} + +void buf_lock(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + + down(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp)) + bp->flag |= LOCKBIT; + + WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n", + (unsigned long long)sec); + + up(&b_sem); +} + +void buf_unlock(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + + down(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp)) + bp->flag &= ~(LOCKBIT); + + WARN(!bp, 
"[EXFAT] failed to find buffer_cache(sector:%llu).\n", + (unsigned long long)sec); + + up(&b_sem); +} + +void buf_release(struct super_block *sb, sector_t sec) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + down(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp)) { + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + + if (bp->buf_bh) { + __brelse(bp->buf_bh); + bp->buf_bh = NULL; + } + + move_to_lru(bp, &p_fs->buf_cache_lru_list); + } + + up(&b_sem); +} + +void buf_release_all(struct super_block *sb) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + down(&b_sem); + + bp = p_fs->buf_cache_lru_list.next; + while (bp != &p_fs->buf_cache_lru_list) { + if (bp->drv == p_fs->drv) { + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + + if (bp->buf_bh) { + __brelse(bp->buf_bh); + bp->buf_bh = NULL; + } + } + bp = bp->next; + } + + up(&b_sem); +} + +void buf_sync(struct super_block *sb) +{ + struct buf_cache_t *bp; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + down(&b_sem); + + bp = p_fs->buf_cache_lru_list.next; + while (bp != &p_fs->buf_cache_lru_list) { + if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) { + sync_dirty_buffer(bp->buf_bh); + bp->flag &= ~(DIRTYBIT); + } + bp = bp->next; + } + + up(&b_sem); +} diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c new file mode 100644 index 000000000000..9f76ca175c80 --- /dev/null +++ b/drivers/staging/exfat/exfat_core.c @@ -0,0 +1,3704 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include "exfat.h" + + +static void __set_sb_dirty(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + sbi->s_dirt = 1; +} + +static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE]; + +static char *reserved_names[] = { + "AUX ", "CON ", "NUL ", "PRN ", + "COM1 ", "COM2 ", "COM3 ", "COM4 ", + "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ", + "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ", + "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ", + NULL +}; + +static u8 free_bit[] = { + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */ + 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */ + 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 40 ~ 59 */ + 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 60 ~ 79 */ + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, /* 80 ~ 99 */ + 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, /* 100 ~ 119 */ + 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 120 ~ 139 */ + 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, /* 140 ~ 159 */ + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 160 ~ 179 */ + 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, /* 180 ~ 199 */ + 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 200 ~ 219 */ + 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 220 ~ 239 */ + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /* 240 ~ 254 */ +}; + +static u8 used_bit[] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, /* 0 ~ 19 */ + 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, /* 20 ~ 39 */ + 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, /* 40 ~ 59 */ + 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, /* 60 ~ 79 */ + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 
3, 3, 4, /* 80 ~ 99 */ + 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, /* 100 ~ 119 */ + 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, /* 120 ~ 139 */ + 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, /* 140 ~ 159 */ + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, /* 160 ~ 179 */ + 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, /* 180 ~ 199 */ + 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, /* 200 ~ 219 */ + 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, /* 220 ~ 239 */ + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /* 240 ~ 255 */ +}; + +#define BITMAP_LOC(v) ((v) >> 3) +#define BITMAP_SHIFT(v) ((v) & 0x07) + +static inline s32 exfat_bitmap_test(u8 *bitmap, int i) +{ + u8 data; + + data = bitmap[BITMAP_LOC(i)]; + if ((data >> BITMAP_SHIFT(i)) & 0x01) + return 1; + return 0; +} + +static inline void exfat_bitmap_set(u8 *bitmap, int i) +{ + bitmap[BITMAP_LOC(i)] |= (0x01 << BITMAP_SHIFT(i)); +} + +static inline void exfat_bitmap_clear(u8 *bitmap, int i) +{ + bitmap[BITMAP_LOC(i)] &= ~(0x01 << BITMAP_SHIFT(i)); +} + +/* + * File System Management Functions + */ + +void fs_set_vol_flags(struct super_block *sb, u32 new_flag) +{ + struct pbr_sector_t *p_pbr; + struct bpbex_t *p_bpb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_fs->vol_flag == new_flag) + return; + + p_fs->vol_flag = new_flag; + + if (p_fs->vol_type == EXFAT) { + if (p_fs->pbr_bh == NULL) { + if (sector_read(sb, p_fs->PBR_sector, + &p_fs->pbr_bh, 1) != FFS_SUCCESS) + return; + } + + p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data; + p_bpb = (struct bpbex_t *)p_pbr->bpb; + SET16(p_bpb->vol_flags, (u16)new_flag); + + /* XXX duyoung + * what can we do here? (cuz fs_set_vol_flags() is void) + */ + if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh))) + sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1); + else + sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0); + } +} + +void fs_error(struct super_block *sb) +{ + struct exfat_mount_options *opts = &EXFAT_SB(sb)->options; + + if (opts->errors == EXFAT_ERRORS_PANIC) { + panic("[EXFAT] Filesystem panic from previous error\n"); + } else if ((opts->errors == EXFAT_ERRORS_RO) && !sb_rdonly(sb)) { + sb->s_flags |= SB_RDONLY; + pr_err("[EXFAT] Filesystem has been set read-only\n"); + } +} + +/* + * Cluster Management Functions + */ + +s32 clear_cluster(struct super_block *sb, u32 clu) +{ + sector_t s, n; + s32 ret = FFS_SUCCESS; + struct buffer_head *tmp_bh = NULL; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (clu == CLUSTER_32(0)) { /* FAT16 root_dir */ + s = p_fs->root_start_sector; + n = p_fs->data_start_sector; + } else { + s = START_SECTOR(clu); + n = s + p_fs->sectors_per_clu; + } + + for (; s < n; s++) { + ret = sector_read(sb, s, &tmp_bh, 0); + if (ret != FFS_SUCCESS) + return ret; + + memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size); + ret = sector_write(sb, s, tmp_bh, 0); + if (ret != FFS_SUCCESS) + break; + } + + brelse(tmp_bh); + return ret; +} + +s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc, + struct chain_t *p_chain) +{ + int i, num_clusters = 0; + u32 new_clu, last_clu = CLUSTER_32(~0), read_clu; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + new_clu = p_chain->dir; + if (new_clu == CLUSTER_32(~0)) + new_clu = p_fs->clu_srch_ptr; + else if (new_clu >= p_fs->num_clusters) + new_clu = 2; + + __set_sb_dirty(sb); + + p_chain->dir = CLUSTER_32(~0); + + for 
(i = 2; i < p_fs->num_clusters; i++) { + if (FAT_read(sb, new_clu, &read_clu) != 0) + return -1; + + if (read_clu == CLUSTER_32(0)) { + if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0) + return -1; + num_clusters++; + + if (p_chain->dir == CLUSTER_32(~0)) { + p_chain->dir = new_clu; + } else { + if (FAT_write(sb, last_clu, new_clu) < 0) + return -1; + } + + last_clu = new_clu; + + if ((--num_alloc) == 0) { + p_fs->clu_srch_ptr = new_clu; + if (p_fs->used_clusters != (u32) ~0) + p_fs->used_clusters += num_clusters; + + return num_clusters; + } + } + if ((++new_clu) >= p_fs->num_clusters) + new_clu = 2; + } + + p_fs->clu_srch_ptr = new_clu; + if (p_fs->used_clusters != (u32) ~0) + p_fs->used_clusters += num_clusters; + + return num_clusters; +} + +s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc, + struct chain_t *p_chain) +{ + s32 num_clusters = 0; + u32 hint_clu, new_clu, last_clu = CLUSTER_32(~0); + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + hint_clu = p_chain->dir; + if (hint_clu == CLUSTER_32(~0)) { + hint_clu = test_alloc_bitmap(sb, p_fs->clu_srch_ptr-2); + if (hint_clu == CLUSTER_32(~0)) + return 0; + } else if (hint_clu >= p_fs->num_clusters) { + hint_clu = 2; + p_chain->flags = 0x01; + } + + __set_sb_dirty(sb); + + p_chain->dir = CLUSTER_32(~0); + + while ((new_clu = test_alloc_bitmap(sb, hint_clu-2)) != CLUSTER_32(~0)) { + if (new_clu != hint_clu) { + if (p_chain->flags == 0x03) { + exfat_chain_cont_cluster(sb, p_chain->dir, + num_clusters); + p_chain->flags = 0x01; + } + } + + if (set_alloc_bitmap(sb, new_clu-2) != FFS_SUCCESS) + return -1; + + num_clusters++; + + if (p_chain->flags == 0x01) { + if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0) + return -1; + } + + if (p_chain->dir == CLUSTER_32(~0)) { + p_chain->dir = new_clu; + } else { + if (p_chain->flags == 0x01) { + if (FAT_write(sb, last_clu, new_clu) < 0) + return -1; + } + } + last_clu = new_clu; + + if ((--num_alloc) == 0) { + p_fs->clu_srch_ptr = hint_clu; + if (p_fs->used_clusters != (u32) ~0) + p_fs->used_clusters += num_clusters; + + p_chain->size += num_clusters; + return num_clusters; + } + + hint_clu = new_clu + 1; + if (hint_clu >= p_fs->num_clusters) { + hint_clu = 2; + + if (p_chain->flags == 0x03) { + exfat_chain_cont_cluster(sb, p_chain->dir, + num_clusters); + p_chain->flags = 0x01; + } + } + } + + p_fs->clu_srch_ptr = hint_clu; + if (p_fs->used_clusters != (u32) ~0) + p_fs->used_clusters += num_clusters; + + p_chain->size += num_clusters; + return num_clusters; +} + +void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain, + s32 do_relse) +{ + s32 num_clusters = 0; + u32 clu, prev; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + int i; + sector_t sector; + + if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) + return; + __set_sb_dirty(sb); + clu = p_chain->dir; + + if (p_chain->size <= 0) + return; + + do { + if (p_fs->dev_ejected) + break; + + if (do_relse) { + sector = START_SECTOR(clu); + for (i = 0; i < p_fs->sectors_per_clu; i++) + buf_release(sb, sector+i); + } + + prev = clu; + if (FAT_read(sb, clu, &clu) == -1) + break; + + if (FAT_write(sb, prev, CLUSTER_32(0)) < 0) + break; + num_clusters++; + + } while (clu != CLUSTER_32(~0)); + + if (p_fs->used_clusters != (u32) ~0) + p_fs->used_clusters -= num_clusters; +} + +void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain, + s32 do_relse) +{ + s32 num_clusters = 0; + u32 clu; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + int i; + sector_t sector; + + if 
((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) + return; + + if (p_chain->size <= 0) { + pr_err("[EXFAT] free_cluster : skip free-req clu:%u, because of zero-size truncation\n", + p_chain->dir); + return; + } + + __set_sb_dirty(sb); + clu = p_chain->dir; + + if (p_chain->flags == 0x03) { + do { + if (do_relse) { + sector = START_SECTOR(clu); + for (i = 0; i < p_fs->sectors_per_clu; i++) + buf_release(sb, sector+i); + } + + if (clr_alloc_bitmap(sb, clu-2) != FFS_SUCCESS) + break; + clu++; + + num_clusters++; + } while (num_clusters < p_chain->size); + } else { + do { + if (p_fs->dev_ejected) + break; + + if (do_relse) { + sector = START_SECTOR(clu); + for (i = 0; i < p_fs->sectors_per_clu; i++) + buf_release(sb, sector+i); + } + + if (clr_alloc_bitmap(sb, clu-2) != FFS_SUCCESS) + break; + + if (FAT_read(sb, clu, &clu) == -1) + break; + num_clusters++; + } while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0))); + } + + if (p_fs->used_clusters != (u32) ~0) + p_fs->used_clusters -= num_clusters; +} + +u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain) +{ + u32 clu, next; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + clu = p_chain->dir; + + if (p_chain->flags == 0x03) { + clu += p_chain->size - 1; + } else { + while ((FAT_read(sb, clu, &next) == 0) && + (next != CLUSTER_32(~0))) { + if (p_fs->dev_ejected) + break; + clu = next; + } + } + + return clu; +} + +s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain) +{ + int i, count = 0; + u32 clu; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) + return 0; + + clu = p_chain->dir; + + if (p_chain->flags == 0x03) { + count = p_chain->size; + } else { + for (i = 2; i < p_fs->num_clusters; i++) { + count++; + if (FAT_read(sb, clu, &clu) != 0) + return 0; + if (clu == CLUSTER_32(~0)) + break; + } + } + + return count; +} + +s32 fat_count_used_clusters(struct super_block *sb) +{ + int i, count = 0; + u32 clu; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = 2; i < p_fs->num_clusters; i++) { + if (FAT_read(sb, i, &clu) != 0) + break; + if (clu != CLUSTER_32(0)) + count++; + } + + return count; +} + +s32 exfat_count_used_clusters(struct super_block *sb) +{ + int i, map_i, map_b, count = 0; + u8 k; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + map_i = map_b = 0; + + for (i = 2; i < p_fs->num_clusters; i += 8) { + k = *(((u8 *) p_fs->vol_amap[map_i]->b_data) + map_b); + count += used_bit[k]; + + if ((++map_b) >= p_bd->sector_size) { + map_i++; + map_b = 0; + } + } + + return count; +} + +void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len) +{ + if (len == 0) + return; + + while (len > 1) { + if (FAT_write(sb, chain, chain+1) < 0) + break; + chain++; + len--; + } + FAT_write(sb, chain, CLUSTER_32(~0)); +} + +/* + * Allocation Bitmap Management Functions + */ + +s32 load_alloc_bitmap(struct super_block *sb) +{ + int i, j, ret; + u32 map_size; + u32 type; + sector_t sector; + struct chain_t clu; + struct bmap_dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + clu.dir = p_fs->root_dir; + clu.flags = 0x01; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < p_fs->dentries_per_clu; i++) { + ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu, + i, NULL); + if (!ep) + return FFS_MEDIAERR; + + 
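+			/*
+			 * The allocation bitmap is advertised by a critical
+			 * primary dentry in the root directory (TYPE_BITMAP,
+			 * type code 0x81 in exfat_get_entry_type()); flags
+			 * bit 0 selects the second (TexFAT) bitmap, so only
+			 * flags == 0x0, the first bitmap, is loaded here.
+			 */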
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep); + + if (type == TYPE_UNUSED) + break; + if (type != TYPE_BITMAP) + continue; + + if (ep->flags == 0x0) { + p_fs->map_clu = GET32_A(ep->start_clu); + map_size = (u32) GET64_A(ep->size); + + p_fs->map_sectors = ((map_size-1) >> p_bd->sector_size_bits) + 1; + + p_fs->vol_amap = kmalloc_array(p_fs->map_sectors, + sizeof(struct buffer_head *), + GFP_KERNEL); + if (p_fs->vol_amap == NULL) + return FFS_MEMORYERR; + + sector = START_SECTOR(p_fs->map_clu); + + for (j = 0; j < p_fs->map_sectors; j++) { + p_fs->vol_amap[j] = NULL; + ret = sector_read(sb, sector+j, &(p_fs->vol_amap[j]), 1); + if (ret != FFS_SUCCESS) { + /* release all buffers and free vol_amap */ + i = 0; + while (i < j) + brelse(p_fs->vol_amap[i++]); + + kfree(p_fs->vol_amap); + p_fs->vol_amap = NULL; + return ret; + } + } + + p_fs->pbr_bh = NULL; + return FFS_SUCCESS; + } + } + + if (FAT_read(sb, clu.dir, &clu.dir) != 0) + return FFS_MEDIAERR; + } + + return FFS_FORMATERR; +} + +void free_alloc_bitmap(struct super_block *sb) +{ + int i; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + brelse(p_fs->pbr_bh); + + for (i = 0; i < p_fs->map_sectors; i++) + __brelse(p_fs->vol_amap[i]); + + kfree(p_fs->vol_amap); + p_fs->vol_amap = NULL; +} + +s32 set_alloc_bitmap(struct super_block *sb, u32 clu) +{ + int i, b; + sector_t sector; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + i = clu >> (p_bd->sector_size_bits + 3); + b = clu & ((p_bd->sector_size << 3) - 1); + + sector = START_SECTOR(p_fs->map_clu) + i; + + exfat_bitmap_set((u8 *) p_fs->vol_amap[i]->b_data, b); + + return sector_write(sb, sector, p_fs->vol_amap[i], 0); +} + +s32 clr_alloc_bitmap(struct super_block *sb, u32 clu) +{ + int i, b; + sector_t sector; +#ifdef CONFIG_EXFAT_DISCARD + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct exfat_mount_options *opts = &sbi->options; + int ret; +#endif /* CONFIG_EXFAT_DISCARD */ + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + i = clu >> (p_bd->sector_size_bits + 3); + b = clu & ((p_bd->sector_size << 3) - 1); + + sector = START_SECTOR(p_fs->map_clu) + i; + + exfat_bitmap_clear((u8 *) p_fs->vol_amap[i]->b_data, b); + +#ifdef CONFIG_EXFAT_DISCARD + if (opts->discard) { + ret = sb_issue_discard(sb, START_SECTOR(clu), + (1 << p_fs->sectors_per_clu_bits), + GFP_NOFS, 0); + if (ret == -EOPNOTSUPP) { + pr_warn("discard not supported by device, disabling"); + opts->discard = 0; + } + } +#endif /* CONFIG_EXFAT_DISCARD */ + + return sector_write(sb, sector, p_fs->vol_amap[i], 0); +} + +u32 test_alloc_bitmap(struct super_block *sb, u32 clu) +{ + int i, map_i, map_b; + u32 clu_base, clu_free; + u8 k, clu_mask; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + clu_base = (clu & ~(0x7)) + 2; + clu_mask = (1 << (clu - clu_base + 2)) - 1; + + map_i = clu >> (p_bd->sector_size_bits + 3); + map_b = (clu >> 3) & p_bd->sector_size_mask; + + for (i = 2; i < p_fs->num_clusters; i += 8) { + k = *(((u8 *) p_fs->vol_amap[map_i]->b_data) + map_b); + if (clu_mask > 0) { + k |= clu_mask; + clu_mask = 0; + } + if (k < 0xFF) { + clu_free = clu_base + free_bit[k]; + if (clu_free < p_fs->num_clusters) + return clu_free; + } + clu_base += 8; + + if (((++map_b) >= p_bd->sector_size) || + (clu_base >= p_fs->num_clusters)) { + if ((++map_i) >= p_fs->map_sectors) { + clu_base = 2; + map_i = 0; + } + map_b = 0; + } + } +
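+	/*
+	 * Worked example for the free_bit[] lookup above: a bitmap byte of
+	 * 0xB7 (binary 10110111) has bits 0-2 set and bit 3 clear, and
+	 * free_bit[0xB7] is 3, so the first free cluster in that byte is
+	 * clu_base + 3; a fully used byte (0xFF) fails the k < 0xFF test
+	 * and is skipped.
+	 */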
return CLUSTER_32(~0); +} + +void sync_alloc_bitmap(struct super_block *sb) +{ + int i; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_fs->vol_amap == NULL) + return; + + for (i = 0; i < p_fs->map_sectors; i++) + sync_dirty_buffer(p_fs->vol_amap[i]); +} + +/* + * Upcase table Management Functions + */ +static s32 __load_upcase_table(struct super_block *sb, sector_t sector, + u32 num_sectors, u32 utbl_checksum) +{ + int i, ret = FFS_ERROR; + u32 j; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + struct buffer_head *tmp_bh = NULL; + sector_t end_sector = num_sectors + sector; + + u8 skip = FALSE; + u32 index = 0; + u16 uni = 0; + u16 **upcase_table; + + u32 checksum = 0; + + upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *), + GFP_KERNEL); + if (upcase_table == NULL) + return FFS_MEMORYERR; + memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *)); + + while (sector < end_sector) { + ret = sector_read(sb, sector, &tmp_bh, 1); + if (ret != FFS_SUCCESS) { + pr_debug("sector read (0x%llX)fail\n", + (unsigned long long)sector); + goto error; + } + sector++; + + for (i = 0; i < p_bd->sector_size && index <= 0xFFFF; i += 2) { + uni = GET16(((u8 *) tmp_bh->b_data)+i); + + checksum = ((checksum & 1) ? 0x80000000 : 0) + + (checksum >> 1) + *(((u8 *)tmp_bh->b_data) + + i); + checksum = ((checksum & 1) ? 0x80000000 : 0) + + (checksum >> 1) + *(((u8 *)tmp_bh->b_data) + + (i + 1)); + + if (skip) { + pr_debug("skip from 0x%X ", index); + index += uni; + pr_debug("to 0x%X (amount of 0x%X)\n", + index, uni); + skip = FALSE; + } else if (uni == index) + index++; + else if (uni == 0xFFFF) + skip = TRUE; + else { /* uni != index , uni != 0xFFFF */ + u16 col_index = get_col_index(index); + + if (upcase_table[col_index] == NULL) { + pr_debug("alloc = 0x%X\n", col_index); + upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT, + sizeof(u16), GFP_KERNEL); + if (upcase_table[col_index] == NULL) { + ret = FFS_MEMORYERR; + goto error; + } + + for (j = 0; j < UTBL_ROW_COUNT; j++) + upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j; + } + + upcase_table[col_index][get_row_index(index)] = uni; + index++; + } + } + } + if (index >= 0xFFFF && utbl_checksum == checksum) { + if (tmp_bh) + brelse(tmp_bh); + return FFS_SUCCESS; + } + ret = FFS_ERROR; +error: + if (tmp_bh) + brelse(tmp_bh); + free_upcase_table(sb); + return ret; +} + +static s32 __load_default_upcase_table(struct super_block *sb) +{ + int i, ret = FFS_ERROR; + u32 j; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + u8 skip = FALSE; + u32 index = 0; + u16 uni = 0; + u16 **upcase_table; + + upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *), + GFP_KERNEL); + if (upcase_table == NULL) + return FFS_MEMORYERR; + memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *)); + + for (i = 0; index <= 0xFFFF && i < NUM_UPCASE*2; i += 2) { + uni = GET16(uni_upcase + i); + if (skip) { + pr_debug("skip from 0x%X ", index); + index += uni; + pr_debug("to 0x%X (amount of 0x%X)\n", index, uni); + skip = FALSE; + } else if (uni == index) + index++; + else if (uni == 0xFFFF) + skip = TRUE; + else { /* uni != index , uni != 0xFFFF */ + u16 col_index = get_col_index(index); + + if (upcase_table[col_index] == NULL) { + pr_debug("alloc = 0x%X\n", col_index); + upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT, + sizeof(u16), + GFP_KERNEL); + if (upcase_table[col_index] == NULL) { + ret = FFS_MEMORYERR; + goto error; + } + + for (j = 0; j < 
UTBL_ROW_COUNT; j++) + upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j; + } + + upcase_table[col_index][get_row_index(index)] = uni; + index++; + } + } + + if (index >= 0xFFFF) + return FFS_SUCCESS; + +error: + /* FATAL error: default upcase table has error */ + free_upcase_table(sb); + return ret; +} + +s32 load_upcase_table(struct super_block *sb) +{ + int i; + u32 tbl_clu, tbl_size; + sector_t sector; + u32 type, num_sectors; + struct chain_t clu; + struct case_dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + clu.dir = p_fs->root_dir; + clu.flags = 0x01; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + while (clu.dir != CLUSTER_32(~0)) { + for (i = 0; i < p_fs->dentries_per_clu; i++) { + ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu, + i, NULL); + if (!ep) + return FFS_MEDIAERR; + + type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep); + + if (type == TYPE_UNUSED) + break; + if (type != TYPE_UPCASE) + continue; + + tbl_clu = GET32_A(ep->start_clu); + tbl_size = (u32) GET64_A(ep->size); + + sector = START_SECTOR(tbl_clu); + num_sectors = ((tbl_size-1) >> p_bd->sector_size_bits) + 1; + if (__load_upcase_table(sb, sector, num_sectors, + GET32_A(ep->checksum)) != FFS_SUCCESS) + break; + return FFS_SUCCESS; + } + if (FAT_read(sb, clu.dir, &clu.dir) != 0) + return FFS_MEDIAERR; + } + /* load default upcase table */ + return __load_default_upcase_table(sb); +} + +void free_upcase_table(struct super_block *sb) +{ + u32 i; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + u16 **upcase_table; + + upcase_table = p_fs->vol_utbl; + for (i = 0; i < UTBL_COL_COUNT; i++) + kfree(upcase_table[i]); + + kfree(p_fs->vol_utbl); + p_fs->vol_utbl = NULL; +} + +/* + * Directory Entry Management Functions + */ + +u32 fat_get_entry_type(struct dentry_t *p_entry) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry; + + if (*(ep->name) == 0x0) + return TYPE_UNUSED; + + else if (*(ep->name) == 0xE5) + return TYPE_DELETED; + + else if (ep->attr == ATTR_EXTEND) + return TYPE_EXTEND; + + else if ((ep->attr & (ATTR_SUBDIR|ATTR_VOLUME)) == ATTR_VOLUME) + return TYPE_VOLUME; + + else if ((ep->attr & (ATTR_SUBDIR|ATTR_VOLUME)) == ATTR_SUBDIR) + return TYPE_DIR; + + return TYPE_FILE; +} + +u32 exfat_get_entry_type(struct dentry_t *p_entry) +{ + struct file_dentry_t *ep = (struct file_dentry_t *) p_entry; + + if (ep->type == 0x0) { + return TYPE_UNUSED; + } else if (ep->type < 0x80) { + return TYPE_DELETED; + } else if (ep->type == 0x80) { + return TYPE_INVALID; + } else if (ep->type < 0xA0) { + if (ep->type == 0x81) { + return TYPE_BITMAP; + } else if (ep->type == 0x82) { + return TYPE_UPCASE; + } else if (ep->type == 0x83) { + return TYPE_VOLUME; + } else if (ep->type == 0x85) { + if (GET16_A(ep->attr) & ATTR_SUBDIR) + return TYPE_DIR; + else + return TYPE_FILE; + } + return TYPE_CRITICAL_PRI; + } else if (ep->type < 0xC0) { + if (ep->type == 0xA0) + return TYPE_GUID; + else if (ep->type == 0xA1) + return TYPE_PADDING; + else if (ep->type == 0xA2) + return TYPE_ACLTAB; + return TYPE_BENIGN_PRI; + } else if (ep->type < 0xE0) { + if (ep->type == 0xC0) + return TYPE_STREAM; + else if (ep->type == 0xC1) + return TYPE_EXTEND; + else if (ep->type == 0xC2) + return TYPE_ACL; + return TYPE_CRITICAL_SEC; + } + + return TYPE_BENIGN_SEC; +} + +void fat_set_entry_type(struct dentry_t *p_entry, u32 type) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry; + + if (type == TYPE_UNUSED) + *(ep->name) = 
0x0; + + else if (type == TYPE_DELETED) + *(ep->name) = 0xE5; + + else if (type == TYPE_EXTEND) + ep->attr = ATTR_EXTEND; + + else if (type == TYPE_DIR) + ep->attr = ATTR_SUBDIR; + + else if (type == TYPE_FILE) + ep->attr = ATTR_ARCHIVE; + + else if (type == TYPE_SYMLINK) + ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK; +} + +void exfat_set_entry_type(struct dentry_t *p_entry, u32 type) +{ + struct file_dentry_t *ep = (struct file_dentry_t *) p_entry; + + if (type == TYPE_UNUSED) { + ep->type = 0x0; + } else if (type == TYPE_DELETED) { + ep->type &= ~0x80; + } else if (type == TYPE_STREAM) { + ep->type = 0xC0; + } else if (type == TYPE_EXTEND) { + ep->type = 0xC1; + } else if (type == TYPE_BITMAP) { + ep->type = 0x81; + } else if (type == TYPE_UPCASE) { + ep->type = 0x82; + } else if (type == TYPE_VOLUME) { + ep->type = 0x83; + } else if (type == TYPE_DIR) { + ep->type = 0x85; + SET16_A(ep->attr, ATTR_SUBDIR); + } else if (type == TYPE_FILE) { + ep->type = 0x85; + SET16_A(ep->attr, ATTR_ARCHIVE); + } else if (type == TYPE_SYMLINK) { + ep->type = 0x85; + SET16_A(ep->attr, ATTR_ARCHIVE | ATTR_SYMLINK); + } +} + +u32 fat_get_entry_attr(struct dentry_t *p_entry) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry; + + return (u32) ep->attr; +} + +u32 exfat_get_entry_attr(struct dentry_t *p_entry) +{ + struct file_dentry_t *ep = (struct file_dentry_t *) p_entry; + + return (u32) GET16_A(ep->attr); +} + +void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry; + + ep->attr = (u8) attr; +} + +void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr) +{ + struct file_dentry_t *ep = (struct file_dentry_t *) p_entry; + + SET16_A(ep->attr, (u16) attr); +} + +u8 fat_get_entry_flag(struct dentry_t *p_entry) +{ + return 0x01; +} + +u8 exfat_get_entry_flag(struct dentry_t *p_entry) +{ + struct strm_dentry_t *ep = (struct strm_dentry_t *) p_entry; + + return ep->flags; +} + +void fat_set_entry_flag(struct dentry_t *p_entry, u8 flags) +{ +} + +void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags) +{ + struct strm_dentry_t *ep = (struct strm_dentry_t *) p_entry; + + ep->flags = flags; +} + +u32 fat_get_entry_clu0(struct dentry_t *p_entry) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry; + + return ((u32)GET16_A(ep->start_clu_hi) << 16) | + GET16_A(ep->start_clu_lo); +} + +u32 exfat_get_entry_clu0(struct dentry_t *p_entry) +{ + struct strm_dentry_t *ep = (struct strm_dentry_t *) p_entry; + + return GET32_A(ep->start_clu); +} + +void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry; + + SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu)); + SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16)); +} + +void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu) +{ + struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; + + SET32_A(ep->start_clu, start_clu); +} + +u64 fat_get_entry_size(struct dentry_t *p_entry) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry; + + return (u64) GET32_A(ep->size); +} + +u64 exfat_get_entry_size(struct dentry_t *p_entry) +{ + struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry; + + return GET64_A(ep->valid_size); +} + +void fat_set_entry_size(struct dentry_t *p_entry, u64 size) +{ + struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry; + + SET32_A(ep->size, (u32) size); +} + +void exfat_set_entry_size(struct dentry_t *p_entry, u64 size) +{ + struct strm_dentry_t *ep = (struct 
strm_dentry_t *)p_entry; + + SET64_A(ep->valid_size, size); + SET64_A(ep->size, size); +} + +void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode) +{ + u16 t = 0x00, d = 0x21; + struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry; + + switch (mode) { + case TM_CREATE: + t = GET16_A(ep->create_time); + d = GET16_A(ep->create_date); + break; + case TM_MODIFY: + t = GET16_A(ep->modify_time); + d = GET16_A(ep->modify_date); + break; + } + + tp->sec = (t & 0x001F) << 1; + tp->min = (t >> 5) & 0x003F; + tp->hour = (t >> 11); + tp->day = (d & 0x001F); + tp->mon = (d >> 5) & 0x000F; + tp->year = (d >> 9); +} + +void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode) +{ + u16 t = 0x00, d = 0x21; + struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; + + switch (mode) { + case TM_CREATE: + t = GET16_A(ep->create_time); + d = GET16_A(ep->create_date); + break; + case TM_MODIFY: + t = GET16_A(ep->modify_time); + d = GET16_A(ep->modify_date); + break; + case TM_ACCESS: + t = GET16_A(ep->access_time); + d = GET16_A(ep->access_date); + break; + } + + tp->sec = (t & 0x001F) << 1; + tp->min = (t >> 5) & 0x003F; + tp->hour = (t >> 11); + tp->day = (d & 0x001F); + tp->mon = (d >> 5) & 0x000F; + tp->year = (d >> 9); +} + +void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode) +{ + u16 t, d; + struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry; + + t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1); + d = (tp->year << 9) | (tp->mon << 5) | tp->day; + + switch (mode) { + case TM_CREATE: + SET16_A(ep->create_time, t); + SET16_A(ep->create_date, d); + break; + case TM_MODIFY: + SET16_A(ep->modify_time, t); + SET16_A(ep->modify_date, d); + break; + } +} + +void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp, + u8 mode) +{ + u16 t, d; + struct file_dentry_t *ep = (struct file_dentry_t *)p_entry; + + t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1); + d = (tp->year << 9) | (tp->mon << 5) | tp->day; + + switch (mode) { + case TM_CREATE: + SET16_A(ep->create_time, t); + SET16_A(ep->create_date, d); + break; + case TM_MODIFY: + SET16_A(ep->modify_time, t); + SET16_A(ep->modify_date, d); + break; + case TM_ACCESS: + SET16_A(ep->access_time, t); + SET16_A(ep->access_date, d); + break; + } +} + +s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry, + u32 type, u32 start_clu, u64 size) +{ + sector_t sector; + struct dos_dentry_t *dos_ep; + + dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry, + &sector); + if (!dos_ep) + return FFS_MEDIAERR; + + init_dos_entry(dos_ep, type, start_clu); + buf_modify(sb, sector); + + return FFS_SUCCESS; +} + +s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, u32 type, u32 start_clu, u64 size) +{ + sector_t sector; + u8 flags; + struct file_dentry_t *file_ep; + struct strm_dentry_t *strm_ep; +
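+	/*
+	 * Stream-extension GeneralSecondaryFlags: bit 0 = allocation
+	 * possible, bit 1 = no FAT chain. Files start out FAT-chained
+	 * (0x01); directories are created contiguous (0x03).
+	 */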
+ flags = (type == TYPE_FILE) ? 0x01 : 0x03; + + /* we cannot use get_entry_set_in_dir here because file ep is not initialized yet */ + file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry, + &sector); + if (!file_ep) + return FFS_MEDIAERR; + + strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry+1, + &sector); + if (!strm_ep) + return FFS_MEDIAERR; + + init_file_entry(file_ep, type); + buf_modify(sb, sector); + + init_strm_entry(strm_ep, flags, start_clu, size); + buf_modify(sb, sector); + + return FFS_SUCCESS; +} + +static s32 fat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 num_entries, + struct uni_name_t *p_uniname, + struct dos_name_t *p_dosname) +{ + int i; + sector_t sector; + u8 chksum; + u16 *uniname = p_uniname->name; + struct dos_dentry_t *dos_ep; + struct ext_dentry_t *ext_ep; + + dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry, + &sector); + if (!dos_ep) + return FFS_MEDIAERR; + + dos_ep->lcase = p_dosname->name_case; + memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH); + buf_modify(sb, sector); + + if ((--num_entries) > 0) { + chksum = calc_checksum_1byte((void *)dos_ep->name, + DOS_NAME_LENGTH, 0); + + for (i = 1; i < num_entries; i++) { + ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, + p_dir, + entry - i, + &sector); + if (!ext_ep) + return FFS_MEDIAERR; + + init_ext_entry(ext_ep, i, chksum, uniname); + buf_modify(sb, sector); + uniname += 13; + } + + ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, + entry - i, + &sector); + if (!ext_ep) + return FFS_MEDIAERR; + + init_ext_entry(ext_ep, i+0x40, chksum, uniname); + buf_modify(sb, sector); + } + + return FFS_SUCCESS; +} + +static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 num_entries, + struct uni_name_t *p_uniname, + struct dos_name_t *p_dosname) +{ + int i; + sector_t sector; + u16 *uniname = p_uniname->name; + struct file_dentry_t *file_ep; + struct strm_dentry_t *strm_ep; + struct name_dentry_t *name_ep; + + file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry, + &sector); + if (!file_ep) + return FFS_MEDIAERR; + + file_ep->num_ext = (u8)(num_entries - 1); + buf_modify(sb, sector); + + strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry+1, + &sector); + if (!strm_ep) + return FFS_MEDIAERR; + + strm_ep->name_len = p_uniname->name_len; + SET16_A(strm_ep->name_hash, p_uniname->name_hash); + buf_modify(sb, sector); + + for (i = 2; i < num_entries; i++) { + name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir, + entry + i, + &sector); + if (!name_ep) + return FFS_MEDIAERR; + + init_name_entry(name_ep, uniname); + buf_modify(sb, sector); + uniname += 15; + } + + update_dir_checksum(sb, p_dir, entry); + + return FFS_SUCCESS; +} + +void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu) +{ + struct timestamp_t tm, *tp; + + fat_set_entry_type((struct dentry_t *) ep, type); + SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu)); + SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16)); + SET32_A(ep->size, 0); + + tp = tm_current(&tm); + fat_set_entry_time((struct dentry_t *) ep, tp, TM_CREATE); + fat_set_entry_time((struct dentry_t *) ep, tp, TM_MODIFY); + SET16_A(ep->access_date, 0); + ep->create_time_ms = 0; +} + +void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname) +{ + int i; + u8 end = FALSE; + + fat_set_entry_type((struct dentry_t *) ep, TYPE_EXTEND); + ep->order = (u8) order; + ep->sysid = 0; + ep->checksum = chksum; + SET16_A(ep->start_clu, 0); +
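+	/*
+	 * A VFAT extended (LFN) slot stores 13 UTF-16 code units split
+	 * 5/6/2 across the entry; after the terminating 0x0000 the rest
+	 * is padded with 0xFFFF, which the three loops below implement.
+	 */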
+ for (i = 0; i < 10; i += 2) { + if (!end) { + SET16(ep->unicode_0_4+i, *uniname); + if (*uniname == 0x0) + end = TRUE; + else + uniname++; + } else { + SET16(ep->unicode_0_4+i, 0xFFFF); + } + } + + for (i = 0; i < 12; i += 2) { + if (!end) { + SET16_A(ep->unicode_5_10 + i, *uniname); + if (*uniname == 0x0) + end = TRUE; + else + uniname++; + } else { + SET16_A(ep->unicode_5_10 + i, 0xFFFF); + } + } + + for (i = 0; i < 4; i += 2) { + if (!end) { + SET16_A(ep->unicode_11_12 + i, *uniname); + if (*uniname == 0x0) + end = TRUE; + else + uniname++; + } else { + SET16_A(ep->unicode_11_12 + i, 0xFFFF); + } + } +} + +void init_file_entry(struct file_dentry_t *ep, u32 type) +{ + struct timestamp_t tm, *tp; + + exfat_set_entry_type((struct dentry_t *)ep, type); + + tp = tm_current(&tm); + exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE); + exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY); + exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS); + ep->create_time_ms = 0; + ep->modify_time_ms = 0; + ep->access_time_ms = 0; +} + +void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size) +{ + exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM); + ep->flags = flags; + SET32_A(ep->start_clu, start_clu); + SET64_A(ep->valid_size, size); + SET64_A(ep->size, size); +} + +void init_name_entry(struct name_dentry_t *ep, u16 *uniname) +{ + int i; + + exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND); + ep->flags = 0x0; + + for (i = 0; i < 30; i++, i++) { + SET16_A(ep->unicode_0_14+i, *uniname); + if (*uniname == 0x0) + break; + uniname++; + } +} + +void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 order, s32 num_entries) +{ + int i; + sector_t sector; + struct dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = num_entries-1; i >= order; i--) { + ep = get_entry_in_dir(sb, p_dir, entry-i, &sector); + if (!ep) + return; + + p_fs->fs_func->set_entry_type(ep, TYPE_DELETED); + buf_modify(sb, sector); + } +} + +void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir, + s32 entry, s32 order, s32 num_entries) +{ + int i; + sector_t sector; + struct dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = order; i < num_entries; i++) { + ep = get_entry_in_dir(sb, p_dir, entry+i, &sector); + if (!ep) + return; + + p_fs->fs_func->set_entry_type(ep, TYPE_DELETED); + buf_modify(sb, sector); + } +} + +void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir, + s32 entry) +{ + int i, num_entries; + sector_t sector; + u16 chksum; + struct file_dentry_t *file_ep; + struct dentry_t *ep; + + file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry, + &sector); + if (!file_ep) + return; + + buf_lock(sb, sector); + + num_entries = (s32) file_ep->num_ext + 1; + chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0, + CS_DIR_ENTRY); + + for (i = 1; i < num_entries; i++) { + ep = get_entry_in_dir(sb, p_dir, entry+i, NULL); + if (!ep) { + buf_unlock(sb, sector); + return; + } + + chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum, + CS_DEFAULT); + } + + SET16_A(file_ep->checksum, chksum); + buf_modify(sb, sector); + buf_unlock(sb, sector); +} + +void update_dir_checksum_with_entry_set(struct super_block *sb, + struct entry_set_cache_t *es) +{ + struct dentry_t *ep; + u16 chksum = 0; + s32 chksum_type = CS_DIR_ENTRY, i; + + ep = (struct dentry_t *)&(es->__buf); + for (i = 0; i < es->num_entries; i++) { + pr_debug("%s ep %p\n", __func__, ep); + chksum = 
calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum, + chksum_type); + ep++; + chksum_type = CS_DEFAULT; + } + + ep = (struct dentry_t *)&(es->__buf); + SET16_A(((struct file_dentry_t *)ep)->checksum, chksum); + write_whole_entry_set(sb, es); +} + +static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir, + s32 byte_offset, u32 *clu) +{ + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + s32 clu_offset; + u32 cur_clu; + + clu_offset = byte_offset >> p_fs->cluster_size_bits; + cur_clu = p_dir->dir; + + if (p_dir->flags == 0x03) { + cur_clu += clu_offset; + } else { + while (clu_offset > 0) { + if (FAT_read(sb, cur_clu, &cur_clu) == -1) + return FFS_MEDIAERR; + clu_offset--; + } + } + + if (clu) + *clu = cur_clu; + return FFS_SUCCESS; +} + +s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry, + sector_t *sector, s32 *offset) +{ + s32 off, ret; + u32 clu = 0; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + off = entry << DENTRY_SIZE_BITS; + + if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */ + *offset = off & p_bd->sector_size_mask; + *sector = off >> p_bd->sector_size_bits; + *sector += p_fs->root_start_sector; + } else { + ret = _walk_fat_chain(sb, p_dir, off, &clu); + if (ret != FFS_SUCCESS) + return ret; + + /* byte offset in cluster */ + off &= p_fs->cluster_size - 1; + + /* byte offset in sector */ + *offset = off & p_bd->sector_size_mask; + + /* sector offset in cluster */ + *sector = off >> p_bd->sector_size_bits; + *sector += START_SECTOR(clu); + } + return FFS_SUCCESS; +} + +struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector, + s32 offset) +{ + u8 *buf; + + buf = buf_getblk(sb, sector); + + if (buf == NULL) + return NULL; + + return (struct dentry_t *)(buf + offset); +} + +struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir, + s32 entry, sector_t *sector) +{ + s32 off; + sector_t sec; + u8 *buf; + + if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS) + return NULL; + + buf = buf_getblk(sb, sec); + + if (buf == NULL) + return NULL; + + if (sector != NULL) + *sector = sec; + return (struct dentry_t *)(buf + off); +} + +/* returns a set of dentries for a file or dir. + * Note that this is a copy (dump) of dentries so that user should call write_entry_set() + * to apply changes made in this entry set to the real device. + * in: + * sb+p_dir+entry: indicates a file/dir + * type: specifies how many dentries should be included. + * out: + * file_ep: will point the first dentry(= file dentry) on success + * return: + * pointer of entry set on success, + * NULL on failure. 
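 * note: on exFAT the set is laid out on disk as one file entry,
 * one stream-extension entry and then the name entries, so for
 * type == ES_ALL_ENTRIES the code reads num_ext + 1 consecutive
 * dentries starting at the file entry.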
+ */ + +#define ES_MODE_STARTED 0 +#define ES_MODE_GET_FILE_ENTRY 1 +#define ES_MODE_GET_STRM_ENTRY 2 +#define ES_MODE_GET_NAME_ENTRY 3 +#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4 +struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + u32 type, + struct dentry_t **file_ep) +{ + s32 off, ret, byte_offset; + u32 clu = 0; + sector_t sec; + u32 entry_type; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + struct entry_set_cache_t *es = NULL; + struct dentry_t *ep, *pos; + u8 *buf; + u8 num_entries; + s32 mode = ES_MODE_STARTED; + size_t bufsize; + + pr_debug("%s entered p_dir dir %u flags %x size %d\n", + __func__, p_dir->dir, p_dir->flags, p_dir->size); + + byte_offset = entry << DENTRY_SIZE_BITS; + ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu); + if (ret != FFS_SUCCESS) + return NULL; + + + /* byte offset in cluster */ + byte_offset &= p_fs->cluster_size - 1; + + /* byte offset in sector */ + off = byte_offset & p_bd->sector_size_mask; + + /* sector offset in cluster */ + sec = byte_offset >> p_bd->sector_size_bits; + sec += START_SECTOR(clu); + + buf = buf_getblk(sb, sec); + if (buf == NULL) + goto err_out; + + + ep = (struct dentry_t *)(buf + off); + entry_type = p_fs->fs_func->get_entry_type(ep); + + if ((entry_type != TYPE_FILE) + && (entry_type != TYPE_DIR)) + goto err_out; + + if (type == ES_ALL_ENTRIES) + num_entries = ((struct file_dentry_t *)ep)->num_ext+1; + else + num_entries = type; + + bufsize = offsetof(struct entry_set_cache_t, __buf) + (num_entries) * + sizeof(struct dentry_t); + pr_debug("%s: trying to kmalloc %zx bytes for %d entries\n", __func__, + bufsize, num_entries); + es = kmalloc(bufsize, GFP_KERNEL); + if (es == NULL) + goto err_out; + + es->num_entries = num_entries; + es->sector = sec; + es->offset = off; + es->alloc_flag = p_dir->flags; + + pos = (struct dentry_t *) &(es->__buf); + + while (num_entries) { + /* + * instead of copying whole sector, we will check every entry. + * this will provide minimum stability and consistency. 
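+		 * the ES_MODE_* switch below also validates the on-disk
+		 * order (file entry, then stream entry, then name entries,
+		 * then optional critical secondary entries) and bails out
+		 * on anything unexpected.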
+ */ + entry_type = p_fs->fs_func->get_entry_type(ep); + + if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) + goto err_out; + + switch (mode) { + case ES_MODE_STARTED: + if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) + mode = ES_MODE_GET_FILE_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_FILE_ENTRY: + if (entry_type == TYPE_STREAM) + mode = ES_MODE_GET_STRM_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_STRM_ENTRY: + if (entry_type == TYPE_EXTEND) + mode = ES_MODE_GET_NAME_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_NAME_ENTRY: + if (entry_type == TYPE_EXTEND) + break; + else if (entry_type == TYPE_STREAM) + goto err_out; + else if (entry_type & TYPE_CRITICAL_SEC) + mode = ES_MODE_GET_CRITICAL_SEC_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_CRITICAL_SEC_ENTRY: + if ((entry_type == TYPE_EXTEND) || + (entry_type == TYPE_STREAM)) + goto err_out; + else if ((entry_type & TYPE_CRITICAL_SEC) != + TYPE_CRITICAL_SEC) + goto err_out; + break; + } + + memcpy(pos, ep, sizeof(struct dentry_t)); + + if (--num_entries == 0) + break; + + if (((off + DENTRY_SIZE) & p_bd->sector_size_mask) < + (off & p_bd->sector_size_mask)) { + /* get the next sector */ + if (IS_LAST_SECTOR_IN_CLUSTER(sec)) { + if (es->alloc_flag == 0x03) { + clu++; + } else { + if (FAT_read(sb, clu, &clu) == -1) + goto err_out; + } + sec = START_SECTOR(clu); + } else { + sec++; + } + buf = buf_getblk(sb, sec); + if (buf == NULL) + goto err_out; + off = 0; + ep = (struct dentry_t *)(buf); + } else { + ep++; + off += DENTRY_SIZE; + } + pos++; + } + + if (file_ep) + *file_ep = (struct dentry_t *)&(es->__buf); + + pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n", + __func__, es, (unsigned long long)es->sector, es->offset, + es->alloc_flag, es->num_entries, &es->__buf); + return es; +err_out: + pr_debug("%s exited NULL (es %p)\n", __func__, es); + kfree(es); + return NULL; +} + +void release_entry_set(struct entry_set_cache_t *es) +{ + pr_debug("%s es=%p\n", __func__, es); + kfree(es); +} + + +static s32 __write_partial_entries_in_entry_set(struct super_block *sb, + struct entry_set_cache_t *es, + sector_t sec, s32 off, u32 count) +{ + s32 num_entries, buf_off = (off - es->offset); + u32 remaining_byte_in_sector, copy_entries; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + u32 clu; + u8 *buf, *esbuf = (u8 *)&(es->__buf); + + pr_debug("%s entered es %p sec %llu off %d count %d\n", + __func__, es, (unsigned long long)sec, off, count); + num_entries = count; + + while (num_entries) { + /* write on a per-sector basis */ + remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off; + copy_entries = min_t(s32, + remaining_byte_in_sector >> DENTRY_SIZE_BITS, + num_entries); + buf = buf_getblk(sb, sec); + if (buf == NULL) + goto err_out; + pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off); + pr_debug("copying %d entries from %p to sector %llu\n", + copy_entries, (esbuf + buf_off), + (unsigned long long)sec); + memcpy(buf + off, esbuf + buf_off, + copy_entries << DENTRY_SIZE_BITS); + buf_modify(sb, sec); + num_entries -= copy_entries; + + if (num_entries) { + /* get next sector */ + if (IS_LAST_SECTOR_IN_CLUSTER(sec)) { + clu = GET_CLUSTER_FROM_SECTOR(sec); + if (es->alloc_flag == 0x03) { + clu++; + } else { + if (FAT_read(sb, clu, &clu) == -1) + goto err_out; + } + sec = START_SECTOR(clu); + } else { + sec++; + } + off = 0; + buf_off += copy_entries << DENTRY_SIZE_BITS; + } + } 
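+	/*
+	 * Example with 512-byte sectors (32-byte dentries, 16 per sector):
+	 * writing 3 dentries starting at byte offset 480 copies one dentry
+	 * into the current sector, then crosses into the next sector (or
+	 * the next cluster, via the FAT or a simple +1 for contiguous
+	 * chains) for the remaining two.
+	 */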
+ + pr_debug("%s exited successfully\n", __func__); + return FFS_SUCCESS; +err_out: + pr_debug("%s failed\n", __func__); + return FFS_ERROR; +} + +/* write back all entries in entry set */ +s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es) +{ + return __write_partial_entries_in_entry_set(sb, es, es->sector, + es->offset, + es->num_entries); +} + +/* write back some entries in entry set */ +s32 write_partial_entries_in_entry_set(struct super_block *sb, + struct entry_set_cache_t *es, struct dentry_t *ep, u32 count) +{ + s32 ret, byte_offset, off; + u32 clu = 0; + sector_t sec; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + struct chain_t dir; + + /* validity check */ + if (ep + count > ((struct dentry_t *)&(es->__buf)) + es->num_entries) + return FFS_ERROR; + + dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector); + dir.flags = es->alloc_flag; + dir.size = 0xffffffff; /* XXX */ + + byte_offset = (es->sector - START_SECTOR(dir.dir)) << + p_bd->sector_size_bits; + byte_offset += ((u8 *)ep - (u8 *)&(es->__buf)) + es->offset; + + ret = _walk_fat_chain(sb, &dir, byte_offset, &clu); + if (ret != FFS_SUCCESS) + return ret; + + /* byte offset in cluster */ + byte_offset &= p_fs->cluster_size - 1; + + /* byte offset in sector */ + off = byte_offset & p_bd->sector_size_mask; + + /* sector offset in cluster */ + sec = byte_offset >> p_bd->sector_size_bits; + sec += START_SECTOR(clu); + return __write_partial_entries_in_entry_set(sb, es, sec, off, count); +} + +/* search EMPTY CONTINUOUS "num_entries" entries */ +s32 search_deleted_or_unused_entry(struct super_block *sb, + struct chain_t *p_dir, s32 num_entries) +{ + int i, dentry, num_empty = 0; + s32 dentries_per_clu; + u32 type; + struct chain_t clu; + struct dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + if (p_fs->hint_uentry.dir == p_dir->dir) { + if (p_fs->hint_uentry.entry == -1) + return -1; + + clu.dir = p_fs->hint_uentry.clu.dir; + clu.size = p_fs->hint_uentry.clu.size; + clu.flags = p_fs->hint_uentry.clu.flags; + + dentry = p_fs->hint_uentry.entry; + } else { + p_fs->hint_uentry.entry = -1; + + clu.dir = p_dir->dir; + clu.size = p_dir->size; + clu.flags = p_dir->flags; + + dentry = 0; + } + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + i = dentry % dentries_per_clu; + else + i = dentry & (dentries_per_clu-1); + + for (; i < dentries_per_clu; i++, dentry++) { + ep = get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return -1; + + type = p_fs->fs_func->get_entry_type(ep); + + if (type == TYPE_UNUSED) { + num_empty++; + if (p_fs->hint_uentry.entry == -1) { + p_fs->hint_uentry.dir = p_dir->dir; + p_fs->hint_uentry.entry = dentry; + + p_fs->hint_uentry.clu.dir = clu.dir; + p_fs->hint_uentry.clu.size = clu.size; + p_fs->hint_uentry.clu.flags = clu.flags; + } + } else if (type == TYPE_DELETED) { + num_empty++; + } else { + num_empty = 0; + } + + if (num_empty >= num_entries) { + p_fs->hint_uentry.dir = CLUSTER_32(~0); + p_fs->hint_uentry.entry = -1; + + if (p_fs->vol_type == EXFAT) + return dentry - (num_entries-1); + else + return dentry; + } + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0);
+ } else { + if (FAT_read(sb, clu.dir, &clu.dir) != 0) + return -1; + } + } + + return -1; +} + +s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries) +{ + s32 ret, dentry; + u32 last_clu; + sector_t sector; + u64 size = 0; + struct chain_t clu; + struct dentry_t *ep = NULL; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct file_id_t *fid = &(EXFAT_I(inode)->fid); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + return search_deleted_or_unused_entry(sb, p_dir, num_entries); + + while ((dentry = search_deleted_or_unused_entry(sb, p_dir, num_entries)) < 0) { + if (p_fs->dev_ejected) + break; + + if (p_fs->vol_type == EXFAT) { + if (p_dir->dir != p_fs->root_dir) + size = i_size_read(inode); + } + + last_clu = find_last_cluster(sb, p_dir); + clu.dir = last_clu + 1; + clu.size = 0; + clu.flags = p_dir->flags; + + /* (1) allocate a cluster */ + ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu); + if (ret < 1) + return -1; + + if (clear_cluster(sb, clu.dir) != FFS_SUCCESS) + return -1; + + /* (2) append to the FAT chain */ + if (clu.flags != p_dir->flags) { + exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size); + p_dir->flags = 0x01; + p_fs->hint_uentry.clu.flags = 0x01; + } + if (clu.flags == 0x01) + if (FAT_write(sb, last_clu, clu.dir) < 0) + return -1; + + if (p_fs->hint_uentry.entry == -1) { + p_fs->hint_uentry.dir = p_dir->dir; + p_fs->hint_uentry.entry = p_dir->size << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS); + + p_fs->hint_uentry.clu.dir = clu.dir; + p_fs->hint_uentry.clu.size = 0; + p_fs->hint_uentry.clu.flags = clu.flags; + } + p_fs->hint_uentry.clu.size++; + p_dir->size++; + + /* (3) update the directory entry */ + if (p_fs->vol_type == EXFAT) { + if (p_dir->dir != p_fs->root_dir) { + size += p_fs->cluster_size; + + ep = get_entry_in_dir(sb, &fid->dir, + fid->entry + 1, &sector); + if (!ep) + return -1; + p_fs->fs_func->set_entry_size(ep, size); + p_fs->fs_func->set_entry_flag(ep, p_dir->flags); + buf_modify(sb, sector); + + update_dir_checksum(sb, &(fid->dir), + fid->entry); + } + } + + i_size_write(inode, i_size_read(inode)+p_fs->cluster_size); + EXFAT_I(inode)->mmu_private += p_fs->cluster_size; + EXFAT_I(inode)->fid.size += p_fs->cluster_size; + EXFAT_I(inode)->fid.flags = p_dir->flags; + inode->i_blocks += 1 << (p_fs->cluster_size_bits - 9); + } + + return dentry; +} + +/* return values of fat_find_dir_entry() + * >= 0 : return dir entry position with the name in dir + * -1 : (root dir, ".") it is the root dir itself + * -2 : entry with the name does not exist + */ +s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir, + struct uni_name_t *p_uniname, s32 num_entries, + struct dos_name_t *p_dosname, u32 type) +{ + int i, dentry = 0, lossy = FALSE, len; + s32 order = 0, is_feasible_entry = TRUE, has_ext_entry = FALSE; + s32 dentries_per_clu; + u32 entry_type; + u16 entry_uniname[14], *uniname = NULL, unichar; + struct chain_t clu; + struct dentry_t *ep; + struct dos_dentry_t *dos_ep; + struct ext_dentry_t *ext_ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == p_fs->root_dir) { + if ((!nls_uniname_cmp(sb, p_uniname->name, + (u16 *)UNI_CUR_DIR_NAME)) || + (!nls_uniname_cmp(sb, p_uniname->name, + (u16 *)UNI_PAR_DIR_NAME))) + return -1; // special case, root directory itself + } + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; +
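+	/*
+	 * LFN slots (TYPE_EXTEND) precede the short entry in reverse
+	 * order: the last slot has 0x40 or'ed into its order byte and
+	 * each slot holds 13 UTF-16 units, which is why the matcher
+	 * below steps uniname in units of 13 characters.
+	 */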
+ clu.dir = p_dir->dir;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -2;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
+ if ((type == TYPE_ALL) || (type == entry_type)) {
+ if (is_feasible_entry && has_ext_entry)
+ return dentry;
+
+ dos_ep = (struct dos_dentry_t *) ep;
+ if ((!lossy) && (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name)))
+ return dentry;
+ }
+ is_feasible_entry = TRUE;
+ has_ext_entry = FALSE;
+ } else if (entry_type == TYPE_EXTEND) {
+ if (is_feasible_entry) {
+ ext_ep = (struct ext_dentry_t *) ep;
+ if (ext_ep->order > 0x40) {
+ order = (s32)(ext_ep->order - 0x40);
+ uniname = p_uniname->name + 13 * (order-1);
+ } else {
+ order = (s32) ext_ep->order;
+ uniname -= 13;
+ }
+
+ len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
+
+ unichar = *(uniname+len);
+ *(uniname+len) = 0x0;
+
+ if (nls_uniname_cmp(sb, uniname, entry_uniname))
+ is_feasible_entry = FALSE;
+
+ *(uniname+len) = unichar;
+ }
+ has_ext_entry = TRUE;
+ } else if (entry_type == TYPE_UNUSED) {
+ return -2;
+ }
+ is_feasible_entry = TRUE;
+ has_ext_entry = FALSE;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -2;
+ }
+
+ return -2;
+}
+
+/* return values of exfat_find_dir_entry()
+ * >= 0 : return dir entry position with the name in dir
+ * -1 : (root dir, ".") it is the root dir itself
+ * -2 : entry with the name does not exist
+ */
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type)
+{
+ int i = 0, dentry = 0, num_ext_entries = 0, len, step;
+ s32 order = 0, is_feasible_entry = FALSE;
+ s32 dentries_per_clu, num_empty = 0;
+ u32 entry_type;
+ u16 entry_uniname[16], *uniname = NULL, unichar;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+ struct name_dentry_t *name_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == p_fs->root_dir) {
+ if ((!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_CUR_DIR_NAME)) ||
+ (!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_PAR_DIR_NAME)))
+ return -1; // special case, root directory itself
+ }
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = -1;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ while (i < dentries_per_clu) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -2;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+ step = 1;
+
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
+ is_feasible_entry = FALSE;
+
+ if (p_fs->hint_uentry.entry == -1) {
+ num_empty++;
+
+ if (num_empty == 1) {
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = clu.size;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ if ((num_empty >= num_entries) || (entry_type == TYPE_UNUSED))
+ p_fs->hint_uentry.entry = dentry - (num_empty-1);
+ }
+
+ if (entry_type == TYPE_UNUSED)
+ return -2;
+ } else {
+ num_empty = 0;
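+
+ /*
+ * In-use entry: an exFAT file is described by an entry set - a
+ * file entry, a stream entry holding the name hash and length,
+ * and one name entry per 15 characters of name. The 16-bit hash
+ * is checked first, so most non-matching sets are skipped in one
+ * step without comparing the name text.
+ */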
+ + if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) { + file_ep = (struct file_dentry_t *) ep; + if ((type == TYPE_ALL) || (type == entry_type)) { + num_ext_entries = file_ep->num_ext; + is_feasible_entry = TRUE; + } else { + is_feasible_entry = FALSE; + step = file_ep->num_ext + 1; + } + } else if (entry_type == TYPE_STREAM) { + if (is_feasible_entry) { + strm_ep = (struct strm_dentry_t *)ep; + if (p_uniname->name_hash == GET16_A(strm_ep->name_hash) && + p_uniname->name_len == strm_ep->name_len) { + order = 1; + } else { + is_feasible_entry = FALSE; + step = num_ext_entries; + } + } + } else if (entry_type == TYPE_EXTEND) { + if (is_feasible_entry) { + name_ep = (struct name_dentry_t *)ep; + + if ((++order) == 2) + uniname = p_uniname->name; + else + uniname += 15; + + len = extract_uni_name_from_name_entry(name_ep, + entry_uniname, order); + + unichar = *(uniname+len); + *(uniname+len) = 0x0; + + if (nls_uniname_cmp(sb, uniname, entry_uniname)) { + is_feasible_entry = FALSE; + step = num_ext_entries - order + 1; + } else if (order == num_ext_entries) { + p_fs->hint_uentry.dir = CLUSTER_32(~0); + p_fs->hint_uentry.entry = -1; + return dentry - (num_ext_entries); + } + + *(uniname+len) = unichar; + } + } else { + is_feasible_entry = FALSE; + } + } + + i += step; + dentry += step; + } + + i -= dentries_per_clu; + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + if (FAT_read(sb, clu.dir, &clu.dir) != 0) + return -2; + } + } + + return -2; +} + +s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir, + s32 entry, struct dentry_t *p_entry) +{ + s32 count = 0; + u8 chksum; + struct dos_dentry_t *dos_ep = (struct dos_dentry_t *) p_entry; + struct ext_dentry_t *ext_ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + chksum = calc_checksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0); + + for (entry--; entry >= 0; entry--) { + ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, + entry, NULL); + if (!ext_ep) + return -1; + + if ((p_fs->fs_func->get_entry_type((struct dentry_t *)ext_ep) == + TYPE_EXTEND) && (ext_ep->checksum == chksum)) { + count++; + if (ext_ep->order > 0x40) + return count; + } else { + return count; + } + } + + return count; +} + +s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir, + s32 entry, struct dentry_t *p_entry) +{ + int i, count = 0; + u32 type; + struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry; + struct dentry_t *ext_ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) { + ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL); + if (!ext_ep) + return -1; + + type = p_fs->fs_func->get_entry_type(ext_ep); + if ((type == TYPE_EXTEND) || (type == TYPE_STREAM)) + count++; + else + return count; + } + + return count; +} + +s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir, + u32 type) +{ + int i, count = 0; + s32 dentries_per_clu; + u32 entry_type; + struct chain_t clu; + struct dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.size = p_dir->size; + clu.flags = p_dir->flags; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < 
dentries_per_clu; i++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -1;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if (entry_type == TYPE_UNUSED)
+ return count;
+ if (!(type & TYPE_CRITICAL_PRI) &&
+ !(type & TYPE_BENIGN_PRI))
+ continue;
+
+ if ((type == TYPE_ALL) || (type == entry_type))
+ count++;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -1;
+ }
+ }
+
+ return count;
+}
+
+bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
+{
+ int i, count = 0;
+ s32 dentries_per_clu;
+ u32 type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ break;
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED)
+ return TRUE;
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ return FALSE;
+
+ if (p_fs->vol_type == EXFAT)
+ return FALSE;
+ if ((p_dir->dir == p_fs->root_dir) || ((++count) > 2))
+ return FALSE;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ break;
+ }
+ }
+
+ return TRUE;
+}
+
+/*
+ * Name Conversion Functions
+ */
+
+/* input : dir, uni_name
+ * output : num_of_entry, dos_name(format : aaaaaa~1.bbb)
+ */
+s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 *entries,
+ struct dos_name_t *p_dosname)
+{
+ s32 ret, num_entries;
+ bool lossy = false;
+ char **r;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
+ if (num_entries == 0)
+ return FFS_INVALIDPATH;
+
+ if (p_fs->vol_type != EXFAT) {
+ nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy);
+
+ if (lossy) {
+ ret = fat_generate_dos_name(sb, p_dir, p_dosname);
+ if (ret)
+ return ret;
+ } else {
+ for (r = reserved_names; *r; r++) {
+ if (!strncmp((void *)p_dosname->name, *r, 8))
+ return FFS_INVALIDPATH;
+ }
+
+ if (p_dosname->name_case != 0xFF)
+ num_entries = 1;
+ }
+
+ if (num_entries > 1)
+ p_dosname->name_case = 0x0;
+ }
+
+ *entries = num_entries;
+
+ return FFS_SUCCESS;
+}
+
+void get_uni_name_from_dos_entry(struct super_block *sb,
+ struct dos_dentry_t *ep,
+ struct uni_name_t *p_uniname, u8 mode)
+{
+ struct dos_name_t dos_name;
+
+ if (mode == 0x0)
+ dos_name.name_case = 0x0;
+ else
+ dos_name.name_case = ep->lcase;
+
+ memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
+ nls_dosname_to_uniname(sb, p_uniname, &dos_name);
+}
+
+void fat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname)
+{
+ int i;
+ struct ext_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (entry--, i = 1; entry >= 0; entry--, i++) {
+ ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, entry, NULL);
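+ /*
+ * Extended entries sit immediately before the short entry and
+ * are walked backwards here; each carries up to 13 UTF-16
+ * characters of the long name.
+ */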
+ if (!ep) + return; + + if (p_fs->fs_func->get_entry_type((struct dentry_t *)ep) == + TYPE_EXTEND) { + extract_uni_name_from_ext_entry(ep, uniname, i); + if (ep->order > 0x40) + return; + } else { + return; + } + + uniname += 13; + } +} + +void exfat_get_uni_name_from_ext_entry(struct super_block *sb, + struct chain_t *p_dir, s32 entry, + u16 *uniname) +{ + int i; + struct dentry_t *ep; + struct entry_set_cache_t *es; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep); + if (es == NULL || es->num_entries < 3) { + if (es) + release_entry_set(es); + return; + } + + ep += 2; + + /* + * First entry : file entry + * Second entry : stream-extension entry + * Third entry : first file-name entry + * So, the index of first file-name dentry should start from 2. + */ + for (i = 2; i < es->num_entries; i++, ep++) { + if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND) + extract_uni_name_from_name_entry((struct name_dentry_t *) + ep, uniname, i); + else + goto out; + uniname += 15; + } + +out: + release_entry_set(es); +} + +s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, u16 *uniname, + s32 order) +{ + int i, len = 0; + + for (i = 0; i < 10; i += 2) { + *uniname = GET16(ep->unicode_0_4 + i); + if (*uniname == 0x0) + return len; + uniname++; + len++; + } + + if (order < 20) { + for (i = 0; i < 12; i += 2) { + *uniname = GET16_A(ep->unicode_5_10 + i); + if (*uniname == 0x0) + return len; + uniname++; + len++; + } + } else { + for (i = 0; i < 8; i += 2) { + *uniname = GET16_A(ep->unicode_5_10 + i); + if (*uniname == 0x0) + return len; + uniname++; + len++; + } + *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */ + return len; + } + + for (i = 0; i < 4; i += 2) { + *uniname = GET16_A(ep->unicode_11_12 + i); + if (*uniname == 0x0) + return len; + uniname++; + len++; + } + + *uniname = 0x0; + return len; +} + +s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname, + s32 order) +{ + int i, len = 0; + + for (i = 0; i < 30; i += 2) { + *uniname = GET16_A(ep->unicode_0_14 + i); + if (*uniname == 0x0) + return len; + uniname++; + len++; + } + + *uniname = 0x0; + return len; +} + +s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir, + struct dos_name_t *p_dosname) +{ + int i, j, count = 0, count_begin = FALSE; + s32 dentries_per_clu; + u32 type; + u8 bmap[128/* 1 ~ 1023 */]; + struct chain_t clu; + struct dos_dentry_t *ep; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + memset(bmap, 0, sizeof(bmap)); + exfat_bitmap_set(bmap, 0); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.flags = p_dir->flags; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < dentries_per_clu; i++) { + ep = (struct dos_dentry_t *)get_entry_in_dir(sb, &clu, + i, NULL); + if (!ep) + return FFS_MEDIAERR; + + type = p_fs->fs_func->get_entry_type((struct dentry_t *) + ep); + + if (type == TYPE_UNUSED) + break; + if ((type != TYPE_FILE) && (type != TYPE_DIR)) + continue; + + count = 0; + count_begin = FALSE; + + for (j = 0; j < 8; j++) { + if (ep->name[j] == ' ') + break; + + if (ep->name[j] == '~') { + count_begin = TRUE; + } else if (count_begin) { + if ((ep->name[j] >= '0') && + (ep->name[j] <= '9')) { + count = count * 10 + + (ep->name[j] - '0'); + } else { + count = 0; + count_begin = FALSE; + } + } + } + + if ((count > 0) && 
(count < 1024))
+ exfat_bitmap_set(bmap, count);
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+
+ count = 0;
+ for (i = 0; i < 128; i++) {
+ if (bmap[i] != 0xFF) {
+ for (j = 0; j < 8; j++) {
+ if (exfat_bitmap_test(&bmap[i], j) == 0) {
+ count = (i << 3) + j;
+ break;
+ }
+ }
+ if (count != 0)
+ break;
+ }
+ }
+
+ if ((count == 0) || (count >= 1024))
+ return FFS_FILEEXIST;
+ fat_attach_count_to_dos_name(p_dosname->name, count);
+
+ /* Now dos_name has DOS~????.EXT */
+ return FFS_SUCCESS;
+}
+
+void fat_attach_count_to_dos_name(u8 *dosname, s32 count)
+{
+ int i, j, length;
+ char str_count[6];
+
+ snprintf(str_count, sizeof(str_count), "~%d", count);
+ length = strlen(str_count);
+
+ i = 0;
+ j = 0;
+ while (j <= (8 - length)) {
+ i = j;
+ if (dosname[j] == ' ')
+ break;
+ if (dosname[j] & 0x80)
+ j += 2;
+ else
+ j++;
+ }
+
+ for (j = 0; j < length; i++, j++)
+ dosname[i] = (u8)str_count[j];
+
+ if (i == 7)
+ dosname[7] = ' ';
+}
+
+s32 fat_calc_num_entries(struct uni_name_t *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 dos name entry + extended entries */
+ return (len - 1) / 13 + 2;
+}
+
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 file entry + 1 stream entry + name entries */
+ return (len - 1) / 15 + 3;
+}
+
+u8 calc_checksum_1byte(void *data, s32 len, u8 chksum)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
+
+ return chksum;
+}
+
+u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ switch (type) {
+ case CS_DIR_ENTRY:
+ for (i = 0; i < len; i++, c++) {
+ if ((i == 2) || (i == 3))
+ continue;
+ chksum = (((chksum & 1) << 15) |
+ ((chksum & 0xFFFE) >> 1)) + (u16)*c;
+ }
+ break;
+ default:
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 15) |
+ ((chksum & 0xFFFE) >> 1)) + (u16)*c;
+ }
+
+ return chksum;
+}
+
+u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ switch (type) {
+ case CS_PBR_SECTOR:
+ for (i = 0; i < len; i++, c++) {
+ if ((i == 106) || (i == 107) || (i == 112))
+ continue;
+ chksum = (((chksum & 1) << 31) |
+ ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
+ }
+ break;
+ default:
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 31) |
+ ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
+ }
+
+ return chksum;
+}
+
+/*
+ * Name Resolution Functions
+ */
+
+/* return values of resolve_path()
+ * FFS_SUCCESS : success
+ * FFS_INVALIDPATH : the path is invalid or could not be converted
+ */
+s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname)
+{
+ bool lossy = false;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ if (strlen(path) >= (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+ return FFS_INVALIDPATH;
+
+ strcpy(name_buf, path);
+
+ nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
+ if (lossy)
+ return FFS_INVALIDPATH;
+
+ fid->size = i_size_read(inode);
+
+ p_dir->dir = fid->start_clu;
+ p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
+ p_dir->flags = fid->flags;
+
+ return FFS_SUCCESS;
+}
+
+/*
+ * File Operation Functions
+ */
+static struct fs_func fat_fs_func = {
+ .alloc_cluster =
fat_alloc_cluster, + .free_cluster = fat_free_cluster, + .count_used_clusters = fat_count_used_clusters, + + .init_dir_entry = fat_init_dir_entry, + .init_ext_entry = fat_init_ext_entry, + .find_dir_entry = fat_find_dir_entry, + .delete_dir_entry = fat_delete_dir_entry, + .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry, + .count_ext_entries = fat_count_ext_entries, + .calc_num_entries = fat_calc_num_entries, + + .get_entry_type = fat_get_entry_type, + .set_entry_type = fat_set_entry_type, + .get_entry_attr = fat_get_entry_attr, + .set_entry_attr = fat_set_entry_attr, + .get_entry_flag = fat_get_entry_flag, + .set_entry_flag = fat_set_entry_flag, + .get_entry_clu0 = fat_get_entry_clu0, + .set_entry_clu0 = fat_set_entry_clu0, + .get_entry_size = fat_get_entry_size, + .set_entry_size = fat_set_entry_size, + .get_entry_time = fat_get_entry_time, + .set_entry_time = fat_set_entry_time, +}; + +s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr) +{ + s32 num_reserved, num_root_sectors; + struct bpb16_t *p_bpb = (struct bpb16_t *)p_pbr->bpb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bpb->num_fats == 0) + return FFS_FORMATERR; + + num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS; + num_root_sectors = ((num_root_sectors - 1) >> + p_bd->sector_size_bits) + 1; + + p_fs->sectors_per_clu = p_bpb->sectors_per_clu; + p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu); + p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + + p_bd->sector_size_bits; + p_fs->cluster_size = 1 << p_fs->cluster_size_bits; + + p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors); + + p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved); + if (p_bpb->num_fats == 1) + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; + else + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + + p_fs->num_FAT_sectors; + + p_fs->root_start_sector = p_fs->FAT2_start_sector + + p_fs->num_FAT_sectors; + p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors; + + p_fs->num_sectors = GET16(p_bpb->num_sectors); + if (p_fs->num_sectors == 0) + p_fs->num_sectors = GET32(p_bpb->num_huge_sectors); + + num_reserved = p_fs->data_start_sector - p_fs->PBR_sector; + p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >> + p_fs->sectors_per_clu_bits) + 2; + /* because the cluster index starts with 2 */ + + if (p_fs->num_clusters < FAT12_THRESHOLD) + p_fs->vol_type = FAT12; + else + p_fs->vol_type = FAT16; + p_fs->vol_id = GET32(p_bpb->vol_serial); + + p_fs->root_dir = 0; + p_fs->dentries_in_root = GET16(p_bpb->num_root_entries); + p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - + DENTRY_SIZE_BITS); + + p_fs->vol_flag = VOL_CLEAN; + p_fs->clu_srch_ptr = 2; + p_fs->used_clusters = (u32)~0; + + p_fs->fs_func = &fat_fs_func; + + return FFS_SUCCESS; +} + +s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr) +{ + s32 num_reserved; + struct bpb32_t *p_bpb = (struct bpb32_t *)p_pbr->bpb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bpb->num_fats == 0) + return FFS_FORMATERR; + + p_fs->sectors_per_clu = p_bpb->sectors_per_clu; + p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu); + p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + + p_bd->sector_size_bits; + p_fs->cluster_size = 1 << p_fs->cluster_size_bits; + + p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors); + + 
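+ /*
+ * Lay out the volume from the BPB: FAT #1 follows the reserved
+ * sectors, FAT #2 (if present) follows FAT #1, and the data area
+ * begins right after the FATs because the FAT32 root directory
+ * is an ordinary cluster chain rather than a fixed region.
+ */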
p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved); + if (p_bpb->num_fats == 1) + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; + else + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + + p_fs->num_FAT_sectors; + + p_fs->root_start_sector = p_fs->FAT2_start_sector + + p_fs->num_FAT_sectors; + p_fs->data_start_sector = p_fs->root_start_sector; + + p_fs->num_sectors = GET32(p_bpb->num_huge_sectors); + num_reserved = p_fs->data_start_sector - p_fs->PBR_sector; + + p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >> + p_fs->sectors_per_clu_bits) + 2; + /* because the cluster index starts with 2 */ + + p_fs->vol_type = FAT32; + p_fs->vol_id = GET32(p_bpb->vol_serial); + + p_fs->root_dir = GET32(p_bpb->root_cluster); + p_fs->dentries_in_root = 0; + p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - + DENTRY_SIZE_BITS); + + p_fs->vol_flag = VOL_CLEAN; + p_fs->clu_srch_ptr = 2; + p_fs->used_clusters = (u32)~0; + + p_fs->fs_func = &fat_fs_func; + + return FFS_SUCCESS; +} + +static struct fs_func exfat_fs_func = { + .alloc_cluster = exfat_alloc_cluster, + .free_cluster = exfat_free_cluster, + .count_used_clusters = exfat_count_used_clusters, + + .init_dir_entry = exfat_init_dir_entry, + .init_ext_entry = exfat_init_ext_entry, + .find_dir_entry = exfat_find_dir_entry, + .delete_dir_entry = exfat_delete_dir_entry, + .get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry, + .count_ext_entries = exfat_count_ext_entries, + .calc_num_entries = exfat_calc_num_entries, + + .get_entry_type = exfat_get_entry_type, + .set_entry_type = exfat_set_entry_type, + .get_entry_attr = exfat_get_entry_attr, + .set_entry_attr = exfat_set_entry_attr, + .get_entry_flag = exfat_get_entry_flag, + .set_entry_flag = exfat_set_entry_flag, + .get_entry_clu0 = exfat_get_entry_clu0, + .set_entry_clu0 = exfat_set_entry_clu0, + .get_entry_size = exfat_get_entry_size, + .set_entry_size = exfat_set_entry_size, + .get_entry_time = exfat_get_entry_time, + .set_entry_time = exfat_set_entry_time, +}; + +s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr) +{ + struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bpb->num_fats == 0) + return FFS_FORMATERR; + + p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits; + p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits; + p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + + p_bd->sector_size_bits; + p_fs->cluster_size = 1 << p_fs->cluster_size_bits; + + p_fs->num_FAT_sectors = GET32(p_bpb->fat_length); + + p_fs->FAT1_start_sector = p_fs->PBR_sector + GET32(p_bpb->fat_offset); + if (p_bpb->num_fats == 1) + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; + else + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + + p_fs->num_FAT_sectors; + + p_fs->root_start_sector = p_fs->PBR_sector + GET32(p_bpb->clu_offset); + p_fs->data_start_sector = p_fs->root_start_sector; + + p_fs->num_sectors = GET64(p_bpb->vol_length); + p_fs->num_clusters = GET32(p_bpb->clu_count) + 2; + /* because the cluster index starts with 2 */ + + p_fs->vol_type = EXFAT; + p_fs->vol_id = GET32(p_bpb->vol_serial); + + p_fs->root_dir = GET32(p_bpb->root_cluster); + p_fs->dentries_in_root = 0; + p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - + DENTRY_SIZE_BITS); + + p_fs->vol_flag = (u32)GET16(p_bpb->vol_flags); + p_fs->clu_srch_ptr = 2; + p_fs->used_clusters = (u32)~0; + + p_fs->fs_func = &exfat_fs_func; + + return 
FFS_SUCCESS; +} + +s32 create_dir(struct inode *inode, struct chain_t *p_dir, + struct uni_name_t *p_uniname, struct file_id_t *fid) +{ + s32 ret, dentry, num_entries; + u64 size; + struct chain_t clu; + struct dos_name_t dos_name, dot_name; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct fs_func *fs_func = p_fs->fs_func; + + ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, + &dos_name); + if (ret) + return ret; + + /* find_empty_entry must be called before alloc_cluster */ + dentry = find_empty_entry(inode, p_dir, num_entries); + if (dentry < 0) + return FFS_FULL; + + clu.dir = CLUSTER_32(~0); + clu.size = 0; + clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + + /* (1) allocate a cluster */ + ret = fs_func->alloc_cluster(sb, 1, &clu); + if (ret < 0) + return FFS_MEDIAERR; + else if (ret == 0) + return FFS_FULL; + + ret = clear_cluster(sb, clu.dir); + if (ret != FFS_SUCCESS) + return ret; + + if (p_fs->vol_type == EXFAT) { + size = p_fs->cluster_size; + } else { + size = 0; + + /* initialize the . and .. entry + * Information for . points to itself + * Information for .. points to parent dir + */ + + dot_name.name_case = 0x0; + memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH); + + ret = fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir, + 0); + if (ret != FFS_SUCCESS) + return ret; + + ret = fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name); + if (ret != FFS_SUCCESS) + return ret; + + memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH); + + if (p_dir->dir == p_fs->root_dir) + ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR, + CLUSTER_32(0), 0); + else + ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR, + p_dir->dir, 0); + + if (ret != FFS_SUCCESS) + return ret; + + ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL, + &dot_name); + if (ret != FFS_SUCCESS) + return ret; + } + + /* (2) update the directory entry */ + /* make sub-dir entry in parent directory */ + ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir, + size); + if (ret != FFS_SUCCESS) + return ret; + + ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, + &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + fid->dir.dir = p_dir->dir; + fid->dir.size = p_dir->size; + fid->dir.flags = p_dir->flags; + fid->entry = dentry; + + fid->attr = ATTR_SUBDIR; + fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + fid->size = size; + fid->start_clu = clu.dir; + + fid->type = TYPE_DIR; + fid->rwoffset = 0; + fid->hint_last_off = -1; + + return FFS_SUCCESS; +} + +s32 create_file(struct inode *inode, struct chain_t *p_dir, + struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid) +{ + s32 ret, dentry, num_entries; + struct dos_name_t dos_name; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct fs_func *fs_func = p_fs->fs_func; + + ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, + &dos_name); + if (ret) + return ret; + + /* find_empty_entry must be called before alloc_cluster() */ + dentry = find_empty_entry(inode, p_dir, num_entries); + if (dentry < 0) + return FFS_FULL; + + /* (1) update the directory entry */ + /* fill the dos name directory entry information of the created file. + * the first cluster is not determined yet. 
(0) + */ + ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode, + CLUSTER_32(0), 0); + if (ret != FFS_SUCCESS) + return ret; + + ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, + &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + fid->dir.dir = p_dir->dir; + fid->dir.size = p_dir->size; + fid->dir.flags = p_dir->flags; + fid->entry = dentry; + + fid->attr = ATTR_ARCHIVE | mode; + fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + fid->size = 0; + fid->start_clu = CLUSTER_32(~0); + + fid->type = TYPE_FILE; + fid->rwoffset = 0; + fid->hint_last_off = -1; + + return FFS_SUCCESS; +} + +void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry) +{ + s32 num_entries; + sector_t sector; + struct dentry_t *ep; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct fs_func *fs_func = p_fs->fs_func; + + ep = get_entry_in_dir(sb, p_dir, entry, §or); + if (!ep) + return; + + buf_lock(sb, sector); + + /* buf_lock() before call count_ext_entries() */ + num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep); + if (num_entries < 0) { + buf_unlock(sb, sector); + return; + } + num_entries++; + + buf_unlock(sb, sector); + + /* (1) update the directory entry */ + fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries); +} + +s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry, + struct uni_name_t *p_uniname, struct file_id_t *fid) +{ + s32 ret, newentry = -1, num_old_entries, num_new_entries; + sector_t sector_old, sector_new; + struct dos_name_t dos_name; + struct dentry_t *epold, *epnew; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct fs_func *fs_func = p_fs->fs_func; + + epold = get_entry_in_dir(sb, p_dir, oldentry, §or_old); + if (!epold) + return FFS_MEDIAERR; + + buf_lock(sb, sector_old); + + /* buf_lock() before call count_ext_entries() */ + num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry, + epold); + if (num_old_entries < 0) { + buf_unlock(sb, sector_old); + return FFS_MEDIAERR; + } + num_old_entries++; + + ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, + &num_new_entries, &dos_name); + if (ret) { + buf_unlock(sb, sector_old); + return ret; + } + + if (num_old_entries < num_new_entries) { + newentry = find_empty_entry(inode, p_dir, num_new_entries); + if (newentry < 0) { + buf_unlock(sb, sector_old); + return FFS_FULL; + } + + epnew = get_entry_in_dir(sb, p_dir, newentry, §or_new); + if (!epnew) { + buf_unlock(sb, sector_old); + return FFS_MEDIAERR; + } + + memcpy((void *)epnew, (void *)epold, DENTRY_SIZE); + if (fs_func->get_entry_type(epnew) == TYPE_FILE) { + fs_func->set_entry_attr(epnew, + fs_func->get_entry_attr(epnew) | + ATTR_ARCHIVE); + fid->attr |= ATTR_ARCHIVE; + } + buf_modify(sb, sector_new); + buf_unlock(sb, sector_old); + + if (p_fs->vol_type == EXFAT) { + epold = get_entry_in_dir(sb, p_dir, oldentry + 1, + §or_old); + buf_lock(sb, sector_old); + epnew = get_entry_in_dir(sb, p_dir, newentry + 1, + §or_new); + + if (!epold || !epnew) { + buf_unlock(sb, sector_old); + return FFS_MEDIAERR; + } + + memcpy((void *)epnew, (void *)epold, DENTRY_SIZE); + buf_modify(sb, sector_new); + buf_unlock(sb, sector_old); + } + + ret = fs_func->init_ext_entry(sb, p_dir, newentry, + num_new_entries, p_uniname, + &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + fs_func->delete_dir_entry(sb, p_dir, oldentry, 0, + num_old_entries); + fid->entry = newentry; + } else { + if 
+ if (fs_func->get_entry_type(epold) == TYPE_FILE) {
+ fs_func->set_entry_attr(epold,
+ fs_func->get_entry_attr(epold) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_old);
+ buf_unlock(sb, sector_old);
+
+ ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
+ num_new_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
+ num_old_entries);
+ }
+
+ return FFS_SUCCESS;
+}
+
+s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
+ struct chain_t *p_newdir, struct uni_name_t *p_uniname,
+ struct file_id_t *fid)
+{
+ s32 ret, newentry, num_new_entries, num_old_entries;
+ sector_t sector_mov, sector_new;
+ struct chain_t clu;
+ struct dos_name_t dos_name;
+ struct dentry_t *epmov, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
+ if (!epmov)
+ return FFS_MEDIAERR;
+
+ /* check if the source and target directories are the same */
+ if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
+ fs_func->get_entry_clu0(epmov) == p_newdir->dir)
+ return FFS_INVALIDPATH;
+
+ buf_lock(sb, sector_mov);
+
+ /* buf_lock() before calling count_ext_entries() */
+ num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
+ epmov);
+ if (num_old_entries < 0) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+ num_old_entries++;
+
+ ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
+ &num_new_entries, &dos_name);
+ if (ret) {
+ buf_unlock(sb, sector_mov);
+ return ret;
+ }
+
+ newentry = find_empty_entry(inode, p_newdir, num_new_entries);
+ if (newentry < 0) {
+ buf_unlock(sb, sector_mov);
+ return FFS_FULL;
+ }
+
+ epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
+ if (!epnew) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
+ fs_func->set_entry_attr(epnew, fs_func->get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_mov);
+
+ if (p_fs->vol_type == EXFAT) {
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
+ &sector_mov);
+ buf_lock(sb, sector_mov);
+ epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
+ &sector_new);
+ if (!epmov || !epnew) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_mov);
+ } else if (fs_func->get_entry_type(epnew) == TYPE_DIR) {
+ /* change ".." pointer to new parent dir */
+ clu.dir = fs_func->get_entry_clu0(epnew);
+ clu.flags = 0x01;
+
+ epnew = get_entry_in_dir(sb, &clu, 1, &sector_new);
+ if (!epnew)
+ return FFS_MEDIAERR;
+
+ if (p_newdir->dir == p_fs->root_dir)
+ fs_func->set_entry_clu0(epnew, CLUSTER_32(0));
+ else
+ fs_func->set_entry_clu0(epnew, p_newdir->dir);
+ buf_modify(sb, sector_new);
+ }
+
+ ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
+ p_uniname, &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
+
+ fid->dir.dir = p_newdir->dir;
+ fid->dir.size = p_newdir->size;
+ fid->dir.flags = p_newdir->flags;
+
+ fid->entry = newentry;
+
+ return FFS_SUCCESS;
+}
+
+/*
+ * Sector Read/Write Functions
+ */
+
+int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
+ bool read)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
+ __func__, (unsigned long long)sec);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_read(sb, sec, bh, 1, read);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = TRUE;
+ }
+
+ return ret;
+}
+
+int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
+ bool sync)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
+ __func__, (unsigned long long)sec);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!bh) {
+ pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_write(sb, sec, bh, 1, sync);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = TRUE;
+ }
+
+ return ret;
+}
+
+int multi_sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, s32 num_secs, bool read)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
+ __func__, (unsigned long long)sec, num_secs);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_read(sb, sec, bh, num_secs, read);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = TRUE;
+ }
+
+ return ret;
+}
+
+int multi_sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, s32 num_secs, bool sync)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
+ __func__, (unsigned long long)sec, num_secs);
+ fs_error(sb);
+ return ret;
+ }
+ if (!bh) {
+ pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_write(sb, sec, bh, num_secs, sync);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = TRUE;
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
new file mode 100644
index 000000000000..2ca58616159b
--- /dev/null
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/string.h>
+#include <linux/nls.h>
+#include "exfat.h"
+
+static u16 bad_dos_chars[] = {
+ /* + , ; = [ ] */
+ 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
+ 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
+ 0
+};
+
+static u16 bad_uni_chars[] = {
+ /* " * / : < > ? \ | */
+ 0x0022, 0x002A, 0x002F, 0x003A,
+ 0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
+ 0
+};
+
+static int convert_ch_to_uni(struct nls_table *nls, u16 *uni, u8 *ch,
+ bool *lossy)
+{
+ int len;
+
+ *uni = 0x0;
+
+ if (ch[0] < 0x80) {
+ *uni = (u16)ch[0];
+ return 1;
+ }
+
+ len = nls->char2uni(ch, NLS_MAX_CHARSET_SIZE, uni);
+ if (len < 0) {
+ /* conversion failed */
+ pr_info("%s: fail to use nls\n", __func__);
+ if (lossy)
+ *lossy = true;
+ *uni = (u16)'_';
+ if (!strcmp(nls->charset, "utf8"))
+ return 1;
+ else
+ return 2;
+ }
+
+ return len;
+}
+
+static int convert_uni_to_ch(struct nls_table *nls, u8 *ch, u16 uni,
+ bool *lossy)
+{
+ int len;
+
+ ch[0] = 0x0;
+
+ if (uni < 0x0080) {
+ ch[0] = (u8)uni;
+ return 1;
+ }
+
+ len = nls->uni2char(uni, ch, NLS_MAX_CHARSET_SIZE);
+ if (len < 0) {
+ /* conversion failed */
+ pr_info("%s: fail to use nls\n", __func__);
+ if (lossy)
+ *lossy = true;
+ ch[0] = '_';
+ return 1;
+ }
+
+ return len;
+}
+
+u16 nls_upper(struct super_block *sb, u16 a)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (EXFAT_SB(sb)->options.casesensitive)
+ return a;
+ if (p_fs->vol_utbl && p_fs->vol_utbl[get_col_index(a)])
+ return p_fs->vol_utbl[get_col_index(a)][get_row_index(a)];
+ else
+ return a;
+}
+
+static u16 *nls_wstrchr(u16 *str, u16 wchar)
+{
+ while (*str) {
+ if (*(str++) == wchar)
+ return str;
+ }
+
+ return NULL;
+}
+
+int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b)
+{
+ return strncmp(a, b, DOS_NAME_LENGTH);
+}
+
+int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
+{
+ int i;
+
+ for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) {
+ if (nls_upper(sb, *a) != nls_upper(sb, *b))
+ return 1;
+ if (*a == 0x0)
+ return 0;
+ }
+ return 0;
+}
+
+void nls_uniname_to_dosname(struct super_block *sb,
+ struct dos_name_t *p_dosname,
+ struct uni_name_t *p_uniname, bool *p_lossy)
+{
+ int i, j, len;
+ bool lossy = false;
+ u8 buf[MAX_CHARSET_SIZE];
+ u8 lower = 0, upper = 0;
+ u8 *dosname = p_dosname->name;
+ u16 *uniname = p_uniname->name;
+ u16 *p, *last_period;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
+
+ for (i = 0; i < DOS_NAME_LENGTH; i++)
+ *(dosname + i) = ' ';
+
+ if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_CUR_DIR_NAME)) {
+ *(dosname) = '.';
+ p_dosname->name_case = 0x0;
+ if (p_lossy)
+ *p_lossy = false;
+ return;
+ }
+
+ if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_PAR_DIR_NAME)) {
+ *(dosname) = '.';
+ *(dosname + 1) = '.';
+ p_dosname->name_case = 0x0;
+ if (p_lossy)
+ *p_lossy = false;
+ return;
+ }
+
+ /* search for the last embedded period */
+ last_period = NULL;
+ for (p = uniname; *p; p++) {
+ if (*p == (u16)'.')
+ last_period = p;
+ }
+
+ i = 0;
+ while (i < DOS_NAME_LENGTH) {
+ if (i == 8) {
+ if (!last_period)
+ break;
+
+ if (uniname <= last_period) {
+ if (uniname < last_period)
+ lossy = true;
+ uniname = last_period + 1;
+ }
+ }
+
+ if (*uniname == (u16)'\0') {
+ break;
+ } else if (*uniname == (u16)' ') {
+ lossy = true;
+ } else if (*uniname == (u16)'.') {
+ if (uniname < last_period)
+ lossy = true;
+ else
+ i = 8;
+ } else if (nls_wstrchr(bad_dos_chars, *uniname)) {
+ lossy = true;
+ *(dosname + i) = '_';
+ i++;
+ } else {
+ len = convert_uni_to_ch(nls, buf, *uniname, &lossy);
+
+ if (len > 1) {
+ if ((i >= 8) && ((i + len) > DOS_NAME_LENGTH))
+ break;
+
+ if ((i < 8) && ((i + len) > 8)) {
+ i = 8;
+ continue;
+ }
+
+ lower = 0xFF;
+
+ for (j = 0; j < len; j++, i++)
+ *(dosname + i) = *(buf + j);
+ } else { /* len == 1 */
+ if ((*buf >= 'a') && (*buf <= 'z')) {
+ *(dosname + i) = *buf - ('a' - 'A');
+
+ if (i < 8)
+ lower |= 0x08;
+ else
+ lower |= 0x10;
+ } else if ((*buf >= 'A') && (*buf <= 'Z')) {
+ *(dosname + i) = *buf;
+
+ if (i < 8)
+ upper |= 0x08;
+ else
+ upper |= 0x10;
+ } else {
+ *(dosname + i) = *buf;
+ }
+ i++;
+ }
+ }
+
+ uniname++;
+ }
+
+ if (*dosname == 0xE5)
+ *dosname = 0x05;
+
+ if (*uniname != 0x0)
+ lossy = true;
+
+ if (upper & lower)
+ p_dosname->name_case = 0xFF;
+ else
+ p_dosname->name_case = lower;
+
+ if (p_lossy)
+ *p_lossy = lossy;
+}
+
+void nls_dosname_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i = 0, j, n = 0;
+ u8 buf[DOS_NAME_LENGTH + 2];
+ u8 *dosname = p_dosname->name;
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
+
+ if (*dosname == 0x05) {
+ *buf = 0xE5;
+ i++;
+ n++;
+ }
+
+ for (; i < 8; i++, n++) {
+ if (*(dosname + i) == ' ')
+ break;
+
+ if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
+ (p_dosname->name_case & 0x08))
+ *(buf + n) = *(dosname + i) + ('a' - 'A');
+ else
+ *(buf + n) = *(dosname + i);
+ }
+ if (*(dosname + 8) != ' ') {
+ *(buf + n) = '.';
+ n++;
+ }
+
+ for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
+ if (*(dosname + i) == ' ')
+ break;
+
+ if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
+ (p_dosname->name_case & 0x10))
+ *(buf + n) = *(dosname + i) + ('a' - 'A');
+ else
+ *(buf + n) = *(dosname + i);
+ }
+ *(buf + n) = '\0';
+
+ i = 0;
+ j = 0;
+ while (j < (MAX_NAME_LENGTH - 1)) {
+ if (*(buf + i) == '\0')
+ break;
+
+ i += convert_ch_to_uni(nls, uniname, (buf + i), NULL);
+
+ uniname++;
+ j++;
+ }
+
+ *uniname = (u16)'\0';
+}
+
+void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
+ struct uni_name_t *p_uniname)
+{
+ int i, j, len;
+ u8 buf[MAX_CHARSET_SIZE];
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ if (!nls) {
+ len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH,
+ UTF16_HOST_ENDIAN, p_cstring,
+ MAX_NAME_LENGTH);
+ p_cstring[len] = 0;
+ return;
+ }
+
+ i = 0;
+ while (i < (MAX_NAME_LENGTH - 1)) {
+ if (*uniname == (u16)'\0')
+ break;
+
+ len = convert_uni_to_ch(nls, buf, *uniname, NULL);
+
+ if (len > 1) {
+ for (j = 0; j < len; j++)
+ *p_cstring++ = (char)*(buf + j);
+ } else { /* len == 1 */
+ *p_cstring++ = (char)*buf;
+ }
+
+ uniname++;
+ i++;
+ }
+
+ *p_cstring = '\0';
+}
+
+void nls_cstring_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname, u8 *p_cstring,
+ bool *p_lossy)
+{
+ int i, j;
+ bool lossy = false;
+ u8 *end_of_name;
+ u8 upname[MAX_NAME_LENGTH * 2];
+ u16 *uniname = p_uniname->name;
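+
+ /*
+ * Build the UTF-16 name and, in parallel, an upcased copy: the
+ * exFAT name hash stored in the stream entry is computed over
+ * the upcased name (see the calc_checksum_2byte() call below).
+ */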
struct nls_table *nls = EXFAT_SB(sb)->nls_io; + + /* strip all trailing spaces */ + end_of_name = p_cstring + strlen(p_cstring); + + while (*(--end_of_name) == ' ') { + if (end_of_name < p_cstring) + break; + } + *(++end_of_name) = '\0'; + + if (strcmp(p_cstring, ".") && strcmp(p_cstring, "..")) { + /* strip all trailing periods */ + while (*(--end_of_name) == '.') { + if (end_of_name < p_cstring) + break; + } + *(++end_of_name) = '\0'; + } + + if (*p_cstring == '\0') + lossy = true; + + if (!nls) { + i = utf8s_to_utf16s(p_cstring, MAX_NAME_LENGTH, + UTF16_HOST_ENDIAN, uniname, + MAX_NAME_LENGTH); + for (j = 0; j < i; j++) + SET16_A(upname + j * 2, nls_upper(sb, uniname[j])); + uniname[i] = '\0'; + } else { + i = 0; + j = 0; + while (j < (MAX_NAME_LENGTH - 1)) { + if (*(p_cstring + i) == '\0') + break; + + i += convert_ch_to_uni(nls, uniname, + (u8 *)(p_cstring + i), &lossy); + + if ((*uniname < 0x0020) || + nls_wstrchr(bad_uni_chars, *uniname)) + lossy = true; + + SET16_A(upname + j * 2, nls_upper(sb, *uniname)); + + uniname++; + j++; + } + + if (*(p_cstring + i) != '\0') + lossy = true; + *uniname = (u16)'\0'; + } + + p_uniname->name_len = j; + p_uniname->name_hash = calc_checksum_2byte(upname, j << 1, 0, + CS_DEFAULT); + + if (p_lossy) + *p_lossy = lossy; +} diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c new file mode 100644 index 000000000000..5b5c2ca8c9aa --- /dev/null +++ b/drivers/staging/exfat/exfat_super.c @@ -0,0 +1,4137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define EXFAT_VERSION "1.3.0" + +#include "exfat.h" + +static struct kmem_cache *exfat_inode_cachep; + +// FIXME use commented lines +// static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE; +// static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET; +static int exfat_default_codepage = CONFIG_FAT_DEFAULT_CODEPAGE; +static char exfat_default_iocharset[] = CONFIG_FAT_DEFAULT_IOCHARSET; + +#define INC_IVERSION(x) (inode_inc_iversion(x)) +#define GET_IVERSION(x) (inode_peek_iversion_raw(x)) +#define SET_IVERSION(x, y) (inode_set_iversion(x, y)) + +static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos); +static int exfat_sync_inode(struct inode *inode); +static struct inode *exfat_build_inode(struct super_block *sb, + struct file_id_t *fid, loff_t i_pos); +static int exfat_write_inode(struct inode *inode, + struct writeback_control *wbc); +static void exfat_write_super(struct super_block *sb); + +#define UNIX_SECS_1980 315532800L + +#if BITS_PER_LONG == 64 +#define UNIX_SECS_2108 4354819200L +#endif + +/* days between 1.1.70 and 1.1.80 (2 leap days) */ +#define DAYS_DELTA_DECADE (365 * 10 + 2) +/* 120 (2100 - 1980) isn't leap year */ +#define NO_LEAP_YEAR_2100 (120) +#define IS_LEAP_YEAR(y) (!((y) & 0x3) && (y) != NO_LEAP_YEAR_2100) + +#define SECS_PER_MIN (60) +#define SECS_PER_HOUR (60 * SECS_PER_MIN) +#define SECS_PER_DAY (24 * SECS_PER_HOUR) + +#define MAKE_LEAP_YEAR(leap_year, year) \ + do { \ + if (unlikely(year > NO_LEAP_YEAR_2100)) \ + leap_year = ((year + 3) / 4) - 1; \ + else \ + leap_year = ((year + 3) / 4); \ + } while (0) + +/* Linear day numbers of the respective 1sts in non-leap 
years. */ +static time_t accum_days_in_year[] = { + /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */ + 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, +}; + +/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */ +static void exfat_time_fat2unix(struct exfat_sb_info *sbi, + struct timespec64 *ts, struct date_time_t *tp) +{ + time_t year = tp->Year; + time_t ld; + + MAKE_LEAP_YEAR(ld, year); + + if (IS_LEAP_YEAR(year) && (tp->Month) > 2) + ld++; + + ts->tv_sec = tp->Second + + tp->Minute * SECS_PER_MIN + + tp->Hour * SECS_PER_HOUR + + (ld + accum_days_in_year[(tp->Month)] + + (tp->Day - 1)) * SECS_PER_DAY + + (year * 365 + DAYS_DELTA_DECADE) * SECS_PER_DAY + + sys_tz.tz_minuteswest * SECS_PER_MIN; + + ts->tv_nsec = 0; +} + +/* Convert linear UNIX date to a FAT time/date pair. */ +static void exfat_time_unix2fat(struct exfat_sb_info *sbi, + struct timespec64 *ts, struct date_time_t *tp) +{ + time_t second = ts->tv_sec; + time_t day, month, year; + time_t ld; + + second -= sys_tz.tz_minuteswest * SECS_PER_MIN; + + /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */ + if (second < UNIX_SECS_1980) { + tp->Second = 0; + tp->Minute = 0; + tp->Hour = 0; + tp->Day = 1; + tp->Month = 1; + tp->Year = 0; + return; + } +#if (BITS_PER_LONG == 64) + if (second >= UNIX_SECS_2108) { + tp->Second = 59; + tp->Minute = 59; + tp->Hour = 23; + tp->Day = 31; + tp->Month = 12; + tp->Year = 127; + return; + } +#endif + day = second / SECS_PER_DAY - DAYS_DELTA_DECADE; + year = day / 365; + MAKE_LEAP_YEAR(ld, year); + if (year * 365 + ld > day) + year--; + + MAKE_LEAP_YEAR(ld, year); + day -= year * 365 + ld; + + if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) { + month = 2; + } else { + if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3]) + day--; + for (month = 1; month < 12; month++) { + if (accum_days_in_year[month + 1] > day) + break; + } + } + day -= accum_days_in_year[month]; + + tp->Second = second % SECS_PER_MIN; + tp->Minute = (second / SECS_PER_MIN) % 60; + tp->Hour = (second / SECS_PER_HOUR) % 24; + tp->Day = day + 1; + tp->Month = month; + tp->Year = year; +} + +struct timestamp_t *tm_current(struct timestamp_t *tp) +{ + struct timespec64 ts; + time_t second, day, leap_day, month, year; + + ktime_get_real_ts64(&ts); + + second = ts.tv_sec; + second -= sys_tz.tz_minuteswest * SECS_PER_MIN; + + /* Jan 1 GMT 00:00:00 1980. But what about another time zone? 
*/ + if (second < UNIX_SECS_1980) { + tp->sec = 0; + tp->min = 0; + tp->hour = 0; + tp->day = 1; + tp->mon = 1; + tp->year = 0; + return tp; + } +#if BITS_PER_LONG == 64 + if (second >= UNIX_SECS_2108) { + tp->sec = 59; + tp->min = 59; + tp->hour = 23; + tp->day = 31; + tp->mon = 12; + tp->year = 127; + return tp; + } +#endif + + day = second / SECS_PER_DAY - DAYS_DELTA_DECADE; + year = day / 365; + + MAKE_LEAP_YEAR(leap_day, year); + if (year * 365 + leap_day > day) + year--; + + MAKE_LEAP_YEAR(leap_day, year); + + day -= year * 365 + leap_day; + + if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) { + month = 2; + } else { + if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3]) + day--; + for (month = 1; month < 12; month++) { + if (accum_days_in_year[month + 1] > day) + break; + } + } + day -= accum_days_in_year[month]; + + tp->sec = second % SECS_PER_MIN; + tp->min = (second / SECS_PER_MIN) % 60; + tp->hour = (second / SECS_PER_HOUR) % 24; + tp->day = day + 1; + tp->mon = month; + tp->year = year; + + return tp; +} + +static void __lock_super(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + mutex_lock(&sbi->s_lock); +} + +static void __unlock_super(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + mutex_unlock(&sbi->s_lock); +} + +static int __is_sb_dirty(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + return sbi->s_dirt; +} + +static void __set_sb_clean(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + sbi->s_dirt = 0; +} + +static int __exfat_revalidate(struct dentry *dentry) +{ + return 0; +} + +static int exfat_revalidate(struct dentry *dentry, unsigned int flags) +{ + if (flags & LOOKUP_RCU) + return -ECHILD; + + if (dentry->d_inode) + return 1; + return __exfat_revalidate(dentry); +} + +static int exfat_revalidate_ci(struct dentry *dentry, unsigned int flags) +{ + if (flags & LOOKUP_RCU) + return -ECHILD; + + if (dentry->d_inode) + return 1; + + if (!flags) + return 0; + + if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + return 0; + + return __exfat_revalidate(dentry); +} + +static unsigned int __exfat_striptail_len(unsigned int len, const char *name) +{ + while (len && name[len - 1] == '.') + len--; + return len; +} + +static unsigned int exfat_striptail_len(const struct qstr *qstr) +{ + return __exfat_striptail_len(qstr->len, qstr->name); +} + +static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr) +{ + qstr->hash = full_name_hash(dentry, qstr->name, + exfat_striptail_len(qstr)); + return 0; +} + +static int exfat_d_hashi(const struct dentry *dentry, struct qstr *qstr) +{ + struct super_block *sb = dentry->d_sb; + const unsigned char *name; + unsigned int len; + unsigned long hash; + + name = qstr->name; + len = exfat_striptail_len(qstr); + + hash = init_name_hash(dentry); + while (len--) + hash = partial_name_hash(nls_upper(sb, *name++), hash); + qstr->hash = end_name_hash(hash); + + return 0; +} + +static int exfat_cmpi(const struct dentry *dentry, unsigned int len, + const char *str, const struct qstr *name) +{ + struct nls_table *t = EXFAT_SB(dentry->d_sb)->nls_io; + unsigned int alen, blen; + + alen = exfat_striptail_len(name); + blen = __exfat_striptail_len(len, str); + if (alen == blen) { + if (t == NULL) { + if (strncasecmp(name->name, str, alen) == 0) + return 0; + } else if (nls_strnicmp(t, name->name, str, alen) == 0) + return 0; + } + return 1; +} + +static int exfat_cmp(const struct dentry *dentry, unsigned int len, + const char *str, 
const struct qstr *name)
+{
+ unsigned int alen, blen;
+
+ alen = exfat_striptail_len(name);
+ blen = __exfat_striptail_len(len, str);
+ if (alen == blen) {
+ if (strncmp(name->name, str, alen) == 0)
+ return 0;
+ }
+ return 1;
+}
+
+static const struct dentry_operations exfat_ci_dentry_ops = {
+ .d_revalidate = exfat_revalidate_ci,
+ .d_hash = exfat_d_hashi,
+ .d_compare = exfat_cmpi,
+};
+
+static const struct dentry_operations exfat_dentry_ops = {
+ .d_revalidate = exfat_revalidate,
+ .d_hash = exfat_d_hash,
+ .d_compare = exfat_cmp,
+};
+
+static DEFINE_SEMAPHORE(z_sem);
+
+static inline void fs_sync(struct super_block *sb, bool do_sync)
+{
+ if (do_sync)
+ bdev_sync(sb);
+}
+
+/*
+ * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
+ * save ATTR_RO instead of ->i_mode.
+ *
+ * If it's directory and !sbi->options.rodir, ATTR_RO isn't read-only
+ * bit, it's just used as flag for app.
+ */
+static inline int exfat_mode_can_hold_ro(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ if (S_ISDIR(inode->i_mode))
+ return 0;
+
+ if ((~sbi->options.fs_fmask) & 0222)
+ return 1;
+ return 0;
+}
+
+/* Convert attribute bits and a mask to the UNIX mode. */
+static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, u32 attr,
+ mode_t mode)
+{
+ if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
+ mode &= ~0222;
+
+ if (attr & ATTR_SUBDIR)
+ return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
+ else if (attr & ATTR_SYMLINK)
+ return (mode & ~sbi->options.fs_dmask) | S_IFLNK;
+ else
+ return (mode & ~sbi->options.fs_fmask) | S_IFREG;
+}
+
+/* Return the FAT attribute byte for this inode */
+static inline u32 exfat_make_attr(struct inode *inode)
+{
+ if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222))
+ return (EXFAT_I(inode)->fid.attr) | ATTR_READONLY;
+ else
+ return EXFAT_I(inode)->fid.attr;
+}
+
+static inline void exfat_save_attr(struct inode *inode, u32 attr)
+{
+ if (exfat_mode_can_hold_ro(inode))
+ EXFAT_I(inode)->fid.attr = attr & ATTR_RWMASK;
+ else
+ EXFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY);
+}
+
+static int ffsMountVol(struct super_block *sb)
+{
+ int i, ret;
+ struct pbr_sector_t *p_pbr;
+ struct buffer_head *tmp_bh = NULL;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ pr_info("[EXFAT] trying to mount...\n");
+
+ down(&z_sem);
+
+ buf_init(sb);
+
+ sema_init(&p_fs->v_sem, 1);
+ p_fs->dev_ejected = FALSE;
+
+ /* open the block device */
+ bdev_open(sb);
+
+ if (p_bd->sector_size < sb->s_blocksize) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ if (p_bd->sector_size > sb->s_blocksize)
+ sb_set_blocksize(sb, p_bd->sector_size);
+
+ /* read Sector 0 */
+ if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ p_fs->PBR_sector = 0;
+
+ p_pbr = (struct pbr_sector_t *) tmp_bh->b_data;
+
+ /* check the validity of PBR */
+ if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
+ brelse(tmp_bh);
+ bdev_close(sb);
+ ret = FFS_FORMATERR;
+ goto out;
+ }
+
+ /* fill fs_struct */
+ for (i = 0; i < 53; i++)
+ if (p_pbr->bpb[i])
+ break;
+
+ if (i < 53) {
+ if (GET16(p_pbr->bpb + 11)) /* num_fat_sectors */
+ ret = fat16_mount(sb, p_pbr);
+ else
+ ret = fat32_mount(sb, p_pbr);
+ } else {
+ ret = exfat_mount(sb, p_pbr);
+ }
+
+ brelse(tmp_bh);
+
+ if (ret) {
+ bdev_close(sb);
+ goto out;
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ ret = load_alloc_bitmap(sb);
+ if (ret) {
+ bdev_close(sb);
+ goto out;
+ }
+ ret =
load_upcase_table(sb); + if (ret) { + free_alloc_bitmap(sb); + bdev_close(sb); + goto out; + } + } + + if (p_fs->dev_ejected) { + if (p_fs->vol_type == EXFAT) { + free_upcase_table(sb); + free_alloc_bitmap(sb); + } + bdev_close(sb); + ret = FFS_MEDIAERR; + goto out; + } + + pr_info("[EXFAT] mounted successfully\n"); + +out: + up(&z_sem); + + return ret; +} + +static int ffsUmountVol(struct super_block *sb) +{ + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + int err = FFS_SUCCESS; + + pr_info("[EXFAT] trying to unmount...\n"); + + down(&z_sem); + + /* acquire the lock for file system critical section */ + down(&p_fs->v_sem); + + fs_sync(sb, false); + fs_set_vol_flags(sb, VOL_CLEAN); + + if (p_fs->vol_type == EXFAT) { + free_upcase_table(sb); + free_alloc_bitmap(sb); + } + + FAT_release_all(sb); + buf_release_all(sb); + + /* close the block device */ + bdev_close(sb); + + if (p_fs->dev_ejected) { + pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n"); + err = FFS_MEDIAERR; + } + + buf_shutdown(sb); + + /* release the lock for file system critical section */ + up(&p_fs->v_sem); + up(&z_sem); + + pr_info("[EXFAT] unmounted successfully\n"); + + return err; +} + +static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info) +{ + int err = FFS_SUCCESS; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if (info == NULL) + return FFS_ERROR; + + /* acquire the lock for file system critical section */ + down(&p_fs->v_sem); + + if (p_fs->used_clusters == (u32) ~0) + p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb); + + info->FatType = p_fs->vol_type; + info->ClusterSize = p_fs->cluster_size; + info->NumClusters = p_fs->num_clusters - 2; /* clu 0 & 1 */ + info->UsedClusters = p_fs->used_clusters; + info->FreeClusters = info->NumClusters - info->UsedClusters; + + if (p_fs->dev_ejected) + err = FFS_MEDIAERR; + + /* release the lock for file system critical section */ + up(&p_fs->v_sem); + + return err; +} + +static int ffsSyncVol(struct super_block *sb, bool do_sync) +{ + int err = FFS_SUCCESS; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + down(&p_fs->v_sem); + + /* synchronize the file system */ + fs_sync(sb, do_sync); + fs_set_vol_flags(sb, VOL_CLEAN); + + if (p_fs->dev_ejected) + err = FFS_MEDIAERR; + + /* release the lock for file system critical section */ + up(&p_fs->v_sem); + + return err; +} + +/*----------------------------------------------------------------------*/ +/* File Operation Functions */ +/*----------------------------------------------------------------------*/ + +static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid) +{ + int ret, dentry, num_entries; + struct chain_t dir; + struct uni_name_t uni_name; + struct dos_name_t dos_name; + struct dentry_t *ep, *ep2; + struct entry_set_cache_t *es = NULL; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + pr_debug("%s entered\n", __func__); + + /* check the validity of pointer parameters */ + if ((fid == NULL) || (path == NULL) || (*path == '\0')) + return FFS_ERROR; + + /* acquire the lock for file system critical section */ + down(&p_fs->v_sem); + + /* check the validity of directory name in the given pathname */ + ret = resolve_path(inode, path, &dir, &uni_name); + if (ret) + goto out; + + ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries, + &dos_name); + if (ret) + goto 
out;
+
+	/* search the file name for directories */
+	dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
+					       &dos_name, TYPE_ALL);
+	if (dentry < -1) {
+		ret = FFS_NOTFOUND;
+		goto out;
+	}
+
+	fid->dir.dir = dir.dir;
+	fid->dir.size = dir.size;
+	fid->dir.flags = dir.flags;
+	fid->entry = dentry;
+
+	if (dentry == -1) {
+		fid->type = TYPE_DIR;
+		fid->rwoffset = 0;
+		fid->hint_last_off = -1;
+
+		fid->attr = ATTR_SUBDIR;
+		fid->flags = 0x01;
+		fid->size = 0;
+		fid->start_clu = p_fs->root_dir;
+	} else {
+		if (p_fs->vol_type == EXFAT) {
+			es = get_entry_set_in_dir(sb, &dir, dentry,
+						  ES_2_ENTRIES, &ep);
+			if (!es) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+			ep2 = ep + 1;
+		} else {
+			ep = get_entry_in_dir(sb, &dir, dentry, NULL);
+			if (!ep) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+			ep2 = ep;
+		}
+
+		fid->type = p_fs->fs_func->get_entry_type(ep);
+		fid->rwoffset = 0;
+		fid->hint_last_off = -1;
+		fid->attr = p_fs->fs_func->get_entry_attr(ep);
+
+		fid->size = p_fs->fs_func->get_entry_size(ep2);
+		if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
+			fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+			fid->start_clu = CLUSTER_32(~0);
+		} else {
+			fid->flags = p_fs->fs_func->get_entry_flag(ep2);
+			fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
+		}
+
+		if (p_fs->vol_type == EXFAT)
+			release_entry_set(es);
+	}
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
+			 struct file_id_t *fid)
+{
+	struct chain_t dir;
+	struct uni_name_t uni_name;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	int ret;
+
+	/* check the validity of pointer parameters */
+	if ((fid == NULL) || (path == NULL) || (*path == '\0'))
+		return FFS_ERROR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	/* check the validity of directory name in the given pathname */
+	ret = resolve_path(inode, path, &dir, &uni_name);
+	if (ret)
+		goto out;
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	/* create a new file */
+	ret = create_file(inode, &dir, &uni_name, mode, fid);
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+	fs_sync(sb, false);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
+		       u64 count, u64 *rcount)
+{
+	s32 offset, sec_offset, clu_offset;
+	u32 clu;
+	int ret = FFS_SUCCESS;
+	sector_t LogSector;
+	u64 oneblkread, read_bytes;
+	struct buffer_head *tmp_bh = NULL;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+	/* check the validity of the given file id */
+	if (fid == NULL)
+		return FFS_INVALIDFID;
+
+	/* check the validity of pointer parameters */
+	if (buffer == NULL)
+		return FFS_ERROR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	/* check if the given file ID is opened */
+	if (fid->type != TYPE_FILE) {
+		ret = FFS_PERMISSIONERR;
+		goto out;
+	}
+
+	if (fid->rwoffset > fid->size)
+		fid->rwoffset = fid->size;
+
+	if (count > (fid->size - fid->rwoffset))
+		count = fid->size - fid->rwoffset;
+
+	if (count == 0) {
+		if (rcount != NULL)
+			*rcount = 0;
+		ret = FFS_EOF;
+		goto out;
+	}
+
+	read_bytes = 0;
+
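+	/*
+	 * Copy out at most one sector per pass: map the current offset to a
+	 * cluster (by plain arithmetic for a contiguous file, flags 0x03; by
+	 * walking the FAT otherwise), derive the logical sector, and memcpy()
+	 * the covered bytes out of the buffer head.
+	 */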
+	while (count > 0) {
+		clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+		clu = fid->start_clu;
+
+		if (fid->flags == 0x03) {
+			clu += clu_offset;
+		} else {
+			/* hint information */
+			if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+			    (clu_offset >= fid->hint_last_off)) {
+				clu_offset -= fid->hint_last_off;
+				clu = fid->hint_last_clu;
+			}
+
+			while (clu_offset > 0) {
+				/* clu = FAT_read(sb, clu); */
+				if (FAT_read(sb, clu, &clu) == -1) {
+					ret = FFS_MEDIAERR;
+					goto err_out;
+				}
+
+				clu_offset--;
+			}
+		}
+
+		/* hint information */
+		fid->hint_last_off = (s32)(fid->rwoffset >>
+					   p_fs->cluster_size_bits);
+		fid->hint_last_clu = clu;
+
+		/* byte offset in cluster */
+		offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
+
+		/* sector offset in cluster */
+		sec_offset = offset >> p_bd->sector_size_bits;
+
+		/* byte offset in sector */
+		offset &= p_bd->sector_size_mask;
+
+		LogSector = START_SECTOR(clu) + sec_offset;
+
+		oneblkread = (u64)(p_bd->sector_size - offset);
+		if (oneblkread > count)
+			oneblkread = count;
+
+		if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
+			if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+			    FFS_SUCCESS) {
+				ret = FFS_MEDIAERR;
+				goto err_out;
+			}
+			memcpy((char *)buffer + read_bytes,
+			       (char *)tmp_bh->b_data, (s32)oneblkread);
+		} else {
+			if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+			    FFS_SUCCESS) {
+				ret = FFS_MEDIAERR;
+				goto err_out;
+			}
+			memcpy((char *)buffer + read_bytes,
+			       (char *)tmp_bh->b_data + offset,
+			       (s32)oneblkread);
+		}
+		count -= oneblkread;
+		read_bytes += oneblkread;
+		fid->rwoffset += oneblkread;
+	}
+	brelse(tmp_bh);
+
+/* How did this ever work and not leak a brelse()?? */
+err_out:
+	/* set the size of read bytes */
+	if (rcount != NULL)
+		*rcount = read_bytes;
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
+			void *buffer, u64 count, u64 *wcount)
+{
+	s32 modified = FALSE, offset, sec_offset, clu_offset;
+	s32 num_clusters, num_alloc, num_alloced = (s32) ~0;
+	int ret = 0;
+	u32 clu, last_clu;
+	sector_t LogSector, sector = 0;
+	u64 oneblkwrite, write_bytes;
+	struct chain_t new_clu;
+	struct timestamp_t tm;
+	struct dentry_t *ep, *ep2;
+	struct entry_set_cache_t *es = NULL;
+	struct buffer_head *tmp_bh = NULL;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+	/* check the validity of the given file id */
+	if (fid == NULL)
+		return FFS_INVALIDFID;
+
+	/* check the validity of pointer parameters */
+	if (buffer == NULL)
+		return FFS_ERROR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	/* check if the given file ID is opened */
+	if (fid->type != TYPE_FILE) {
+		ret = FFS_PERMISSIONERR;
+		goto out;
+	}
+
+	if (fid->rwoffset > fid->size)
+		fid->rwoffset = fid->size;
+
+	if (count == 0) {
+		if (wcount != NULL)
+			*wcount = 0;
+		ret = FFS_SUCCESS;
+		goto out;
+	}
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	if (fid->size == 0)
+		num_clusters = 0;
+	else
+		num_clusters = (s32)((fid->size - 1) >>
+				     p_fs->cluster_size_bits) + 1;
+
+	write_bytes = 0;
+
+	while (count > 0) {
+		clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+		clu = last_clu = fid->start_clu;
+
+		if (fid->flags == 0x03) {
+			if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
+				last_clu += clu_offset - 1;
+
+				if (clu_offset == num_clusters)
+					clu = CLUSTER_32(~0);
+				else
+					clu += clu_offset;
+			}
+		} else {
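+			/*
+			 * hint information: hint_last_off/hint_last_clu cache
+			 * the cluster reached on the previous iteration, so a
+			 * sequential write resumes the FAT walk from there
+			 * instead of re-walking the chain from start_clu.
+			 */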
+			if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+			    (clu_offset >= fid->hint_last_off)) {
+				clu_offset -= fid->hint_last_off;
+				clu = fid->hint_last_clu;
+			}
+
+			while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
+				last_clu = clu;
+				/* clu = FAT_read(sb, clu); */
+				if (FAT_read(sb, clu, &clu) == -1) {
+					ret = FFS_MEDIAERR;
+					goto out;
+				}
+				clu_offset--;
+			}
+		}
+
+		if (clu == CLUSTER_32(~0)) {
+			num_alloc = (s32)((count - 1) >>
+					  p_fs->cluster_size_bits) + 1;
+			new_clu.dir = (last_clu == CLUSTER_32(~0)) ?
+					CLUSTER_32(~0) : last_clu + 1;
+			new_clu.size = 0;
+			new_clu.flags = fid->flags;
+
+			/* (1) allocate a chain of clusters */
+			num_alloced = p_fs->fs_func->alloc_cluster(sb,
+								   num_alloc,
+								   &new_clu);
+			if (num_alloced == 0)
+				break;
+			else if (num_alloced < 0) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+
+			/* (2) append to the FAT chain */
+			if (last_clu == CLUSTER_32(~0)) {
+				if (new_clu.flags == 0x01)
+					fid->flags = 0x01;
+				fid->start_clu = new_clu.dir;
+				modified = TRUE;
+			} else {
+				if (new_clu.flags != fid->flags) {
+					exfat_chain_cont_cluster(sb,
+								 fid->start_clu,
+								 num_clusters);
+					fid->flags = 0x01;
+					modified = TRUE;
+				}
+				if (new_clu.flags == 0x01)
+					FAT_write(sb, last_clu, new_clu.dir);
+			}
+
+			num_clusters += num_alloced;
+			clu = new_clu.dir;
+		}
+
+		/* hint information */
+		fid->hint_last_off = (s32)(fid->rwoffset >>
+					   p_fs->cluster_size_bits);
+		fid->hint_last_clu = clu;
+
+		/* byte offset in cluster */
+		offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
+
+		/* sector offset in cluster */
+		sec_offset = offset >> p_bd->sector_size_bits;
+
+		/* byte offset in sector */
+		offset &= p_bd->sector_size_mask;
+
+		LogSector = START_SECTOR(clu) + sec_offset;
+
+		oneblkwrite = (u64)(p_bd->sector_size - offset);
+		if (oneblkwrite > count)
+			oneblkwrite = count;
+
+		if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
+			if (sector_read(sb, LogSector, &tmp_bh, 0) !=
+			    FFS_SUCCESS) {
+				ret = FFS_MEDIAERR;
+				goto err_out;
+			}
+			memcpy((char *)tmp_bh->b_data,
+			       (char *)buffer + write_bytes, (s32)oneblkwrite);
+			if (sector_write(sb, LogSector, tmp_bh, 0) !=
+			    FFS_SUCCESS) {
+				brelse(tmp_bh);
+				ret = FFS_MEDIAERR;
+				goto err_out;
+			}
+		} else {
+			if ((offset > 0) ||
+			    ((fid->rwoffset + oneblkwrite) < fid->size)) {
+				if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+				    FFS_SUCCESS) {
+					ret = FFS_MEDIAERR;
+					goto err_out;
+				}
+			} else {
+				if (sector_read(sb, LogSector, &tmp_bh, 0) !=
+				    FFS_SUCCESS) {
+					ret = FFS_MEDIAERR;
+					goto err_out;
+				}
+			}
+
+			memcpy((char *)tmp_bh->b_data + offset,
+			       (char *)buffer + write_bytes, (s32)oneblkwrite);
+			if (sector_write(sb, LogSector, tmp_bh, 0) !=
+			    FFS_SUCCESS) {
+				brelse(tmp_bh);
+				ret = FFS_MEDIAERR;
+				goto err_out;
+			}
+		}
+
+		count -= oneblkwrite;
+		write_bytes += oneblkwrite;
+		fid->rwoffset += oneblkwrite;
+
+		fid->attr |= ATTR_ARCHIVE;
+
+		if (fid->size < fid->rwoffset) {
+			fid->size = fid->rwoffset;
+			modified = TRUE;
+		}
+	}
+
+	brelse(tmp_bh);
+
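+	/*
+	 * The in-core file_id_t now describes the grown file; the new size,
+	 * chain flags and first cluster still have to be written back to the
+	 * on-disk directory entry (and, on exFAT, the entry-set checksum
+	 * refreshed) before the write is complete.
+	 */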
+	/* (3) update the directory entry */
+	if (p_fs->vol_type == EXFAT) {
+		es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+					  ES_ALL_ENTRIES, &ep);
+		if (es == NULL) {
+			ret = FFS_MEDIAERR;
+			goto err_out;
+		}
+		ep2 = ep + 1;
+	} else {
+		ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+		if (!ep) {
+			ret = FFS_MEDIAERR;
+			goto err_out;
+		}
+		ep2 = ep;
+	}
+
+	p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+	p_fs->fs_func->set_entry_attr(ep, fid->attr);
+
+	if (p_fs->vol_type != EXFAT)
+		buf_modify(sb, sector);
+
+	if (modified) {
+		if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
+			p_fs->fs_func->set_entry_flag(ep2, fid->flags);
+
+		if (p_fs->fs_func->get_entry_size(ep2) != fid->size)
+			p_fs->fs_func->set_entry_size(ep2, fid->size);
+
+		if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
+			p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
+
+		if (p_fs->vol_type != EXFAT)
+			buf_modify(sb, sector);
+	}
+
+	if (p_fs->vol_type == EXFAT) {
+		update_dir_checksum_with_entry_set(sb, es);
+		release_entry_set(es);
+	}
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+	fs_sync(sb, false);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+err_out:
+	/* set the size of written bytes */
+	if (wcount != NULL)
+		*wcount = write_bytes;
+
+	if (num_alloced == 0)
+		ret = FFS_FULL;
+	else if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
+{
+	s32 num_clusters;
+	u32 last_clu = CLUSTER_32(0);
+	int ret = 0;
+	sector_t sector = 0;
+	struct chain_t clu;
+	struct timestamp_t tm;
+	struct dentry_t *ep, *ep2;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+	struct entry_set_cache_t *es = NULL;
+
+	pr_debug("%s entered (inode %p size %llu)\n", __func__, inode,
+		 new_size);
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	/* check if the given file ID is opened */
+	if (fid->type != TYPE_FILE) {
+		ret = FFS_PERMISSIONERR;
+		goto out;
+	}
+
+	if (fid->size != old_size) {
+		pr_err("[EXFAT] truncate: can't skip it because of size mismatch (old:%llu->fid:%llu).\n",
+		       old_size, fid->size);
+	}
+
+	if (old_size <= new_size) {
+		ret = FFS_SUCCESS;
+		goto out;
+	}
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	clu.dir = fid->start_clu;
+	clu.size = (s32)((old_size - 1) >> p_fs->cluster_size_bits) + 1;
+	clu.flags = fid->flags;
+
+	if (new_size > 0) {
+		num_clusters = (s32)((new_size - 1) >>
+				     p_fs->cluster_size_bits) + 1;
+
+		if (clu.flags == 0x03) {
+			clu.dir += num_clusters;
+		} else {
+			while (num_clusters > 0) {
+				last_clu = clu.dir;
+				if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+					ret = FFS_MEDIAERR;
+					goto out;
+				}
+				num_clusters--;
+			}
+		}
+
+		clu.size -= num_clusters;
+	}
+
+	fid->size = new_size;
+	fid->attr |= ATTR_ARCHIVE;
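+	/*
+	 * A file truncated to zero length keeps no cluster chain at all:
+	 * drop the start cluster and fall back to the volume's default
+	 * chain flags (0x03, i.e. contiguous, on exFAT; 0x01 on FAT).
+	 */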
+	if (new_size == 0) {
+		fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+		fid->start_clu = CLUSTER_32(~0);
+	}
+
+	/* (1) update the directory entry */
+	if (p_fs->vol_type == EXFAT) {
+		es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+					  ES_ALL_ENTRIES, &ep);
+		if (es == NULL) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		ep2 = ep + 1;
+	} else {
+		ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+		if (!ep) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		ep2 = ep;
+	}
+
+	p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+	p_fs->fs_func->set_entry_attr(ep, fid->attr);
+
+	p_fs->fs_func->set_entry_size(ep2, new_size);
+	if (new_size == 0) {
+		p_fs->fs_func->set_entry_flag(ep2, 0x01);
+		p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
+	}
+
+	if (p_fs->vol_type != EXFAT) {
+		buf_modify(sb, sector);
+	} else {
+		update_dir_checksum_with_entry_set(sb, es);
+		release_entry_set(es);
+	}
+
+	/* (2) cut off from the FAT chain */
+	if (last_clu != CLUSTER_32(0)) {
+		if (fid->flags == 0x01)
+			FAT_write(sb, last_clu, CLUSTER_32(~0));
+	}
+
+	/* (3) free the clusters */
+	p_fs->fs_func->free_cluster(sb, &clu, 0);
+
+	/* hint information */
+	fid->hint_last_off = -1;
+	if (fid->rwoffset > fid->size)
+		fid->rwoffset = fid->size;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+	fs_sync(sb, false);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	pr_debug("%s exited (%d)\n", __func__, ret);
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+static void update_parent_info(struct file_id_t *fid,
+			       struct inode *parent_inode)
+{
+	struct fs_info_t *p_fs = &(EXFAT_SB(parent_inode->i_sb)->fs_info);
+	struct file_id_t *parent_fid = &(EXFAT_I(parent_inode)->fid);
+
+	if (unlikely((parent_fid->flags != fid->dir.flags) ||
+		     (parent_fid->size !=
+		      (fid->dir.size << p_fs->cluster_size_bits)) ||
+		     (parent_fid->start_clu != fid->dir.dir))) {
+		fid->dir.dir = parent_fid->start_clu;
+		fid->dir.flags = parent_fid->flags;
+		fid->dir.size = ((parent_fid->size + (p_fs->cluster_size - 1))
+				 >> p_fs->cluster_size_bits);
+	}
+}
+
+static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
+		       struct inode *new_parent_inode, struct dentry *new_dentry)
+{
+	s32 ret;
+	s32 dentry;
+	struct chain_t olddir, newdir;
+	struct chain_t *p_dir = NULL;
+	struct uni_name_t uni_name;
+	struct dentry_t *ep;
+	struct super_block *sb = old_parent_inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	u8 *new_path = (u8 *) new_dentry->d_name.name;
+	struct inode *new_inode = new_dentry->d_inode;
+	int num_entries;
+	struct file_id_t *new_fid = NULL;
+	s32 new_entry = 0;
+
+	/* check the validity of the given file id */
+	if (fid == NULL)
+		return FFS_INVALIDFID;
+
+	/* check the validity of pointer parameters */
+	if ((new_path == NULL) || (*new_path == '\0'))
+		return FFS_ERROR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	update_parent_info(fid, old_parent_inode);
+
+	olddir.dir = fid->dir.dir;
+	olddir.size = fid->dir.size;
+	olddir.flags = fid->dir.flags;
+
+	dentry = fid->entry;
+
+	/* check if the old file is "." or ".."
*/ + if (p_fs->vol_type != EXFAT) { + if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) { + ret = FFS_PERMISSIONERR; + goto out2; + } + } + + ep = get_entry_in_dir(sb, &olddir, dentry, NULL); + if (!ep) { + ret = FFS_MEDIAERR; + goto out2; + } + + if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) { + ret = FFS_PERMISSIONERR; + goto out2; + } + + /* check whether new dir is existing directory and empty */ + if (new_inode) { + u32 entry_type; + + ret = FFS_MEDIAERR; + new_fid = &EXFAT_I(new_inode)->fid; + + update_parent_info(new_fid, new_parent_inode); + + p_dir = &(new_fid->dir); + new_entry = new_fid->entry; + ep = get_entry_in_dir(sb, p_dir, new_entry, NULL); + if (!ep) + goto out; + + entry_type = p_fs->fs_func->get_entry_type(ep); + + if (entry_type == TYPE_DIR) { + struct chain_t new_clu; + + new_clu.dir = new_fid->start_clu; + new_clu.size = (s32)((new_fid->size - 1) >> + p_fs->cluster_size_bits) + 1; + new_clu.flags = new_fid->flags; + + if (!is_dir_empty(sb, &new_clu)) { + ret = FFS_FILEEXIST; + goto out; + } + } + } + + /* check the validity of directory name in the given new pathname */ + ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name); + if (ret) + goto out2; + + fs_set_vol_flags(sb, VOL_DIRTY); + + if (olddir.dir == newdir.dir) + ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name, + fid); + else + ret = move_file(new_parent_inode, &olddir, dentry, &newdir, + &uni_name, fid); + + if ((ret == FFS_SUCCESS) && new_inode) { + /* delete entries of new_dir */ + ep = get_entry_in_dir(sb, p_dir, new_entry, NULL); + if (!ep) + goto out; + + num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir, + new_entry, ep); + if (num_entries < 0) + goto out; + p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0, + num_entries+1); + } +out: +#ifdef CONFIG_EXFAT_DELAYED_SYNC + fs_sync(sb, false); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + ret = FFS_MEDIAERR; +out2: + /* release the lock for file system critical section */ + up(&p_fs->v_sem); + + return ret; +} + +static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid) +{ + s32 dentry; + int ret = FFS_SUCCESS; + struct chain_t dir, clu_to_free; + struct dentry_t *ep; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) + return FFS_INVALIDFID; + + /* acquire the lock for file system critical section */ + down(&p_fs->v_sem); + + dir.dir = fid->dir.dir; + dir.size = fid->dir.size; + dir.flags = fid->dir.flags; + + dentry = fid->entry; + + ep = get_entry_in_dir(sb, &dir, dentry, NULL); + if (!ep) { + ret = FFS_MEDIAERR; + goto out; + } + + if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) { + ret = FFS_PERMISSIONERR; + goto out; + } + fs_set_vol_flags(sb, VOL_DIRTY); + + /* (1) update the directory entry */ + remove_file(inode, &dir, dentry); + + clu_to_free.dir = fid->start_clu; + clu_to_free.size = (s32)((fid->size-1) >> p_fs->cluster_size_bits) + 1; + clu_to_free.flags = fid->flags; + + /* (2) free the clusters */ + p_fs->fs_func->free_cluster(sb, &clu_to_free, 0); + + fid->size = 0; + fid->start_clu = CLUSTER_32(~0); + fid->flags = (p_fs->vol_type == EXFAT) ? 
0x03 : 0x01;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+	fs_sync(sb, false);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+#if 0
+/* Not currently wired up */
+static int ffsSetAttr(struct inode *inode, u32 attr)
+{
+	u32 type;
+	int ret = FFS_SUCCESS;
+	sector_t sector = 0;
+	struct dentry_t *ep;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+	u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+	struct entry_set_cache_t *es = NULL;
+
+	if (fid->attr == attr) {
+		if (p_fs->dev_ejected)
+			return FFS_MEDIAERR;
+		return FFS_SUCCESS;
+	}
+
+	if (is_dir) {
+		if ((fid->dir.dir == p_fs->root_dir) &&
+		    (fid->entry == -1)) {
+			if (p_fs->dev_ejected)
+				return FFS_MEDIAERR;
+			return FFS_SUCCESS;
+		}
+	}
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	/* get the directory entry of given file */
+	if (p_fs->vol_type == EXFAT) {
+		es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+					  ES_ALL_ENTRIES, &ep);
+		if (es == NULL) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+	} else {
+		ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+		if (!ep) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+	}
+
+	type = p_fs->fs_func->get_entry_type(ep);
+
+	if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
+	    ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
+		if (p_fs->dev_ejected)
+			ret = FFS_MEDIAERR;
+		else
+			ret = FFS_ERROR;
+
+		if (p_fs->vol_type == EXFAT)
+			release_entry_set(es);
+		goto out;
+	}
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	/* set the file attribute */
+	fid->attr = attr;
+	p_fs->fs_func->set_entry_attr(ep, attr);
+
+	if (p_fs->vol_type != EXFAT) {
+		buf_modify(sb, sector);
+	} else {
+		update_dir_checksum_with_entry_set(sb, es);
+		release_entry_set(es);
+	}
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+	fs_sync(sb, false);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+#endif
+
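+/*
+ * Fill @info with the attributes, timestamps, name and size of the file
+ * behind @inode.  The root directory is special-cased: it has no directory
+ * entry of its own, so synthetic values are reported for it.
+ */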
+static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
+{
+	sector_t sector = 0;
+	s32 count;
+	int ret = FFS_SUCCESS;
+	struct chain_t dir;
+	struct uni_name_t uni_name;
+	struct timestamp_t tm;
+	struct dentry_t *ep, *ep2;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+	struct entry_set_cache_t *es = NULL;
+	u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+	pr_debug("%s entered\n", __func__);
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	if (is_dir) {
+		if ((fid->dir.dir == p_fs->root_dir) &&
+		    (fid->entry == -1)) {
+			info->Attr = ATTR_SUBDIR;
+			memset((char *)&info->CreateTimestamp, 0,
+			       sizeof(struct date_time_t));
+			memset((char *)&info->ModifyTimestamp, 0,
+			       sizeof(struct date_time_t));
+			memset((char *)&info->AccessTimestamp, 0,
+			       sizeof(struct date_time_t));
+			strcpy(info->ShortName, ".");
+			strcpy(info->Name, ".");
+
+			dir.dir = p_fs->root_dir;
+			dir.flags = 0x01;
+
+			if (p_fs->root_dir == CLUSTER_32(0)) {
+				/* FAT16 root_dir */
+				info->Size = p_fs->dentries_in_root <<
+						DENTRY_SIZE_BITS;
+			} else {
+				info->Size = count_num_clusters(sb, &dir) <<
+						p_fs->cluster_size_bits;
+			}
+
+			count = count_dos_name_entries(sb, &dir, TYPE_DIR);
+			if (count < 0) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+			info->NumSubdirs = count;
+
+			if (p_fs->dev_ejected)
+				ret = FFS_MEDIAERR;
+			goto out;
+		}
+	}
+
+	/* get the directory entry of given file or directory */
+	if (p_fs->vol_type == EXFAT) {
+		es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+					  ES_2_ENTRIES, &ep);
+		if (es == NULL) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		ep2 = ep + 1;
+	} else {
+		ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+		if (!ep) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		ep2 = ep;
+		buf_lock(sb, sector);
+	}
+
+	/* set FILE_INFO structure using the acquired struct dentry_t */
+	info->Attr = p_fs->fs_func->get_entry_attr(ep);
+
+	p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+	info->CreateTimestamp.Year = tm.year;
+	info->CreateTimestamp.Month = tm.mon;
+	info->CreateTimestamp.Day = tm.day;
+	info->CreateTimestamp.Hour = tm.hour;
+	info->CreateTimestamp.Minute = tm.min;
+	info->CreateTimestamp.Second = tm.sec;
+	info->CreateTimestamp.MilliSecond = 0;
+
+	p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+	info->ModifyTimestamp.Year = tm.year;
+	info->ModifyTimestamp.Month = tm.mon;
+	info->ModifyTimestamp.Day = tm.day;
+	info->ModifyTimestamp.Hour = tm.hour;
+	info->ModifyTimestamp.Minute = tm.min;
+	info->ModifyTimestamp.Second = tm.sec;
+	info->ModifyTimestamp.MilliSecond = 0;
+
+	memset((char *) &info->AccessTimestamp, 0, sizeof(struct date_time_t));
+
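+	/*
+	 * Retrieve the long name from the extension entries; on FAT volumes
+	 * an entry may carry no long name, in which case the 8.3 DOS name
+	 * is converted instead.
+	 */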
+	*(uni_name.name) = 0x0;
+	/*
+	 * XXX: this is very bad for exfat because the name is already
+	 * included in es; the API should be revised.
+	 */
+	p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry,
+						   uni_name.name);
+	if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
+		get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
+					    &uni_name, 0x1);
+	nls_uniname_to_cstring(sb, info->Name, &uni_name);
+
+	if (p_fs->vol_type == EXFAT) {
+		info->NumSubdirs = 2;
+	} else {
+		buf_unlock(sb, sector);
+		get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
+					    &uni_name, 0x0);
+		nls_uniname_to_cstring(sb, info->ShortName, &uni_name);
+		info->NumSubdirs = 0;
+	}
+
+	info->Size = p_fs->fs_func->get_entry_size(ep2);
+
+	if (p_fs->vol_type == EXFAT)
+		release_entry_set(es);
+
+	if (is_dir) {
+		dir.dir = fid->start_clu;
+		dir.flags = 0x01;
+
+		if (info->Size == 0)
+			info->Size = (u64)count_num_clusters(sb, &dir) <<
+					p_fs->cluster_size_bits;
+
+		count = count_dos_name_entries(sb, &dir, TYPE_DIR);
+		if (count < 0) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		info->NumSubdirs += count;
+	}
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	pr_debug("%s exited successfully\n", __func__);
+	return ret;
+}
+
+static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
+{
+	sector_t sector = 0;
+	int ret = FFS_SUCCESS;
+	struct timestamp_t tm;
+	struct dentry_t *ep, *ep2;
+	struct entry_set_cache_t *es = NULL;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+	u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+	pr_debug("%s entered (inode %p info %p)\n", __func__, inode, info);
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	if (is_dir) {
+		if ((fid->dir.dir == p_fs->root_dir) &&
+		    (fid->entry == -1)) {
+			if (p_fs->dev_ejected)
+				ret = FFS_MEDIAERR;
+			goto out;
+		}
+	}
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	/* get the directory entry of given file or directory */
+	if (p_fs->vol_type == EXFAT) {
+		es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+					  ES_ALL_ENTRIES, &ep);
+		if (es == NULL) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		ep2 = ep + 1;
+	} else {
+		/* for other than exfat */
+		ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+		if (!ep) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		}
+		ep2 = ep;
+	}
+
+	p_fs->fs_func->set_entry_attr(ep, info->Attr);
+
+	/* set FILE_INFO structure using the acquired struct dentry_t */
+	tm.sec = info->CreateTimestamp.Second;
+	tm.min = info->CreateTimestamp.Minute;
+	tm.hour = info->CreateTimestamp.Hour;
+	tm.day = info->CreateTimestamp.Day;
+	tm.mon = info->CreateTimestamp.Month;
+	tm.year = info->CreateTimestamp.Year;
+	p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+
+	tm.sec = info->ModifyTimestamp.Second;
+	tm.min = info->ModifyTimestamp.Minute;
+	tm.hour = info->ModifyTimestamp.Hour;
+	tm.day = info->ModifyTimestamp.Day;
+	tm.mon = info->ModifyTimestamp.Month;
+	tm.year = info->ModifyTimestamp.Year;
+	p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+
+	p_fs->fs_func->set_entry_size(ep2, info->Size);
+
+	if (p_fs->vol_type != EXFAT) {
+		buf_modify(sb, sector);
+	} else {
+		update_dir_checksum_with_entry_set(sb, es);
+		release_entry_set(es);
+	}
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	pr_debug("%s exited (%d)\n", __func__, ret);
+
+	return ret;
+}
+
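+/*
+ * Map the @clu_offset'th cluster of the file behind @inode to a volume
+ * cluster number in *@clu, allocating and chaining one new cluster when
+ * the requested offset lies just past the current end of the chain.
+ */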
+static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
+{
+	s32 num_clusters, num_alloced, modified = FALSE;
+	u32 last_clu;
+	int ret = FFS_SUCCESS;
+	sector_t sector = 0;
+	struct chain_t new_clu;
+	struct dentry_t *ep;
+	struct entry_set_cache_t *es = NULL;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+	/* check the validity of pointer parameters */
+	if (clu == NULL)
+		return FFS_ERROR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
+
+	if (EXFAT_I(inode)->mmu_private == 0)
+		num_clusters = 0;
+	else
+		num_clusters = (s32)((EXFAT_I(inode)->mmu_private - 1) >>
+				     p_fs->cluster_size_bits) + 1;
+
+	*clu = last_clu = fid->start_clu;
+
+	if (fid->flags == 0x03) {
+		if ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
+			last_clu += clu_offset - 1;
+
+			if (clu_offset == num_clusters)
+				*clu = CLUSTER_32(~0);
+			else
+				*clu += clu_offset;
+		}
+	} else {
+		/* hint information */
+		if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+		    (clu_offset >= fid->hint_last_off)) {
+			clu_offset -= fid->hint_last_off;
+			*clu = fid->hint_last_clu;
+		}
+
+		while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
+			last_clu = *clu;
+			if (FAT_read(sb, *clu, clu) == -1) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+			clu_offset--;
+		}
+	}
+
+	if (*clu == CLUSTER_32(~0)) {
+		fs_set_vol_flags(sb, VOL_DIRTY);
+
+		new_clu.dir = (last_clu == CLUSTER_32(~0)) ? CLUSTER_32(~0) :
+							     last_clu + 1;
+		new_clu.size = 0;
+		new_clu.flags = fid->flags;
+
+		/* (1) allocate a cluster */
+		num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
+		if (num_alloced < 0) {
+			ret = FFS_MEDIAERR;
+			goto out;
+		} else if (num_alloced == 0) {
+			ret = FFS_FULL;
+			goto out;
+		}
+
+		/* (2) append to the FAT chain */
+		if (last_clu == CLUSTER_32(~0)) {
+			if (new_clu.flags == 0x01)
+				fid->flags = 0x01;
+			fid->start_clu = new_clu.dir;
+			modified = TRUE;
+		} else {
+			if (new_clu.flags != fid->flags) {
+				exfat_chain_cont_cluster(sb, fid->start_clu,
+							 num_clusters);
+				fid->flags = 0x01;
+				modified = TRUE;
+			}
+			if (new_clu.flags == 0x01)
+				FAT_write(sb, last_clu, new_clu.dir);
+		}
+
+		num_clusters += num_alloced;
+		*clu = new_clu.dir;
+
+		if (p_fs->vol_type == EXFAT) {
+			es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+						  ES_ALL_ENTRIES, &ep);
+			if (es == NULL) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+			/* get stream entry */
+			ep++;
+		}
+
+		/* (3) update directory entry */
+		if (modified) {
+			if (p_fs->vol_type != EXFAT) {
+				ep = get_entry_in_dir(sb, &(fid->dir),
+						      fid->entry, &sector);
+				if (!ep) {
+					ret = FFS_MEDIAERR;
+					goto out;
+				}
+			}
+
+			if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
+				p_fs->fs_func->set_entry_flag(ep, fid->flags);
+
+			if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu)
+				p_fs->fs_func->set_entry_clu0(ep,
+							      fid->start_clu);
+
+			if (p_fs->vol_type != EXFAT)
+				buf_modify(sb, sector);
+		}
+
+		if (p_fs->vol_type == EXFAT) {
+			update_dir_checksum_with_entry_set(sb, es);
+			release_entry_set(es);
+		}
+
+		/* add number of new blocks to inode */
+		inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
+	}
+
+	/* hint information */
+	fid->hint_last_off = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+	fid->hint_last_clu = *clu;
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
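+/*
+ * NB: the fid->flags values tested all over this file select between the
+ * two allocation layouts.  A minimal sketch of the lookup both layouts
+ * imply, assuming 0x03 marks a contiguous run with no FAT chain and 0x01
+ * a conventional FAT chain (the helper name below is illustrative only):
+ *
+ *	static u32 nth_cluster(struct super_block *sb,
+ *			       struct file_id_t *fid, s32 n)
+ *	{
+ *		u32 clu = fid->start_clu;
+ *
+ *		if (fid->flags == 0x03)
+ *			return clu + n;	   (contiguous: plain arithmetic)
+ *		while (n-- > 0)		   (chained: walk the FAT)
+ *			if (FAT_read(sb, clu, &clu) == -1)
+ *				return CLUSTER_32(~0);
+ *		return clu;
+ *	}
+ */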
+/*----------------------------------------------------------------------*/
+/*  Directory Operation Functions                                        */
+/*----------------------------------------------------------------------*/
+
+static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
+{
+	int ret = FFS_SUCCESS;
+	struct chain_t dir;
+	struct uni_name_t uni_name;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+	pr_debug("%s entered\n", __func__);
+
+	/* check the validity of pointer parameters */
+	if ((fid == NULL) || (path == NULL) || (*path == '\0'))
+		return FFS_ERROR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	/* check the validity of directory name in the given old pathname */
+	ret = resolve_path(inode, path, &dir, &uni_name);
+	if (ret)
+		goto out;
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	ret = create_dir(inode, &dir, &uni_name, fid);
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+	fs_sync(sb, false);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+	if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+out:
+	/* release the lock for file system critical section */
+	up(&p_fs->v_sem);
+
+	return ret;
+}
+
+static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
+{
+	int i, dentry, clu_offset;
+	int ret = FFS_SUCCESS;
+	s32 dentries_per_clu, dentries_per_clu_bits = 0;
+	u32 type;
+	sector_t sector;
+	struct chain_t dir, clu;
+	struct uni_name_t uni_name;
+	struct timestamp_t tm;
+	struct dentry_t *ep;
+	struct super_block *sb = inode->i_sb;
+	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+	struct fs_func *fs_func = p_fs->fs_func;
+	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+	/* check the validity of pointer parameters */
+	if (dir_entry == NULL)
+		return FFS_ERROR;
+
+	/* check if the given file ID is opened */
+	if (fid->type != TYPE_DIR)
+		return FFS_PERMISSIONERR;
+
+	/* acquire the lock for file system critical section */
+	down(&p_fs->v_sem);
+
+	if (fid->entry == -1) {
+		dir.dir = p_fs->root_dir;
+		dir.flags = 0x01;
+	} else {
+		dir.dir = fid->start_clu;
+		dir.size = (s32)(fid->size >> p_fs->cluster_size_bits);
+		dir.flags = fid->flags;
+	}
+
+	dentry = (s32)fid->rwoffset;
+
+	if (dir.dir == CLUSTER_32(0)) {
+		/* FAT16 root_dir */
+		dentries_per_clu = p_fs->dentries_in_root;
+
+		if (dentry == dentries_per_clu) {
+			clu.dir = CLUSTER_32(~0);
+		} else {
+			clu.dir = dir.dir;
+			clu.size = dir.size;
+			clu.flags = dir.flags;
+		}
+	} else {
+		dentries_per_clu = p_fs->dentries_per_clu;
+		dentries_per_clu_bits = ilog2(dentries_per_clu);
+
+		clu_offset = dentry >> dentries_per_clu_bits;
+		clu.dir = dir.dir;
+		clu.size = dir.size;
+		clu.flags = dir.flags;
+
+		if (clu.flags == 0x03) {
+			clu.dir += clu_offset;
+			clu.size -= clu_offset;
+		} else {
+			/* hint information */
+			if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+			    (clu_offset >= fid->hint_last_off)) {
+				clu_offset -= fid->hint_last_off;
+				clu.dir = fid->hint_last_clu;
+			}
+
+			while (clu_offset > 0) {
+				/* clu.dir = FAT_read(sb, clu.dir); */
+				if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+					ret = FFS_MEDIAERR;
+					goto out;
+				}
+				clu_offset--;
+			}
+		}
+	}
+
+	while (clu.dir != CLUSTER_32(~0)) {
+		if (p_fs->dev_ejected)
+			break;
+
+		if (dir.dir == CLUSTER_32(0)) /* FAT16 root_dir */
+			i = dentry % dentries_per_clu;
+		else
+			i = dentry & (dentries_per_clu - 1);
+
+		for ( ; i < dentries_per_clu; i++, dentry++) {
+			ep = get_entry_in_dir(sb, &clu, i, &sector);
+			if (!ep) {
+				ret = FFS_MEDIAERR;
+				goto out;
+			}
+			type = fs_func->get_entry_type(ep);
+
+			if
(type == TYPE_UNUSED) + break; + + if ((type != TYPE_FILE) && (type != TYPE_DIR)) + continue; + + buf_lock(sb, sector); + dir_entry->Attr = fs_func->get_entry_attr(ep); + + fs_func->get_entry_time(ep, &tm, TM_CREATE); + dir_entry->CreateTimestamp.Year = tm.year; + dir_entry->CreateTimestamp.Month = tm.mon; + dir_entry->CreateTimestamp.Day = tm.day; + dir_entry->CreateTimestamp.Hour = tm.hour; + dir_entry->CreateTimestamp.Minute = tm.min; + dir_entry->CreateTimestamp.Second = tm.sec; + dir_entry->CreateTimestamp.MilliSecond = 0; + + fs_func->get_entry_time(ep, &tm, TM_MODIFY); + dir_entry->ModifyTimestamp.Year = tm.year; + dir_entry->ModifyTimestamp.Month = tm.mon; + dir_entry->ModifyTimestamp.Day = tm.day; + dir_entry->ModifyTimestamp.Hour = tm.hour; + dir_entry->ModifyTimestamp.Minute = tm.min; + dir_entry->ModifyTimestamp.Second = tm.sec; + dir_entry->ModifyTimestamp.MilliSecond = 0; + + memset((char *)&dir_entry->AccessTimestamp, 0, + sizeof(struct date_time_t)); + + *(uni_name.name) = 0x0; + fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry, + uni_name.name); + if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT) + get_uni_name_from_dos_entry(sb, + (struct dos_dentry_t *)ep, + &uni_name, 0x1); + nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name); + buf_unlock(sb, sector); + + if (p_fs->vol_type == EXFAT) { + ep = get_entry_in_dir(sb, &clu, i+1, NULL); + if (!ep) { + ret = FFS_MEDIAERR; + goto out; + } + } else { + get_uni_name_from_dos_entry(sb, + (struct dos_dentry_t *)ep, + &uni_name, 0x0); + nls_uniname_to_cstring(sb, dir_entry->ShortName, + &uni_name); + } + + dir_entry->Size = fs_func->get_entry_size(ep); + + /* hint information */ + if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */ + } else { + fid->hint_last_off = dentry >> + dentries_per_clu_bits; + fid->hint_last_clu = clu.dir; + } + + fid->rwoffset = (s64) ++dentry; + + if (p_fs->dev_ejected) + ret = FFS_MEDIAERR; + goto out; + } + + if (dir.dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + /* clu.dir = FAT_read(sb, clu.dir); */ + if (FAT_read(sb, clu.dir, &clu.dir) == -1) { + ret = FFS_MEDIAERR; + goto out; + } + } + } + + *(dir_entry->Name) = '\0'; + + fid->rwoffset = (s64) ++dentry; + + if (p_fs->dev_ejected) + ret = FFS_MEDIAERR; + +out: + /* release the lock for file system critical section */ + up(&p_fs->v_sem); + + return ret; +} + +static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid) +{ + s32 dentry; + int ret = FFS_SUCCESS; + struct chain_t dir, clu_to_free; + struct super_block *sb = inode->i_sb; + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) + return FFS_INVALIDFID; + + dir.dir = fid->dir.dir; + dir.size = fid->dir.size; + dir.flags = fid->dir.flags; + + dentry = fid->entry; + + /* check if the file is "." or ".." 
*/ + if (p_fs->vol_type != EXFAT) { + if ((dir.dir != p_fs->root_dir) && (dentry < 2)) + return FFS_PERMISSIONERR; + } + + /* acquire the lock for file system critical section */ + down(&p_fs->v_sem); + + clu_to_free.dir = fid->start_clu; + clu_to_free.size = (s32)((fid->size-1) >> p_fs->cluster_size_bits) + 1; + clu_to_free.flags = fid->flags; + + if (!is_dir_empty(sb, &clu_to_free)) { + ret = FFS_FILEEXIST; + goto out; + } + + fs_set_vol_flags(sb, VOL_DIRTY); + + /* (1) update the directory entry */ + remove_file(inode, &dir, dentry); + + /* (2) free the clusters */ + p_fs->fs_func->free_cluster(sb, &clu_to_free, 1); + + fid->size = 0; + fid->start_clu = CLUSTER_32(~0); + fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + +#ifdef CONFIG_EXFAT_DELAYED_SYNC + fs_sync(sb, false); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + ret = FFS_MEDIAERR; + +out: + /* release the lock for file system critical section */ + up(&p_fs->v_sem); + + return ret; +} + +/*======================================================================*/ +/* Directory Entry Operations */ +/*======================================================================*/ + +static int exfat_readdir(struct file *filp, struct dir_context *ctx) +{ + struct inode *inode = file_inode(filp); + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct fs_info_t *p_fs = &(sbi->fs_info); + struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info); + struct dir_entry_t de; + unsigned long inum; + loff_t cpos; + int err = 0; + + __lock_super(sb); + + cpos = ctx->pos; + /* Fake . and .. for the root directory. */ + if ((p_fs->vol_type == EXFAT) || (inode->i_ino == EXFAT_ROOT_INO)) { + while (cpos < 2) { + if (inode->i_ino == EXFAT_ROOT_INO) + inum = EXFAT_ROOT_INO; + else if (cpos == 0) + inum = inode->i_ino; + else /* (cpos == 1) */ + inum = parent_ino(filp->f_path.dentry); + + if (!dir_emit_dots(filp, ctx)) + goto out; + cpos++; + ctx->pos++; + } + if (cpos == 2) + cpos = 0; + } + if (cpos & (DENTRY_SIZE - 1)) { + err = -ENOENT; + goto out; + } + +get_new: + EXFAT_I(inode)->fid.size = i_size_read(inode); + EXFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS; + + err = ffsReadDir(inode, &de); + if (err) { + /* at least we tried to read a sector + * move cpos to next sector position (should be aligned) + */ + if (err == FFS_MEDIAERR) { + cpos += 1 << p_bd->sector_size_bits; + cpos &= ~((1 << p_bd->sector_size_bits)-1); + } + + err = -EIO; + goto end_of_dir; + } + + cpos = EXFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS; + + if (!de.Name[0]) + goto end_of_dir; + + if (!memcmp(de.ShortName, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) { + inum = inode->i_ino; + } else if (!memcmp(de.ShortName, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) { + inum = parent_ino(filp->f_path.dentry); + } else { + loff_t i_pos = ((loff_t) EXFAT_I(inode)->fid.start_clu << 32) | + ((EXFAT_I(inode)->fid.rwoffset-1) & 0xffffffff); + struct inode *tmp = exfat_iget(sb, i_pos); + + if (tmp) { + inum = tmp->i_ino; + iput(tmp); + } else { + inum = iunique(sb, EXFAT_ROOT_INO); + } + } + + if (!dir_emit(ctx, de.Name, strlen(de.Name), inum, + (de.Attr & ATTR_SUBDIR) ? 
DT_DIR : DT_REG)) + goto out; + + ctx->pos = cpos; + goto get_new; + +end_of_dir: + ctx->pos = cpos; +out: + __unlock_super(sb); + return err; +} + +static int exfat_ioctl_volume_id(struct inode *dir) +{ + struct super_block *sb = dir->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct fs_info_t *p_fs = &(sbi->fs_info); + + return p_fs->vol_id; +} + +static long exfat_generic_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ +struct inode *inode = filp->f_path.dentry->d_inode; +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + unsigned int flags; +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + + switch (cmd) { + case EXFAT_IOCTL_GET_VOLUME_ID: + return exfat_ioctl_volume_id(inode); +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + case EXFAT_IOC_GET_DEBUGFLAGS: { + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + flags = sbi->debug_flags; + return put_user(flags, (int __user *)arg); + } + case EXFAT_IOC_SET_DEBUGFLAGS: { + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(flags, (int __user *) arg)) + return -EFAULT; + + __lock_super(sb); + sbi->debug_flags = flags; + __unlock_super(sb); + + return 0; + } +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + default: + return -ENOTTY; /* Inappropriate ioctl for device */ + } +} + +static const struct file_operations exfat_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .iterate = exfat_readdir, + .unlocked_ioctl = exfat_generic_ioctl, + .fsync = generic_file_fsync, +}; + +static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, + bool excl) +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct file_id_t fid; + loff_t i_pos; + int err; + + __lock_super(sb); + + pr_debug("%s entered\n", __func__); + + err = ffsCreateFile(dir, (u8 *) dentry->d_name.name, FM_REGULAR, &fid); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_FULL) + err = -ENOSPC; + else if (err == FFS_NAMETOOLONG) + err = -ENAMETOOLONG; + else + err = -EIO; + goto out; + } + INC_IVERSION(dir); + dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } + INC_IVERSION(inode); + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + /* + * timestamp is already written, so mark_inode_dirty() is unnecessary. 
+ */ + + dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); + d_instantiate(dentry, inode); + +out: + __unlock_super(sb); + pr_debug("%s exited\n", __func__); + return err; +} + +static int exfat_find(struct inode *dir, struct qstr *qname, + struct file_id_t *fid) +{ + int err; + + if (qname->len == 0) + return -ENOENT; + + err = ffsLookupFile(dir, (u8 *) qname->name, fid); + if (err) + return -ENOENT; + + return 0; +} + +static int exfat_d_anon_disconn(struct dentry *dentry) +{ + return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED); +} + +static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct dentry *alias; + int err; + struct file_id_t fid; + loff_t i_pos; + u64 ret; + mode_t i_mode; + + __lock_super(sb); + pr_debug("%s entered\n", __func__); + err = exfat_find(dir, &dentry->d_name, &fid); + if (err) { + if (err == -ENOENT) { + inode = NULL; + goto out; + } + goto error; + } + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + + i_mode = inode->i_mode; + if (S_ISLNK(i_mode) && !EXFAT_I(inode)->target) { + EXFAT_I(inode)->target = kmalloc(i_size_read(inode) + 1, + GFP_KERNEL); + if (!EXFAT_I(inode)->target) { + err = -ENOMEM; + goto error; + } + ffsReadFile(dir, &fid, EXFAT_I(inode)->target, + i_size_read(inode), &ret); + *(EXFAT_I(inode)->target + i_size_read(inode)) = '\0'; + } + + alias = d_find_alias(inode); + if (alias && !exfat_d_anon_disconn(alias)) { + BUG_ON(d_unhashed(alias)); + if (!S_ISDIR(i_mode)) + d_move(alias, dentry); + iput(inode); + __unlock_super(sb); + pr_debug("%s exited 1\n", __func__); + return alias; + } + dput(alias); +out: + __unlock_super(sb); + dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); + dentry = d_splice_alias(inode, dentry); + if (dentry) + dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); + pr_debug("%s exited 2\n", __func__); + return dentry; + +error: + __unlock_super(sb); + pr_debug("%s exited 3\n", __func__); + return ERR_PTR(err); +} + +static inline unsigned long exfat_hash(loff_t i_pos) +{ + return hash_32(i_pos, EXFAT_HASH_BITS); +} + +static void exfat_attach(struct inode *inode, loff_t i_pos) +{ + struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); + struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos); + + spin_lock(&sbi->inode_hash_lock); + EXFAT_I(inode)->i_pos = i_pos; + hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head); + spin_unlock(&sbi->inode_hash_lock); +} + +static void exfat_detach(struct inode *inode) +{ + struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); + + spin_lock(&sbi->inode_hash_lock); + hlist_del_init(&EXFAT_I(inode)->i_hash_fat); + EXFAT_I(inode)->i_pos = 0; + spin_unlock(&sbi->inode_hash_lock); +} + +static int exfat_unlink(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct super_block *sb = dir->i_sb; + int err; + + __lock_super(sb); + + pr_debug("%s entered\n", __func__); + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid)); + if (err) { + if (err == FFS_PERMISSIONERR) + err = -EPERM; + else + err = -EIO; + goto out; + } + INC_IVERSION(dir); + dir->i_mtime = dir->i_atime = current_time(dir); + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + + clear_nlink(inode); + inode->i_mtime = 
inode->i_atime = current_time(inode); + exfat_detach(inode); + remove_inode_hash(inode); + +out: + __unlock_super(sb); + pr_debug("%s exited\n", __func__); + return err; +} + +static int exfat_symlink(struct inode *dir, struct dentry *dentry, + const char *target) +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct file_id_t fid; + loff_t i_pos; + int err; + u64 len = (u64) strlen(target); + u64 ret; + + __lock_super(sb); + + pr_debug("%s entered\n", __func__); + + err = ffsCreateFile(dir, (u8 *) dentry->d_name.name, FM_SYMLINK, &fid); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_FULL) + err = -ENOSPC; + else + err = -EIO; + goto out; + } + + err = ffsWriteFile(dir, &fid, (char *) target, len, &ret); + + if (err) { + ffsRemoveFile(dir, &fid); + + if (err == FFS_FULL) + err = -ENOSPC; + else + err = -EIO; + goto out; + } + + INC_IVERSION(dir); + dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } + INC_IVERSION(inode); + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + /* timestamp is already written, so mark_inode_dirty() is unneeded. */ + + EXFAT_I(inode)->target = kmalloc(len+1, GFP_KERNEL); + if (!EXFAT_I(inode)->target) { + err = -ENOMEM; + goto out; + } + memcpy(EXFAT_I(inode)->target, target, len+1); + + dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); + d_instantiate(dentry, inode); + +out: + __unlock_super(sb); + pr_debug("%s exited\n", __func__); + return err; +} + +static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct file_id_t fid; + loff_t i_pos; + int err; + + __lock_super(sb); + + pr_debug("%s entered\n", __func__); + + err = ffsCreateDir(dir, (u8 *) dentry->d_name.name, &fid); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_FULL) + err = -ENOSPC; + else if (err == FFS_NAMETOOLONG) + err = -ENAMETOOLONG; + else + err = -EIO; + goto out; + } + INC_IVERSION(dir); + dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + inc_nlink(dir); + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } + INC_IVERSION(inode); + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + /* timestamp is already written, so mark_inode_dirty() is unneeded. 
*/ + + dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode); + d_instantiate(dentry, inode); + +out: + __unlock_super(sb); + pr_debug("%s exited\n", __func__); + return err; +} + +static int exfat_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct super_block *sb = dir->i_sb; + int err; + + __lock_super(sb); + + pr_debug("%s entered\n", __func__); + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid)); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -ENOTEMPTY; + else if (err == FFS_NOTFOUND) + err = -ENOENT; + else if (err == FFS_DIRBUSY) + err = -EBUSY; + else + err = -EIO; + goto out; + } + INC_IVERSION(dir); + dir->i_mtime = dir->i_atime = current_time(dir); + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + drop_nlink(dir); + + clear_nlink(inode); + inode->i_mtime = inode->i_atime = current_time(inode); + exfat_detach(inode); + remove_inode_hash(inode); + +out: + __unlock_super(sb); + pr_debug("%s exited\n", __func__); + return err; +} + +static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + struct inode *old_inode, *new_inode; + struct super_block *sb = old_dir->i_sb; + loff_t i_pos; + int err; + + if (flags) + return -EINVAL; + + __lock_super(sb); + + pr_debug("%s entered\n", __func__); + + old_inode = old_dentry->d_inode; + new_inode = new_dentry->d_inode; + + EXFAT_I(old_inode)->fid.size = i_size_read(old_inode); + + err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir, + new_dentry); + if (err) { + if (err == FFS_PERMISSIONERR) + err = -EPERM; + else if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_NOTFOUND) + err = -ENOENT; + else if (err == FFS_FULL) + err = -ENOSPC; + else + err = -EIO; + goto out; + } + INC_IVERSION(new_dir); + new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime = + current_time(new_dir); + if (IS_DIRSYNC(new_dir)) + (void) exfat_sync_inode(new_dir); + else + mark_inode_dirty(new_dir); + + i_pos = ((loff_t) EXFAT_I(old_inode)->fid.dir.dir << 32) | + (EXFAT_I(old_inode)->fid.entry & 0xffffffff); + + exfat_detach(old_inode); + exfat_attach(old_inode, i_pos); + if (IS_DIRSYNC(new_dir)) + (void) exfat_sync_inode(old_inode); + else + mark_inode_dirty(old_inode); + + if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) { + drop_nlink(old_dir); + if (!new_inode) + inc_nlink(new_dir); + } + INC_IVERSION(old_dir); + old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir); + if (IS_DIRSYNC(old_dir)) + (void) exfat_sync_inode(old_dir); + else + mark_inode_dirty(old_dir); + + if (new_inode) { + exfat_detach(new_inode); + drop_nlink(new_inode); + if (S_ISDIR(new_inode->i_mode)) + drop_nlink(new_inode); + new_inode->i_ctime = current_time(new_inode); + } + +out: + __unlock_super(sb); + pr_debug("%s exited\n", __func__); + return err; +} + +static int exfat_cont_expand(struct inode *inode, loff_t size) +{ + struct address_space *mapping = inode->i_mapping; + loff_t start = i_size_read(inode), count = size - i_size_read(inode); + int err, err2; + + err = generic_cont_expand_simple(inode, size); + if (err != 0) + return err; + + inode->i_ctime = inode->i_mtime = current_time(inode); + mark_inode_dirty(inode); + + if (IS_SYNC(inode)) { + err = filemap_fdatawrite_range(mapping, start, + start + count - 
1); + err2 = sync_mapping_buffers(mapping); + err = (err) ? (err) : (err2); + err2 = write_inode_now(inode, 1); + err = (err) ? (err) : (err2); + if (!err) + err = filemap_fdatawait_range(mapping, start, + start + count - 1); + } + return err; +} + +static int exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode) +{ + mode_t allow_utime = sbi->options.allow_utime; + + if (!uid_eq(current_fsuid(), inode->i_uid)) { + if (in_group_p(inode->i_gid)) + allow_utime >>= 3; + if (allow_utime & MAY_WRITE) + return 1; + } + + /* use a default check */ + return 0; +} + +static int exfat_sanitize_mode(const struct exfat_sb_info *sbi, + struct inode *inode, umode_t *mode_ptr) +{ + mode_t i_mode, mask, perm; + + i_mode = inode->i_mode; + + if (S_ISREG(i_mode) || S_ISLNK(i_mode)) + mask = sbi->options.fs_fmask; + else + mask = sbi->options.fs_dmask; + + perm = *mode_ptr & ~(S_IFMT | mask); + + /* Of the r and x bits, all (subject to umask) must be present.*/ + if ((perm & 0555) != (i_mode & 0555)) + return -EPERM; + + if (exfat_mode_can_hold_ro(inode)) { + /* + * Of the w bits, either all (subject to umask) or none must be + * present. + */ + if ((perm & 0222) && ((perm & 0222) != (0222 & ~mask))) + return -EPERM; + } else { + /* + * If exfat_mode_can_hold_ro(inode) is false, can't change w + * bits. + */ + if ((perm & 0222) != (0222 & ~mask)) + return -EPERM; + } + + *mode_ptr &= S_IFMT | perm; + + return 0; +} + +static void exfat_truncate(struct inode *inode, loff_t old_size) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct fs_info_t *p_fs = &(sbi->fs_info); + int err; + + __lock_super(sb); + + /* + * This protects against truncating a file bigger than it was then + * trying to write into the hole. + */ + if (EXFAT_I(inode)->mmu_private > i_size_read(inode)) + EXFAT_I(inode)->mmu_private = i_size_read(inode); + + if (EXFAT_I(inode)->fid.start_clu == 0) + goto out; + + err = ffsTruncateFile(inode, old_size, i_size_read(inode)); + if (err) + goto out; + + inode->i_ctime = inode->i_mtime = current_time(inode); + if (IS_DIRSYNC(inode)) + (void) exfat_sync_inode(inode); + else + mark_inode_dirty(inode); + + inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) & + ~((loff_t)p_fs->cluster_size - 1)) >> 9; +out: + __unlock_super(sb); +} + +static int exfat_setattr(struct dentry *dentry, struct iattr *attr) +{ + + struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb); + struct inode *inode = dentry->d_inode; + unsigned int ia_valid; + int error; + loff_t old_size; + + pr_debug("%s entered\n", __func__); + + if ((attr->ia_valid & ATTR_SIZE) + && (attr->ia_size > i_size_read(inode))) { + error = exfat_cont_expand(inode, attr->ia_size); + if (error || attr->ia_valid == ATTR_SIZE) + return error; + attr->ia_valid &= ~ATTR_SIZE; + } + + ia_valid = attr->ia_valid; + + if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) + && exfat_allow_set_time(sbi, inode)) { + attr->ia_valid &= ~(ATTR_MTIME_SET | + ATTR_ATIME_SET | + ATTR_TIMES_SET); + } + + error = setattr_prepare(dentry, attr); + attr->ia_valid = ia_valid; + if (error) + return error; + + if (((attr->ia_valid & ATTR_UID) && + (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) || + ((attr->ia_valid & ATTR_GID) && + (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) || + ((attr->ia_valid & ATTR_MODE) && + (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) { + return -EPERM; + } + + /* + * We don't return -EPERM here. Yes, strange, but this is too + * old behavior. 
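+ * Instead, a mode change that exfat_sanitize_mode() rejects is
+ * silently dropped below by clearing ATTR_MODE.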
+ */ + if (attr->ia_valid & ATTR_MODE) { + if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0) + attr->ia_valid &= ~ATTR_MODE; + } + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + if (attr->ia_valid & ATTR_SIZE) { + old_size = i_size_read(inode); + down_write(&EXFAT_I(inode)->truncate_lock); + truncate_setsize(inode, attr->ia_size); + exfat_truncate(inode, old_size); + up_write(&EXFAT_I(inode)->truncate_lock); + } + setattr_copy(inode, attr); + mark_inode_dirty(inode); + + pr_debug("%s exited\n", __func__); + return error; +} + +static int exfat_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) +{ + struct inode *inode = path->dentry->d_inode; + + pr_debug("%s entered\n", __func__); + + generic_fillattr(inode, stat); + stat->blksize = EXFAT_SB(inode->i_sb)->fs_info.cluster_size; + + pr_debug("%s exited\n", __func__); + return 0; +} + +static const struct inode_operations exfat_dir_inode_operations = { + .create = exfat_create, + .lookup = exfat_lookup, + .unlink = exfat_unlink, + .symlink = exfat_symlink, + .mkdir = exfat_mkdir, + .rmdir = exfat_rmdir, + .rename = exfat_rename, + .setattr = exfat_setattr, + .getattr = exfat_getattr, +}; + +/*======================================================================*/ +/* File Operations */ +/*======================================================================*/ +static const char *exfat_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *done) +{ + struct exfat_inode_info *ei = EXFAT_I(inode); + + if (ei->target != NULL) { + char *cookie = ei->target; + + if (cookie != NULL) + return (char *)(ei->target); + } + return NULL; +} + +static const struct inode_operations exfat_symlink_inode_operations = { + .get_link = exfat_get_link, +}; + +static int exfat_file_release(struct inode *inode, struct file *filp) +{ + struct super_block *sb = inode->i_sb; + + EXFAT_I(inode)->fid.size = i_size_read(inode); + ffsSyncVol(sb, false); + return 0; +} + +static const struct file_operations exfat_file_operations = { + .llseek = generic_file_llseek, + .read_iter = generic_file_read_iter, + .write_iter = generic_file_write_iter, + .mmap = generic_file_mmap, + .release = exfat_file_release, + .unlocked_ioctl = exfat_generic_ioctl, + .fsync = generic_file_fsync, + .splice_read = generic_file_splice_read, +}; + +static const struct inode_operations exfat_file_inode_operations = { + .setattr = exfat_setattr, + .getattr = exfat_getattr, +}; + +/*======================================================================*/ +/* Address Space Operations */ +/*======================================================================*/ + +static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys, + unsigned long *mapped_blocks, int *create) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct fs_info_t *p_fs = &(sbi->fs_info); + struct bd_info_t *p_bd = &(sbi->bd_info); + const unsigned long blocksize = sb->s_blocksize; + const unsigned char blocksize_bits = sb->s_blocksize_bits; + sector_t last_block; + int err, clu_offset, sec_offset; + unsigned int cluster; + + *phys = 0; + *mapped_blocks = 0; + + if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) { + if (inode->i_ino == EXFAT_ROOT_INO) { + if (sector < + (p_fs->dentries_in_root >> + (p_bd->sector_size_bits-DENTRY_SIZE_BITS))) { + *phys = sector + p_fs->root_start_sector; + *mapped_blocks = 1; + } + return 0; + } + } + + last_block = (i_size_read(inode) + (blocksize - 1)) >> 
blocksize_bits; + if (sector >= last_block) { + if (*create == 0) + return 0; + } else { + *create = 0; + } + + /* cluster offset */ + clu_offset = sector >> p_fs->sectors_per_clu_bits; + + /* sector offset in cluster */ + sec_offset = sector & (p_fs->sectors_per_clu - 1); + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + err = ffsMapCluster(inode, clu_offset, &cluster); + + if (err) { + if (err == FFS_FULL) + return -ENOSPC; + else + return -EIO; + } else if (cluster != CLUSTER_32(~0)) { + *phys = START_SECTOR(cluster) + sec_offset; + *mapped_blocks = p_fs->sectors_per_clu - sec_offset; + } + + return 0; +} + +static int exfat_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + struct super_block *sb = inode->i_sb; + unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; + int err; + unsigned long mapped_blocks; + sector_t phys; + + __lock_super(sb); + + err = exfat_bmap(inode, iblock, &phys, &mapped_blocks, &create); + if (err) { + __unlock_super(sb); + return err; + } + + if (phys) { + max_blocks = min(mapped_blocks, max_blocks); + if (create) { + EXFAT_I(inode)->mmu_private += max_blocks << + sb->s_blocksize_bits; + set_buffer_new(bh_result); + } + map_bh(bh_result, sb, phys); + } + + bh_result->b_size = max_blocks << sb->s_blocksize_bits; + __unlock_super(sb); + + return 0; +} + +static int exfat_readpage(struct file *file, struct page *page) +{ + return mpage_readpage(page, exfat_get_block); +} + +static int exfat_readpages(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned int nr_pages) +{ + return mpage_readpages(mapping, pages, nr_pages, exfat_get_block); +} + +static int exfat_writepage(struct page *page, struct writeback_control *wbc) +{ + return block_write_full_page(page, exfat_get_block, wbc); +} + +static int exfat_writepages(struct address_space *mapping, + struct writeback_control *wbc) +{ + return mpage_writepages(mapping, wbc, exfat_get_block); +} + +static void exfat_write_failed(struct address_space *mapping, loff_t to) +{ + struct inode *inode = mapping->host; + + if (to > i_size_read(inode)) { + truncate_pagecache(inode, i_size_read(inode)); + EXFAT_I(inode)->fid.size = i_size_read(inode); + exfat_truncate(inode, i_size_read(inode)); + } +} + +static int exfat_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int flags, + struct page **pagep, void **fsdata) +{ + int ret; + + *pagep = NULL; + ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + exfat_get_block, + &EXFAT_I(mapping->host)->mmu_private); + + if (ret < 0) + exfat_write_failed(mapping, pos+len); + return ret; +} + +static int exfat_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *pagep, void *fsdata) +{ + struct inode *inode = mapping->host; + struct file_id_t *fid = &(EXFAT_I(inode)->fid); + int err; + + err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); + + if (err < len) + exfat_write_failed(mapping, pos+len); + + if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) { + inode->i_mtime = inode->i_ctime = current_time(inode); + fid->attr |= ATTR_ARCHIVE; + mark_inode_dirty(inode); + } + return err; +} + +static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = iocb->ki_filp->f_mapping->host; + struct address_space *mapping = iocb->ki_filp->f_mapping; + ssize_t ret; + int rw; + + rw = iov_iter_rw(iter); 
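+
+ /*
+ * A direct write that extends past the allocated area (mmu_private)
+ * would require block allocation, which this path does not do;
+ * returning 0 below makes the VFS fall back to buffered I/O.
+ */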
+ + if (rw == WRITE) { + if (EXFAT_I(inode)->mmu_private < iov_iter_count(iter)) + return 0; + } + ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block); + + if ((ret < 0) && (rw & WRITE)) + exfat_write_failed(mapping, iov_iter_count(iter)); + return ret; +} + +static sector_t _exfat_bmap(struct address_space *mapping, sector_t block) +{ + sector_t blocknr; + + /* exfat_get_cluster() assumes the requested blocknr isn't truncated. */ + down_read(&EXFAT_I(mapping->host)->truncate_lock); + blocknr = generic_block_bmap(mapping, block, exfat_get_block); + up_read(&EXFAT_I(mapping->host)->truncate_lock); + + return blocknr; +} + +static const struct address_space_operations exfat_aops = { + .readpage = exfat_readpage, + .readpages = exfat_readpages, + .writepage = exfat_writepage, + .writepages = exfat_writepages, + .write_begin = exfat_write_begin, + .write_end = exfat_write_end, + .direct_IO = exfat_direct_IO, + .bmap = _exfat_bmap +}; + +/*======================================================================*/ +/* Super Operations */ +/*======================================================================*/ + +static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct exfat_inode_info *info; + struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos); + struct inode *inode = NULL; + + spin_lock(&sbi->inode_hash_lock); + hlist_for_each_entry(info, head, i_hash_fat) { + BUG_ON(info->vfs_inode.i_sb != sb); + + if (i_pos != info->i_pos) + continue; + inode = igrab(&info->vfs_inode); + if (inode) + break; + } + spin_unlock(&sbi->inode_hash_lock); + return inode; +} + +/* doesn't deal with root inode */ +static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid) +{ + struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); + struct fs_info_t *p_fs = &(sbi->fs_info); + struct dir_entry_t info; + + memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t)); + + ffsReadStat(inode, &info); + + EXFAT_I(inode)->i_pos = 0; + EXFAT_I(inode)->target = NULL; + inode->i_uid = sbi->options.fs_uid; + inode->i_gid = sbi->options.fs_gid; + INC_IVERSION(inode); + inode->i_generation = get_seconds(); + + if (info.Attr & ATTR_SUBDIR) { /* directory */ + inode->i_generation &= ~1; + inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777); + inode->i_op = &exfat_dir_inode_operations; + inode->i_fop = &exfat_dir_operations; + + i_size_write(inode, info.Size); + EXFAT_I(inode)->mmu_private = i_size_read(inode); + set_nlink(inode, info.NumSubdirs); + } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */ + inode->i_generation |= 1; + inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777); + inode->i_op = &exfat_symlink_inode_operations; + + i_size_write(inode, info.Size); + EXFAT_I(inode)->mmu_private = i_size_read(inode); + } else { /* regular file */ + inode->i_generation |= 1; + inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777); + inode->i_op = &exfat_file_inode_operations; + inode->i_fop = &exfat_file_operations; + inode->i_mapping->a_ops = &exfat_aops; + inode->i_mapping->nrpages = 0; + + i_size_write(inode, info.Size); + EXFAT_I(inode)->mmu_private = i_size_read(inode); + } + exfat_save_attr(inode, info.Attr); + + inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) + & ~((loff_t)p_fs->cluster_size - 1)) >> 9; + + exfat_time_fat2unix(sbi, &inode->i_mtime, &info.ModifyTimestamp); + exfat_time_fat2unix(sbi, &inode->i_ctime, &info.CreateTimestamp); + exfat_time_fat2unix(sbi, &inode->i_atime, 
&info.AccessTimestamp); + + return 0; +} + +static struct inode *exfat_build_inode(struct super_block *sb, + struct file_id_t *fid, loff_t i_pos) +{ + struct inode *inode; + int err; + + inode = exfat_iget(sb, i_pos); + if (inode) + goto out; + inode = new_inode(sb); + if (!inode) { + inode = ERR_PTR(-ENOMEM); + goto out; + } + inode->i_ino = iunique(sb, EXFAT_ROOT_INO); + SET_IVERSION(inode, 1); + err = exfat_fill_inode(inode, fid); + if (err) { + iput(inode); + inode = ERR_PTR(err); + goto out; + } + exfat_attach(inode, i_pos); + insert_inode_hash(inode); +out: + return inode; +} + +static int exfat_sync_inode(struct inode *inode) +{ + return exfat_write_inode(inode, NULL); +} + +static struct inode *exfat_alloc_inode(struct super_block *sb) +{ + struct exfat_inode_info *ei; + + ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS); + if (!ei) + return NULL; + + init_rwsem(&ei->truncate_lock); + + return &ei->vfs_inode; +} + +static void exfat_destroy_inode(struct inode *inode) +{ + if (EXFAT_I(inode)->target) + kfree(EXFAT_I(inode)->target); + EXFAT_I(inode)->target = NULL; + + kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode)); +} + +static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct dir_entry_t info; + + if (inode->i_ino == EXFAT_ROOT_INO) + return 0; + + info.Attr = exfat_make_attr(inode); + info.Size = i_size_read(inode); + + exfat_time_unix2fat(sbi, &inode->i_mtime, &info.ModifyTimestamp); + exfat_time_unix2fat(sbi, &inode->i_ctime, &info.CreateTimestamp); + exfat_time_unix2fat(sbi, &inode->i_atime, &info.AccessTimestamp); + + ffsWriteStat(inode, &info); + + return 0; +} + +static void exfat_evict_inode(struct inode *inode) +{ + truncate_inode_pages(&inode->i_data, 0); + + if (!inode->i_nlink) + i_size_write(inode, 0); + invalidate_inode_buffers(inode); + clear_inode(inode); + exfat_detach(inode); + + remove_inode_hash(inode); +} + +static void exfat_free_super(struct exfat_sb_info *sbi) +{ + if (sbi->nls_disk) + unload_nls(sbi->nls_disk); + if (sbi->nls_io) + unload_nls(sbi->nls_io); + if (sbi->options.iocharset != exfat_default_iocharset) + kfree(sbi->options.iocharset); + /* mutex_init is in exfat_fill_super function. 
only for 3.7+ */ + mutex_destroy(&sbi->s_lock); + kfree(sbi); +} + +static void exfat_put_super(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + if (__is_sb_dirty(sb)) + exfat_write_super(sb); + + ffsUmountVol(sb); + + sb->s_fs_info = NULL; + exfat_free_super(sbi); +} + +static void exfat_write_super(struct super_block *sb) +{ + __lock_super(sb); + + __set_sb_clean(sb); + + if (!sb_rdonly(sb)) + ffsSyncVol(sb, true); + + __unlock_super(sb); +} + +static int exfat_sync_fs(struct super_block *sb, int wait) +{ + int err = 0; + + if (__is_sb_dirty(sb)) { + __lock_super(sb); + __set_sb_clean(sb); + err = ffsSyncVol(sb, true); + __unlock_super(sb); + } + + return err; +} + +static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct super_block *sb = dentry->d_sb; + u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info); + struct vol_info_t info; + + if (p_fs->used_clusters == (u32) ~0) { + if (ffsGetVolInfo(sb, &info) == FFS_MEDIAERR) + return -EIO; + + } else { + info.FatType = p_fs->vol_type; + info.ClusterSize = p_fs->cluster_size; + info.NumClusters = p_fs->num_clusters - 2; + info.UsedClusters = p_fs->used_clusters; + info.FreeClusters = info.NumClusters - info.UsedClusters; + + if (p_fs->dev_ejected) + pr_info("[EXFAT] statfs on device that is ejected\n"); + } + + buf->f_type = sb->s_magic; + buf->f_bsize = info.ClusterSize; + buf->f_blocks = info.NumClusters; + buf->f_bfree = info.FreeClusters; + buf->f_bavail = info.FreeClusters; + buf->f_fsid.val[0] = (u32)id; + buf->f_fsid.val[1] = (u32)(id >> 32); + buf->f_namelen = 260; + + return 0; +} + +static int exfat_remount(struct super_block *sb, int *flags, char *data) +{ + *flags |= SB_NODIRATIME; + return 0; +} + +static int exfat_show_options(struct seq_file *m, struct dentry *root) +{ + struct exfat_sb_info *sbi = EXFAT_SB(root->d_sb); + struct exfat_mount_options *opts = &sbi->options; + + if (__kuid_val(opts->fs_uid)) + seq_printf(m, ",uid=%u", __kuid_val(opts->fs_uid)); + if (__kgid_val(opts->fs_gid)) + seq_printf(m, ",gid=%u", __kgid_val(opts->fs_gid)); + seq_printf(m, ",fmask=%04o", opts->fs_fmask); + seq_printf(m, ",dmask=%04o", opts->fs_dmask); + if (opts->allow_utime) + seq_printf(m, ",allow_utime=%04o", opts->allow_utime); + if (sbi->nls_disk) + seq_printf(m, ",codepage=%s", sbi->nls_disk->charset); + if (sbi->nls_io) + seq_printf(m, ",iocharset=%s", sbi->nls_io->charset); + seq_printf(m, ",namecase=%u", opts->casesensitive); + if (opts->errors == EXFAT_ERRORS_CONT) + seq_puts(m, ",errors=continue"); + else if (opts->errors == EXFAT_ERRORS_PANIC) + seq_puts(m, ",errors=panic"); + else + seq_puts(m, ",errors=remount-ro"); +#ifdef CONFIG_EXFAT_DISCARD + if (opts->discard) + seq_puts(m, ",discard"); +#endif + return 0; +} + +static const struct super_operations exfat_sops = { + .alloc_inode = exfat_alloc_inode, + .destroy_inode = exfat_destroy_inode, + .write_inode = exfat_write_inode, + .evict_inode = exfat_evict_inode, + .put_super = exfat_put_super, + .sync_fs = exfat_sync_fs, + .statfs = exfat_statfs, + .remount_fs = exfat_remount, + .show_options = exfat_show_options, +}; + +/*======================================================================*/ +/* Export Operations */ +/*======================================================================*/ + +static struct inode *exfat_nfs_get_inode(struct super_block *sb, u64 ino, + u32 generation) +{ + struct inode *inode = NULL; + + if (ino < EXFAT_ROOT_INO) + return inode; + inode = 
ilookup(sb, ino); + + if (inode && generation && (inode->i_generation != generation)) { + iput(inode); + inode = NULL; + } + + return inode; +} + +static struct dentry *exfat_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, + int fh_type) +{ + return generic_fh_to_dentry(sb, fid, fh_len, fh_type, + exfat_nfs_get_inode); +} + +static struct dentry *exfat_fh_to_parent(struct super_block *sb, + struct fid *fid, int fh_len, + int fh_type) +{ + return generic_fh_to_parent(sb, fid, fh_len, fh_type, + exfat_nfs_get_inode); +} + +static const struct export_operations exfat_export_ops = { + .fh_to_dentry = exfat_fh_to_dentry, + .fh_to_parent = exfat_fh_to_parent, +}; + +/*======================================================================*/ +/* Super Block Read Operations */ +/*======================================================================*/ + +enum { + Opt_uid, + Opt_gid, + Opt_umask, + Opt_dmask, + Opt_fmask, + Opt_allow_utime, + Opt_codepage, + Opt_charset, + Opt_namecase, + Opt_debug, + Opt_err_cont, + Opt_err_panic, + Opt_err_ro, + Opt_utf8_hack, + Opt_err, +#ifdef CONFIG_EXFAT_DISCARD + Opt_discard, +#endif /* EXFAT_CONFIG_DISCARD */ +}; + +static const match_table_t exfat_tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_umask, "umask=%o"}, + {Opt_dmask, "dmask=%o"}, + {Opt_fmask, "fmask=%o"}, + {Opt_allow_utime, "allow_utime=%o"}, + {Opt_codepage, "codepage=%u"}, + {Opt_charset, "iocharset=%s"}, + {Opt_namecase, "namecase=%u"}, + {Opt_debug, "debug"}, + {Opt_err_cont, "errors=continue"}, + {Opt_err_panic, "errors=panic"}, + {Opt_err_ro, "errors=remount-ro"}, + {Opt_utf8_hack, "utf8"}, +#ifdef CONFIG_EXFAT_DISCARD + {Opt_discard, "discard"}, +#endif /* CONFIG_EXFAT_DISCARD */ + {Opt_err, NULL} +}; + +static int parse_options(char *options, int silent, int *debug, + struct exfat_mount_options *opts) +{ + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *iocharset; + + opts->fs_uid = current_uid(); + opts->fs_gid = current_gid(); + opts->fs_fmask = opts->fs_dmask = current->fs->umask; + opts->allow_utime = (unsigned short) -1; + opts->codepage = exfat_default_codepage; + opts->iocharset = exfat_default_iocharset; + opts->casesensitive = 0; + opts->errors = EXFAT_ERRORS_RO; +#ifdef CONFIG_EXFAT_DISCARD + opts->discard = 0; +#endif + *debug = 0; + + if (!options) + goto out; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + + if (!*p) + continue; + + token = match_token(p, exfat_tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return 0; + opts->fs_uid = KUIDT_INIT(option); + break; + case Opt_gid: + if (match_int(&args[0], &option)) + return 0; + opts->fs_gid = KGIDT_INIT(option); + break; + case Opt_umask: + case Opt_dmask: + case Opt_fmask: + if (match_octal(&args[0], &option)) + return 0; + if (token != Opt_dmask) + opts->fs_fmask = option; + if (token != Opt_fmask) + opts->fs_dmask = option; + break; + case Opt_allow_utime: + if (match_octal(&args[0], &option)) + return 0; + opts->allow_utime = option & 0022; + break; + case Opt_codepage: + if (match_int(&args[0], &option)) + return 0; + opts->codepage = option; + break; + case Opt_charset: + if (opts->iocharset != exfat_default_iocharset) + kfree(opts->iocharset); + iocharset = match_strdup(&args[0]); + if (!iocharset) + return -ENOMEM; + opts->iocharset = iocharset; + break; + case Opt_namecase: + if (match_int(&args[0], &option)) + return 0; + opts->casesensitive = option; + break; + case Opt_err_cont: + opts->errors = 
EXFAT_ERRORS_CONT;
+ break;
+ case Opt_err_panic:
+ opts->errors = EXFAT_ERRORS_PANIC;
+ break;
+ case Opt_err_ro:
+ opts->errors = EXFAT_ERRORS_RO;
+ break;
+ case Opt_debug:
+ *debug = 1;
+ break;
+#ifdef CONFIG_EXFAT_DISCARD
+ case Opt_discard:
+ opts->discard = 1;
+ break;
+#endif /* CONFIG_EXFAT_DISCARD */
+ case Opt_utf8_hack:
+ break;
+ default:
+ if (!silent)
+ pr_err("[EXFAT] Unrecognized mount option %s or missing value\n",
+ p);
+ return -EINVAL;
+ }
+ }
+
+out:
+ if (opts->allow_utime == (unsigned short) -1)
+ opts->allow_utime = ~opts->fs_dmask & 0022;
+
+ return 0;
+}
+
+static void exfat_hash_init(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int i;
+
+ spin_lock_init(&sbi->inode_hash_lock);
+ for (i = 0; i < EXFAT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+}
+
+static int exfat_read_root(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct dir_entry_t info;
+
+ EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
+ EXFAT_I(inode)->fid.dir.flags = 0x01;
+ EXFAT_I(inode)->fid.entry = -1;
+ EXFAT_I(inode)->fid.start_clu = p_fs->root_dir;
+ EXFAT_I(inode)->fid.flags = 0x01;
+ EXFAT_I(inode)->fid.type = TYPE_DIR;
+ EXFAT_I(inode)->fid.rwoffset = 0;
+ EXFAT_I(inode)->fid.hint_last_off = -1;
+
+ EXFAT_I(inode)->target = NULL;
+
+ ffsReadStat(inode, &info);
+
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ INC_IVERSION(inode);
+ inode->i_generation = 0;
+ inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+
+ i_size_write(inode, info.Size);
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
+ & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+ EXFAT_I(inode)->i_pos = ((loff_t) p_fs->root_dir << 32) | 0xffffffff;
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+
+ exfat_save_attr(inode, ATTR_SUBDIR);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ set_nlink(inode, info.NumSubdirs + 2);
+
+ return 0;
+}
+
+static void setup_dops(struct super_block *sb)
+{
+ if (EXFAT_SB(sb)->options.casesensitive == 0)
+ sb->s_d_op = &exfat_ci_dentry_ops;
+ else
+ sb->s_d_op = &exfat_dentry_ops;
+}
+
+static int exfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode = NULL;
+ struct exfat_sb_info *sbi;
+ int debug, ret;
+ long error;
+ char buf[50];
+
+ /*
+ * GFP_KERNEL is ok here, because while we do hold the
+ * superblock lock, memory pressure can't call back into
+ * the filesystem, since we're only just about to mount
+ * it and have no inodes etc active!
+ */
+ sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+ mutex_init(&sbi->s_lock);
+ sb->s_fs_info = sbi;
+ sb->s_flags |= SB_NODIRATIME;
+ sb->s_magic = EXFAT_SUPER_MAGIC;
+ sb->s_op = &exfat_sops;
+ sb->s_export_op = &exfat_export_ops;
+
+ error = parse_options(data, silent, &debug, &sbi->options);
+ if (error)
+ goto out_fail;
+
+ setup_dops(sb);
+
+ error = -EIO;
+ sb_min_blocksize(sb, 512);
+ sb->s_maxbytes = 0x7fffffffffffffffLL; /* maximum file size */
+
+ ret = ffsMountVol(sb);
+ if (ret) {
+ if (!silent)
+ pr_err("[EXFAT] ffsMountVol failed\n");
+
+ goto out_fail;
+ }
+
+ /* set up enough so that it can read an inode */
+ exfat_hash_init(sb);
+
+ /*
+ * The low byte of the FAT's first entry must have the same value as
+ * the media field.
+ * In the real world, however, too many devices write the wrong
+ * value, so that validity check was removed:
+ *
+ * if (FAT_FIRST_ENT(sb, media) != first)
+ */
+
+ /* codepage is not meaningful in exfat */
+ if (sbi->fs_info.vol_type != EXFAT) {
+ error = -EINVAL;
+ sprintf(buf, "cp%d", sbi->options.codepage);
+ sbi->nls_disk = load_nls(buf);
+ if (!sbi->nls_disk) {
+ pr_err("[EXFAT] Codepage %s not found\n", buf);
+ goto out_fail2;
+ }
+ }
+
+ sbi->nls_io = load_nls(sbi->options.iocharset);
+
+ error = -ENOMEM;
+ root_inode = new_inode(sb);
+ if (!root_inode)
+ goto out_fail2;
+ root_inode->i_ino = EXFAT_ROOT_INO;
+ SET_IVERSION(root_inode, 1);
+
+ error = exfat_read_root(root_inode);
+ if (error < 0)
+ goto out_fail2;
+ error = -ENOMEM;
+ exfat_attach(root_inode, EXFAT_I(root_inode)->i_pos);
+ insert_inode_hash(root_inode);
+ sb->s_root = d_make_root(root_inode);
+ if (!sb->s_root) {
+ pr_err("[EXFAT] Getting the root inode failed\n");
+ goto out_fail2;
+ }
+
+ return 0;
+
+out_fail2:
+ ffsUmountVol(sb);
+out_fail:
+ if (root_inode)
+ iput(root_inode);
+ sb->s_fs_info = NULL;
+ exfat_free_super(sbi);
+ return error;
+}
+
+static struct dentry *exfat_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, exfat_fill_super);
+}
+
+static void init_once(void *foo)
+{
+ struct exfat_inode_info *ei = (struct exfat_inode_info *)foo;
+
+ INIT_HLIST_NODE(&ei->i_hash_fat);
+ inode_init_once(&ei->vfs_inode);
+}
+
+static int __init exfat_init_inodecache(void)
+{
+ exfat_inode_cachep = kmem_cache_create("exfat_inode_cache",
+ sizeof(struct exfat_inode_info),
+ 0,
+ (SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (exfat_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit exfat_destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed RCU-freed inodes are flushed before we
+ * destroy the cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(exfat_inode_cachep);
+}
+
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+static void exfat_debug_kill_sb(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct block_device *bdev = sb->s_bdev;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ long flags;
+
+ if (sbi) {
+ flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_INVALID_UMOUNT) {
+ /*
+ * invalidate_bdev() drops all device caches, including
+ * dirty data. We use this to simulate device removal.
+ */ + down(&p_fs->v_sem); + FAT_release_all(sb); + buf_release_all(sb); + up(&p_fs->v_sem); + + invalidate_bdev(bdev); + } + } + + kill_block_super(sb); +} +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + +static struct file_system_type exfat_fs_type = { + .owner = THIS_MODULE, + .name = "exfat", + .mount = exfat_fs_mount, +#ifdef CONFIG_EXFAT_KERNEL_DEBUG + .kill_sb = exfat_debug_kill_sb, +#else + .kill_sb = kill_block_super, +#endif /* CONFIG_EXFAT_KERNEL_DEBUG */ + .fs_flags = FS_REQUIRES_DEV, +}; + +static int __init init_exfat(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct dos_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct ext_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct file_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct strm_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct name_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct bmap_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct case_dentry_t) != DENTRY_SIZE); + BUILD_BUG_ON(sizeof(struct volm_dentry_t) != DENTRY_SIZE); + + pr_info("exFAT: Version %s\n", EXFAT_VERSION); + + err = exfat_init_inodecache(); + if (err) + return err; + + err = register_filesystem(&exfat_fs_type); + if (err) + return err; + + return 0; +} + +static void __exit exit_exfat(void) +{ + exfat_destroy_inodecache(); + unregister_filesystem(&exfat_fs_type); +} + +module_init(init_exfat); +module_exit(exit_exfat); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("exFAT Filesystem Driver"); +MODULE_ALIAS_FS("exfat"); diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c new file mode 100644 index 000000000000..366082fb3dab --- /dev/null +++ b/drivers/staging/exfat/exfat_upcase.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
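+ *
+ * Default up-case table for exFAT file-name comparison: pairs of
+ * bytes form little-endian 16-bit code points, and 0xFFFF entries
+ * introduce identity-mapped runs, as in the compressed up-case
+ * table format of the exFAT specification.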
+ */ + +#include +#include "exfat.h" + +const u8 uni_upcase[NUM_UPCASE << 1] = { + 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, + 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, + 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00, + 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00, + 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00, + 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00, + 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00, + 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00, + 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, + 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00, + 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00, + 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00, + 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00, + 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00, + 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00, + 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00, + 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, + 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, + 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, + 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00, + 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, + 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, + 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00, + 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00, + 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, + 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, + 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, + 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00, + 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, + 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, + 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00, + 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00, + 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, + 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00, + 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00, + 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00, + 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00, + 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00, + 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00, + 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00, + 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00, + 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00, + 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00, + 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00, + 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00, + 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00, + 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00, + 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00, + 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, + 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00, + 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, + 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00, + 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, + 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00, + 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, + 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00, + 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, + 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00, + 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, + 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00, + 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, + 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00, + 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, + 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01, + 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01, + 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01, + 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01, + 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01, + 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01, + 0x14, 0x01, 0x14, 
0x01, 0x16, 0x01, 0x16, 0x01, + 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01, + 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01, + 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01, + 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01, + 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01, + 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01, + 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01, + 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01, + 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01, + 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01, + 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01, + 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01, + 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01, + 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01, + 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01, + 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01, + 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01, + 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01, + 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01, + 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01, + 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01, + 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01, + 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01, + 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01, + 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01, + 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01, + 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01, + 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01, + 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01, + 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01, + 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01, + 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01, + 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01, + 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01, + 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01, + 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01, + 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01, + 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01, + 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01, + 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01, + 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01, + 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01, + 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01, + 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01, + 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01, + 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01, + 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01, + 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01, + 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01, + 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01, + 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01, + 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01, + 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01, + 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01, + 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01, + 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01, + 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01, + 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01, + 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02, + 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02, + 0x0C, 0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02, + 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02, + 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02, + 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02, + 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02, + 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02, + 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02, + 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02, + 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02, + 0x30, 0x02, 0x30, 
0x02, 0x32, 0x02, 0x32, 0x02, + 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02, + 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02, + 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02, + 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02, + 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02, + 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02, + 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02, + 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01, + 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01, + 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01, + 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02, + 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01, + 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02, + 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C, + 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01, + 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02, + 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02, + 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02, + 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02, + 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01, + 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02, + 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01, + 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02, + 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02, + 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02, + 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02, + 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02, + 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02, + 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02, + 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02, + 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02, + 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02, + 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02, + 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02, + 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02, + 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02, + 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02, + 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02, + 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02, + 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02, + 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02, + 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02, + 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02, + 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02, + 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02, + 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02, + 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02, + 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02, + 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02, + 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02, + 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02, + 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03, + 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03, + 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03, + 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03, + 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03, + 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03, + 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03, + 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03, + 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03, + 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03, + 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03, + 0x2C, 0x03, 0x2D, 0x03, 0x2E, 0x03, 0x2F, 0x03, + 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03, + 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03, + 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03, + 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03, + 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03, + 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03, + 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03, + 0x4C, 0x03, 0x4D, 
0x03, 0x4E, 0x03, 0x4F, 0x03, + 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03, + 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03, + 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03, + 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03, + 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03, + 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03, + 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03, + 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03, + 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03, + 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03, + 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03, + 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03, + 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03, + 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03, + 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03, + 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03, + 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, + 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03, + 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, + 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03, + 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03, + 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03, + 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, + 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, + 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, + 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03, + 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, + 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03, + 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03, + 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03, + 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, + 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03, + 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03, + 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03, + 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03, + 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03, + 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03, + 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03, + 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03, + 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03, + 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03, + 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03, + 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03, + 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03, + 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, + 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04, + 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, + 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04, + 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, + 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04, + 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, + 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04, + 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, + 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04, + 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, + 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04, + 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, + 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04, + 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, + 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04, + 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, + 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04, + 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, + 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04, + 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, + 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04, + 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, + 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04, + 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04, + 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04, + 0x68, 0x04, 0x68, 
0x04, 0x6A, 0x04, 0x6A, 0x04, + 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04, + 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04, + 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04, + 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04, + 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04, + 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04, + 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04, + 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04, + 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04, + 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04, + 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04, + 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04, + 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04, + 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04, + 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04, + 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04, + 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04, + 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04, + 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04, + 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04, + 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04, + 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04, + 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04, + 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04, + 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04, + 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04, + 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04, + 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04, + 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04, + 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04, + 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04, + 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04, + 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04, + 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04, + 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04, + 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04, + 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04, + 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05, + 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05, + 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05, + 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05, + 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05, + 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05, + 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05, + 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05, + 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05, + 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05, + 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05, + 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05, + 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, + 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05, + 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, + 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05, + 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, + 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05, + 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, + 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05, + 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, + 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05, + 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05, + 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05, + 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, + 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05, + 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, + 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05, + 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, + 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05, + 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, + 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05, + 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, + 0x54, 0x05, 0x55, 
0x05, 0x56, 0x05, 0xFF, 0xFF, + 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D, + 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D, + 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D, + 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D, + 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D, + 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D, + 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D, + 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D, + 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D, + 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D, + 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D, + 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D, + 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D, + 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D, + 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D, + 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D, + 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D, + 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D, + 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D, + 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D, + 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D, + 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D, + 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D, + 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D, + 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D, + 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D, + 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D, + 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D, + 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D, + 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D, + 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D, + 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D, + 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D, + 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E, + 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E, + 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E, + 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E, + 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E, + 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E, + 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E, + 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, + 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E, + 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E, + 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E, + 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E, + 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E, + 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E, + 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E, + 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E, + 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E, + 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E, + 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E, + 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E, + 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E, + 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E, + 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E, + 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E, + 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E, + 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E, + 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E, + 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E, + 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E, + 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E, + 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E, + 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E, + 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E, + 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E, + 0x88, 0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E, + 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E, + 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E, + 0x94, 0x1E, 0x94, 
0x1E, 0x96, 0x1E, 0x97, 0x1E,
+	0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E,
+	0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E,
+	0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E,
+	0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E,
+	0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E,
+	0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E,
+	0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E,
+	0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E,
+	0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E,
+	0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E,
+	0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E,
+	0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E,
+	0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E,
+	0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E,
+	0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E,
+	0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E,
+	0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E,
+	0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E,
+	0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E,
+	0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E,
+	0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E,
+	0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E,
+	0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E,
+	0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E,
+	0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E,
+	0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E,
+	0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+	0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
+	0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+	0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
+	0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+	0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F,
+	0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+	0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F,
+	0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+	0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
+	0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+	0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
+	0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+	0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
+	0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+	0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
+	0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+	0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F,
+	0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+	0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F,
+	0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F,
+	0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F,
+	0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F,
+	0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F,
+	0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+	0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
+	0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+	0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
+	0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F,
+	0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+	0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+	0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F,
+	0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+	0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
+	0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+	0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
+	0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+	0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
+	0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+	0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
+	0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+	0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
+	0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+	0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
+	0xB8, 0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F,
+	0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F,
+	0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F,
+	0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F,
+	0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F,
+	0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F,
+	0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F,
+	0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F,
+	0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F,
+	0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F,
+	0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+	0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F,
+	0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F,
+	0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F,
+	0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+	0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F,
+	0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F,
+	0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F,
+	0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F,
+	0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F,
+	0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20,
+	0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20,
+	0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20,
+	0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20,
+	0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20,
+	0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20,
+	0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20,
+	0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20,
+	0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20,
+	0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20,
+	0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20,
+	0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20,
+	0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20,
+	0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20,
+	0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20,
+	0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20,
+	0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20,
+	0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20,
+	0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20,
+	0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20,
+	0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20,
+	0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20,
+	0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20,
+	0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20,
+	0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20,
+	0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20,
+	0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20,
+	0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20,
+	0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20,
+	0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20,
+	0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20,
+	0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20,
+	0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20,
+	0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20,
+	0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20,
+	0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20,
+	0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20,
+	0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20,
+	0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20,
+	0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20,
+	0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20,
+	0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20,
+	0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20,
+	0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20,
+	0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20,
+	0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20,
+	0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20,
+	0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20,
+	0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20,
+	0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20,
+	0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20,
+	0xCC, 0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20,
+	0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20,
+	0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20,
+	0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20,
+	0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20,
+	0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20,
+	0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20,
+	0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20,
+	0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20,
+	0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20,
+	0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20,
+	0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20,
+	0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20,
+	0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21,
+	0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21,
+	0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21,
+	0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21,
+	0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21,
+	0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21,
+	0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21,
+	0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21,
+	0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21,
+	0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21,
+	0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21,
+	0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21,
+	0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21,
+	0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21,
+	0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21,
+	0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21,
+	0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21,
+	0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21,
+	0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21,
+	0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21,
+	0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21,
+	0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21,
+	0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21,
+	0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21,
+	0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+	0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
+	0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+	0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
+	0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+	0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
+	0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+	0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
+	0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21,
+	0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24,
+	0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24,
+	0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24,
+	0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24,
+	0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24,
+	0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24,
+	0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24,
+	0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C,
+	0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C,
+	0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C,
+	0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C,
+	0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C,
+	0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C,
+	0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C,
+	0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C,
+	0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C,
+	0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C,
+	0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C,
+	0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C,
+	0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C,
+	0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C,
+	0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C,
+	0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C,
+	0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C,
+	0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C,
+	0x75, 0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C,
+	0x79, 0x2C, 0x7A, 0x2C, 0x7B, 0x2C, 0x7C, 0x2C,
+	0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C,
+	0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C,
+	0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C,
+	0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C,
+	0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C,
+	0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C,
+	0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C,
+	0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C,
+	0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C,
+	0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C,
+	0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C,
+	0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C,
+	0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C,
+	0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C,
+	0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C,
+	0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C,
+	0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C,
+	0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C,
+	0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C,
+	0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C,
+	0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C,
+	0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C,
+	0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C,
+	0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C,
+	0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C,
+	0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C,
+	0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C,
+	0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C,
+	0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C,
+	0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C,
+	0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C,
+	0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C,
+	0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10,
+	0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10,
+	0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10,
+	0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10,
+	0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10,
+	0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10,
+	0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10,
+	0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10,
+	0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10,
+	0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10,
+	0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF,
+	0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF,
+	0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF,
+	0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF,
+	0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF,
+	0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF,
+	0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF,
+	0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF,
+	0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF,
+	0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF,
+	0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF,
+	0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF,
+	0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF,
+	0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF,
+	0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF,
+	0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF,
+	0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF,
+	0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF,
+	0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF,
+	0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF,
+	0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF,
+	0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF,
+	0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF,
+	0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF,
+	0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF,
+	0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF,
+	0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF,
+	0xAA, 0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF,
+	0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF,
+	0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF,
+	0xB6, 0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF,
+	0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF,
+	0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF,
+	0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF,
+	0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF,
+	0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF,
+	0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF,
+	0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF,
+	0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF,
+	0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF,
+	0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF,
+	0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF,
+	0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF,
+	0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF,
+	0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF,
+	0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF,
+	0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF,
+	0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF,
+	0xFE, 0xFF, 0xFF, 0xFF
+};
--
cgit v1.2.3-59-g8ed1b


From 5cf89673645f7edfa075afc9e1d531db24706329 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Fri, 30 Aug 2019 08:56:37 +0200
Subject: MAINTAINERS: fix up exfat directory location

I messed up on the exfat MAINTAINER entry, the code is in
drivers/staging/exfat/ not fs/exfat/

Cc: Valdis Kletnieks
Reported-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 MAINTAINERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'MAINTAINERS')

diff --git a/MAINTAINERS b/MAINTAINERS
index a484b36e5117..d39c510037a7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6100,7 +6100,7 @@ F:	include/uapi/linux/mii.h
 EXFAT FILE SYSTEM
 M:	Valdis Kletnieks
 S:	Maintained
-F:	fs/exfat/
+F:	drivers/staging/exfat/
 
 EXT2 FILE SYSTEM
 M:	Jan Kara
--
cgit v1.2.3-59-g8ed1b
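
A note on the byte array that closes the previous patch: it appears to be a compressed
upcase (case-folding) table in the style used by exFAT, which fits the staging exfat
import that the MAINTAINERS fix above refers to. The stream is a sequence of
little-endian u16 values: a plain value is the uppercase mapping for the current code
point (for example, the stored byte pair 0xD8, 0x1F decodes to the mapping value
0x1FD8), and the marker 0xFFFF followed by a count skips that many code points,
leaving them mapped to themselves. The C sketch below shows how such a stream could
be expanded into a flat lookup table. It is an illustration under those assumptions
only, not code from any of the patches above, and every name in it (expand_upcase,
UPCASE_MARKER, UPCASE_ENTRIES) is hypothetical.

#include <stddef.h>
#include <stdint.h>

#define UPCASE_MARKER	0xFFFF	/* escape value: next u16 is an identity-run length */
#define UPCASE_ENTRIES	65536	/* one entry per BMP code point */

/*
 * Expand a compressed upcase stream into a flat uppercase lookup table.
 * Plain u16 values are explicit mappings for consecutive code points;
 * UPCASE_MARKER followed by a count leaves that many code points mapped
 * to themselves.
 */
static void expand_upcase(const uint8_t *src, size_t src_bytes,
			  uint16_t upcase[UPCASE_ENTRIES])
{
	size_t i = 0;
	uint32_t cp = 0;	/* code point currently being defined */

	/* default: every code point uppercases to itself */
	for (uint32_t c = 0; c < UPCASE_ENTRIES; c++)
		upcase[c] = (uint16_t)c;

	while (i + 1 < src_bytes && cp < UPCASE_ENTRIES) {
		/* assemble one little-endian u16: low byte first */
		uint16_t v = (uint16_t)(src[i] | (src[i + 1] << 8));

		i += 2;
		if (v == UPCASE_MARKER) {
			if (i + 1 >= src_bytes)
				break;	/* truncated run length: stop */
			/* identity run: skip count code points unchanged */
			cp += (uint32_t)(src[i] | (src[i + 1] << 8));
			i += 2;
		} else {
			upcase[cp++] = v;	/* explicit mapping for cp */
		}
	}
}

After expansion, upcase[c] gives the uppercase form of any 16-bit code point c, and
the ranges covered by the 0xFFFF skip markers visible in the table above simply fall
back to the identity mapping.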