From e08e19c331fb249e6dc86365ee80d16045c4aeb1 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Fri, 16 Oct 2015 14:53:38 +0200 Subject: iio:adc: add iio driver for Palmas (twl6035/7) gpadc This driver code was found at: https://android.googlesource.com/kernel/tegra/+/aaabb2e045f31e5a970109ffdaae900dd403d17e/drivers/staging/iio/adc Fixed various compilation issues and tested this driver on an omap5 evm. Signed-off-by: Pradeep Goudagunta Signed-off-by: H. Nikolaus Schaller Signed-off-by: Marek Belisko Acked-by: Laxman Dewangan Reviewed-by: Jonathan Cameron Acked-by: Lee Jones Signed-off-by: Jonathan Cameron --- include/linux/mfd/palmas.h | 75 +++++++++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 13e1d96935ed..c800dbc42079 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -134,21 +134,32 @@ struct palmas_pmic_driver_data { struct regulator_config config); }; +struct palmas_adc_wakeup_property { + int adc_channel_number; + int adc_high_threshold; + int adc_low_threshold; +}; + struct palmas_gpadc_platform_data { /* Channel 3 current source is only enabled during conversion */ - int ch3_current; + int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */ /* Channel 0 current source can be used for battery detection. * If used for battery detection this will cause a permanent current * consumption depending on current level set here. */ - int ch0_current; + int ch0_current; /* 0: off; 1: 5uA; 2: 15uA; 3: 20 uA */ + bool extended_delay; /* use extended delay for conversion */ /* default BAT_REMOVAL_DAT setting on device probe */ int bat_removal; /* Sets the START_POLARITY bit in the RT_CTRL register */ int start_polarity; + + int auto_conversion_period_ms; + struct palmas_adc_wakeup_property *adc_wakeup1_data; + struct palmas_adc_wakeup_property *adc_wakeup2_data; }; struct palmas_reg_init { @@ -405,28 +416,7 @@ struct palmas_gpadc_calibration { s32 offset_error; }; -struct palmas_gpadc { - struct device *dev; - struct palmas *palmas; - - int ch3_current; - int ch0_current; - - int gpadc_force; - - int bat_removal; - - struct mutex reading_lock; - struct completion irq_complete; - - int eoc_sw_irq; - - struct palmas_gpadc_calibration *palmas_cal_tbl; - - int conv0_channel; - int conv1_channel; - int rt_channel; -}; +#define PALMAS_DATASHEET_NAME(_name) "palmas-gpadc-chan-"#_name struct palmas_gpadc_result { s32 raw_code; @@ -520,6 +510,43 @@ enum palmas_irqs { PALMAS_NUM_IRQ, }; +/* Palmas GPADC Channels */ +enum { + PALMAS_ADC_CH_IN0, + PALMAS_ADC_CH_IN1, + PALMAS_ADC_CH_IN2, + PALMAS_ADC_CH_IN3, + PALMAS_ADC_CH_IN4, + PALMAS_ADC_CH_IN5, + PALMAS_ADC_CH_IN6, + PALMAS_ADC_CH_IN7, + PALMAS_ADC_CH_IN8, + PALMAS_ADC_CH_IN9, + PALMAS_ADC_CH_IN10, + PALMAS_ADC_CH_IN11, + PALMAS_ADC_CH_IN12, + PALMAS_ADC_CH_IN13, + PALMAS_ADC_CH_IN14, + PALMAS_ADC_CH_IN15, + PALMAS_ADC_CH_MAX, +}; + +/* Palmas GPADC Channel0 Current Source */ +enum { + PALMAS_ADC_CH0_CURRENT_SRC_0, + PALMAS_ADC_CH0_CURRENT_SRC_5, + PALMAS_ADC_CH0_CURRENT_SRC_15, + PALMAS_ADC_CH0_CURRENT_SRC_20, +}; + +/* Palmas GPADC Channel3 Current Source */ +enum { + PALMAS_ADC_CH3_CURRENT_SRC_0, + PALMAS_ADC_CH3_CURRENT_SRC_10, + PALMAS_ADC_CH3_CURRENT_SRC_400, + PALMAS_ADC_CH3_CURRENT_SRC_800, +}; + struct palmas_pmic { struct palmas *palmas; struct device *dev; -- cgit v1.3-6-gb490 From b440655b896b2d5a2fb5f918801fb0e281a537cd Mon Sep 17 00:00:00 2001 From: Lars-Peter
Clausen Date: Tue, 13 Oct 2015 18:10:26 +0200 Subject: iio: Add support for indicating fixed watermarks For buffers which have a fixed wake-up watermark, the watermark attribute should be read-only. Add a new FIXED_WATERMARK flag to the struct iio_buffer_access_funcs, which can be set by a buffer implementation. Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 5 +++++ include/linux/iio/buffer.h | 8 ++++++++ 2 files changed, 13 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 5f2c8c8c436e..98a6447a61d3 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -998,6 +998,8 @@ static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, iio_buffer_show_enable, iio_buffer_store_enable); static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR, iio_buffer_show_watermark, iio_buffer_store_watermark); +static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark, + S_IRUGO, iio_buffer_show_watermark, NULL); static struct attribute *iio_buffer_attrs[] = { &dev_attr_length.attr, @@ -1040,6 +1042,9 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev) if (!buffer->access->set_length) attr[0] = &dev_attr_length_ro.attr; + if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) + attr[2] = &dev_attr_watermark_ro.attr; + if (buffer->attrs) memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs, sizeof(struct attribute *) * attrcount); diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 1600c55828e0..4d99a53d1fe7 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -17,6 +17,12 @@ struct iio_buffer; +/** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. + */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) + /** * struct iio_buffer_access_funcs - access functions for buffers. * @store_to: actually store stuff to the buffer @@ -30,6 +36,7 @@ struct iio_buffer; * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer. * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* * * The purpose of this structure is to make the buffer element * modular as event for a given driver, different usecases may require @@ -54,6 +61,7 @@ struct iio_buffer_access_funcs { void (*release)(struct iio_buffer *buffer); unsigned int modes; + unsigned int flags; }; /** -- cgit v1.3-6-gb490 From e18a2ad45caeb11226e49c25068d0f2efe2adf6c Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:27 +0200 Subject: iio: Add buffer enable/disable callbacks This patch adds an enable and a disable callback that is called when the buffer is enabled/disabled. This can be used by buffer implementations that need to do some setup or teardown work. E.g. a DMA-based buffer can use this to start/stop the DMA transfer.
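[Editorial illustration, not part of the patch: a hypothetical buffer implementation might wire the new callbacks up as sketched below. The my_hw_* names are invented stand-ins, and the FIXED_WATERMARK flag from the previous patch is shown alongside for context.]

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

struct my_hw_buffer {
	struct iio_buffer buffer;
	/* hardware-specific state would live here */
};

/* Stand-ins for real hardware control (assumed, not a real API): */
static int my_hw_start(struct my_hw_buffer *buf) { return 0; }
static int my_hw_stop(struct my_hw_buffer *buf) { return 0; }

static int my_hw_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	/* called by the core when the device starts sampling */
	return my_hw_start(container_of(buffer, struct my_hw_buffer, buffer));
}

static int my_hw_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	/* balanced with enable: called by the core when sampling stops */
	return my_hw_stop(container_of(buffer, struct my_hw_buffer, buffer));
}

static const struct iio_buffer_access_funcs my_hw_buffer_ops = {
	/* a real implementation must also provide read_first_n, etc. */
	.enable = my_hw_buffer_enable,
	.disable = my_hw_buffer_disable,
	.modes = INDIO_BUFFER_HARDWARE,
	/* from the previous patch: watermark is dictated by the hardware */
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

[Because the core guarantees balanced calls, the implementation only needs simple start/stop semantics.]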
Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 36 +++++++++++++++++++++++++++++++++- include/linux/iio/buffer.h | 8 ++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 98a6447a61d3..a4b164a478c4 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -568,6 +568,22 @@ static void iio_buffer_deactivate_all(struct iio_dev *indio_dev) iio_buffer_deactivate(buffer); } +static int iio_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + if (!buffer->access->enable) + return 0; + return buffer->access->enable(buffer, indio_dev); +} + +static int iio_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + if (!buffer->access->disable) + return 0; + return buffer->access->disable(buffer, indio_dev); +} + static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev, struct iio_buffer *buffer) { @@ -719,6 +735,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, static int iio_enable_buffers(struct iio_dev *indio_dev, struct iio_device_config *config) { + struct iio_buffer *buffer; int ret; indio_dev->active_scan_mask = config->scan_mask; @@ -753,6 +770,12 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, indio_dev->info->hwfifo_set_watermark(indio_dev, config->watermark); + list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + ret = iio_buffer_enable(buffer, indio_dev); + if (ret) + goto err_disable_buffers; + } + indio_dev->currentmode = config->mode; if (indio_dev->setup_ops->postenable) { @@ -760,12 +783,16 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, if (ret) { dev_dbg(&indio_dev->dev, "Buffer not started: postenable failed (%d)\n", ret); - goto err_run_postdisable; + goto err_disable_buffers; } } return 0; +err_disable_buffers: + list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list, + buffer_list) + iio_buffer_disable(buffer, indio_dev); err_run_postdisable: indio_dev->currentmode = INDIO_DIRECT_MODE; if (indio_dev->setup_ops->postdisable) @@ -778,6 +805,7 @@ err_undo_config: static int iio_disable_buffers(struct iio_dev *indio_dev) { + struct iio_buffer *buffer; int ret = 0; int ret2; @@ -798,6 +826,12 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) ret = ret2; } + list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + ret2 = iio_buffer_disable(buffer, indio_dev); + if (ret2 && !ret) + ret = ret2; + } + indio_dev->currentmode = INDIO_DIRECT_MODE; if (indio_dev->setup_ops->postdisable) { diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 4d99a53d1fe7..2ec3ad58e8a0 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -33,6 +33,11 @@ struct iio_buffer; * storage. * @set_bytes_per_datum:set number of bytes per datum * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. + * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calls are balanced with @enable. * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer.
* @modes: Supported operating modes by this buffer type @@ -58,6 +63,9 @@ struct iio_buffer_access_funcs { int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); int (*set_length)(struct iio_buffer *buffer, int length); + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + void (*release)(struct iio_buffer *buffer); unsigned int modes; -- cgit v1.3-6-gb490 From 670b19ae9bfdbcb4ce2c2ffb2ec1659a7f4a2074 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:28 +0200 Subject: iio: Add generic DMA buffer infrastructure The traditional approach used in IIO to implement buffered capture requires the generation of at least one interrupt per sample. In the interrupt handler the driver reads the sample from the device and copies it to a software buffer. This approach has a rather large per-sample overhead associated with it. And while it works fine for sample rates in the range of up to 1000 samples per second, it starts to consume a rather large share of the available CPU processing time once we go beyond that; this is especially true on an embedded system with limited processing power. The regular interrupt also causes increased power consumption by not allowing the hardware into deeper sleep states, which is something that becomes more and more important on mobile, battery-powered devices. And while the recently added watermark support mitigates some of the issues by allowing the device to generate interrupts at a rate lower than the data output rate, this still requires a storage buffer inside the device, and even if it exists it is only a few hundred samples deep at most. DMA support, on the other hand, allows capturing many millions of samples or more without any CPU interaction. This allows the CPU to either go to sleep for longer periods or focus on other tasks, which increases overall system performance and reduces power consumption. In addition to that, some devices might not even offer a way to read the data other than using DMA, which makes DMA mandatory for them. The tasks involved in implementing a DMA buffer can be divided into two categories. The first category is memory buffer management (allocation, mapping, etc.) and hooking this up to the IIO buffer callbacks like read(), enable(), disable(), etc. The second category of tasks is to set up the DMA hardware and manage the DMA transfers. Tasks from the first category will be very similar for all IIO drivers supporting DMA buffers, while the tasks from the second category will be hardware-specific. This patch implements a generic infrastructure that takes care of the former tasks. It provides a set of functions that implement the standard IIO buffer iio_buffer_access_funcs callbacks. These can either be used as-is or be overloaded and augmented with driver-specific code where necessary. For the DMA buffer support infrastructure introduced in this series, sample data is grouped into so-called blocks. A block is the basic unit at which data is exchanged between the application and the hardware. The application is responsible for allocating the memory associated with the block and then passes the block to the hardware. When the hardware has captured an amount of samples equal to the size of a block it will notify the application, which can then read the data from the block and process it. The block size can be freely chosen (within the constraints of the hardware).
This allows a trade-off between latency and management overhead. The larger the block size, the lower the per-sample overhead, but the latency between when the data is captured and when the application can access it increases; conversely, smaller block sizes have a larger per-sample management overhead but a lower latency. The ideal block size thus depends on system and application requirements. For the time being the infrastructure only implements a simple double-buffered scheme which allocates two blocks, each with half the size of the configured buffer size. This provides basic support for capturing continuous uninterrupted data over the existing file-IO ABI. Future extensions to the DMA buffer infrastructure will give applications more fine-grained control over how many blocks are allocated and the size of each block. But this requires userspace ABI additions which are intentionally not part of this patch and will be added separately. Tasks of the second category need to be implemented by a device-specific driver. They can be hooked into the generic infrastructure using two simple callbacks, submit() and abort(). The submit() callback is used to schedule DMA transfers for blocks. Once a DMA transfer has been completed it is expected that the buffer driver calls iio_dma_buffer_block_done() to notify the core. The abort() callback is used for stopping all pending and active DMA transfers when the buffer is disabled. Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/Kconfig | 9 + drivers/iio/buffer/Makefile | 1 + drivers/iio/buffer/industrialio-buffer-dma.c | 683 +++++++++++++++++++++++++++ include/linux/iio/buffer-dma.h | 152 ++++++ 4 files changed, 845 insertions(+) create mode 100644 drivers/iio/buffer/industrialio-buffer-dma.c create mode 100644 include/linux/iio/buffer-dma.h (limited to 'include/linux') diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig index 0a7b2fd3699b..b2fda1afc03e 100644 --- a/drivers/iio/buffer/Kconfig +++ b/drivers/iio/buffer/Kconfig @@ -9,6 +9,15 @@ config IIO_BUFFER_CB Should be selected by any drivers that do in-kernel push usage. That is, those where the data is pushed to the consumer. +config IIO_BUFFER_DMA + tristate + help + Provides the generic IIO DMA buffer infrastructure that can be used by + drivers for devices with DMA support to implement the IIO buffer. + + Should be selected by drivers that want to use the generic DMA buffer + infrastructure. + config IIO_KFIFO_BUF tristate "Industrial I/O buffering based on kfifo" help diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile index 4d193b9a9123..bda3f1143e72 100644 --- a/drivers/iio/buffer/Makefile +++ b/drivers/iio/buffer/Makefile @@ -4,5 +4,6 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o +obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c new file mode 100644 index 000000000000..212cbedc7abb --- /dev/null +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -0,0 +1,683 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * For DMA buffers the storage is sub-divided into so-called blocks. Each block + * has its own memory buffer. The size of the block is the granularity at which + * memory is exchanged between the hardware and the application. Increasing the + * basic unit of data exchange from one sample to one block decreases the + * management overhead that is associated with each sample. E.g. if we say the + * management overhead for one exchange is x and the unit of exchange is one + * sample the overhead will be x for each sample. Whereas when using a block + * which contains n samples the overhead per sample is reduced to x/n. This + * allows achieving much higher sample rates than can be sustained with + * the one-sample approach. + * + * Blocks are exchanged between the DMA controller and the application by + * means of two queues: the incoming queue and the outgoing queue. Blocks on the + * incoming queue are waiting for the DMA controller to pick them up and fill + * them with data. Blocks on the outgoing queue have been filled with data and + * are waiting for the application to dequeue them and read the data. + * + * A block can be in one of the following states: + * * Owned by the application. In this state the application can read data from + * the block. + * * On the incoming list: Blocks on the incoming list are queued up to be + * processed by the DMA controller. + * * Owned by the DMA controller: The DMA controller is processing the block + * and filling it with data. + * * On the outgoing list: Blocks on the outgoing list have been successfully + * processed by the DMA controller and contain data. They can be dequeued by + * the application. + * * Dead: A block that is dead has been marked as to be freed. It might still + * be owned by either the application or the DMA controller at the moment. + * But once they are done processing it, instead of going to either the + * incoming or outgoing queue, the block will be freed. + * + * In addition to this, blocks are reference counted and the memory associated + * with both the block structure as well as the storage memory for the block + * will be freed when the last reference to the block is dropped. This means a + * block must not be accessed without holding a reference. + * + * The iio_dma_buffer implementation provides a generic infrastructure for + * managing the blocks. + * + * A driver for a specific piece of hardware that has DMA capabilities needs to + * implement the submit() callback from the iio_dma_buffer_ops structure. This + * callback is supposed to initiate the DMA transfer copying data from the + * converter to the memory region of the block. Once the DMA transfer has been + * completed the driver must call iio_dma_buffer_block_done() for the completed + * block. + * + * Prior to this it must set the bytes_used field of the block to the actual + * number of bytes in the buffer. Typically this will be equal to the + * size of the block, but if the DMA hardware has certain alignment requirements + * for the transfer length it might choose to use less than the full size. In + * either case it is expected that bytes_used is a multiple of the bytes per + * datum, i.e. the block must not contain partial samples.
+ * + * The driver must call iio_dma_buffer_block_done() for each block it has + * received through its submit() callback, even if it does not actually + * perform a DMA transfer for the block, e.g. because the buffer was disabled + * before the block transfer was started. In this case it should set bytes_used + * to 0. + * + * In addition, it is recommended that a driver implements the abort() callback. + * It will be called when the buffer is disabled and can be used to cancel + * pending transfers and stop active ones. + * + * The specific driver implementation should use the default callback + * implementations provided by this module for the iio_buffer_access_funcs + * struct. It may overload some callbacks with custom variants if the hardware + * has special requirements that are not handled by the generic functions. If a + * driver chooses to overload a callback it has to ensure that the generic + * callback is called from within the custom callback. + */ + +static void iio_buffer_block_release(struct kref *kref) +{ + struct iio_dma_buffer_block *block = container_of(kref, + struct iio_dma_buffer_block, kref); + + WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); + + dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), + block->vaddr, block->phys_addr); + + iio_buffer_put(&block->queue->buffer); + kfree(block); +} + +static void iio_buffer_block_get(struct iio_dma_buffer_block *block) +{ + kref_get(&block->kref); +} + +static void iio_buffer_block_put(struct iio_dma_buffer_block *block) +{ + kref_put(&block->kref, iio_buffer_block_release); +} + +/* + * dma_free_coherent can sleep, hence we need to take some special care to be + * able to drop a reference from an atomic context. + */ +static LIST_HEAD(iio_dma_buffer_dead_blocks); +static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock); + +static void iio_dma_buffer_cleanup_worker(struct work_struct *work) +{ + struct iio_dma_buffer_block *block, *_block; + LIST_HEAD(block_list); + + spin_lock_irq(&iio_dma_buffer_dead_blocks_lock); + list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list); + spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock); + + list_for_each_entry_safe(block, _block, &block_list, head) + iio_buffer_block_release(&block->kref); +} +static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker); + +static void iio_buffer_block_release_atomic(struct kref *kref) +{ + struct iio_dma_buffer_block *block; + unsigned long flags; + + block = container_of(kref, struct iio_dma_buffer_block, kref); + + spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags); + list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); + spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags); + + schedule_work(&iio_dma_buffer_cleanup_work); +} + +/* + * Version of iio_buffer_block_put() that can be called from atomic context + */ +static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) +{ + kref_put(&block->kref, iio_buffer_block_release_atomic); +} + +static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf) +{ + return container_of(buf, struct iio_dma_buffer_queue, buffer); +} + +static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block( + struct iio_dma_buffer_queue *queue, size_t size) +{ + struct iio_dma_buffer_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + + block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), + &block->phys_addr, GFP_KERNEL); + if (!block->vaddr) { + kfree(block); + return
NULL; + } + + block->size = size; + block->state = IIO_BLOCK_STATE_DEQUEUED; + block->queue = queue; + INIT_LIST_HEAD(&block->head); + kref_init(&block->kref); + + iio_buffer_get(&queue->buffer); + + return block; +} + +static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) +{ + struct iio_dma_buffer_queue *queue = block->queue; + + /* + * The buffer has already been freed by the application, just drop the + * reference. + */ + if (block->state != IIO_BLOCK_STATE_DEAD) { + block->state = IIO_BLOCK_STATE_DONE; + list_add_tail(&block->head, &queue->outgoing); + } +} + +/** + * iio_dma_buffer_block_done() - Indicate that a block has been completed + * @block: The completed block + * + * Should be called when the DMA controller has finished handling the block to + * pass back ownership of the block to the queue. + */ +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) +{ + struct iio_dma_buffer_queue *queue = block->queue; + unsigned long flags; + + spin_lock_irqsave(&queue->list_lock, flags); + _iio_dma_buffer_block_done(block); + spin_unlock_irqrestore(&queue->list_lock, flags); + + iio_buffer_block_put_atomic(block); + wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); + +/** + * iio_dma_buffer_block_list_abort() - Indicate that a list block has been + * aborted + * @queue: Queue for which to complete blocks. + * @list: List of aborted blocks. All blocks in this list must be from @queue. + * + * Typically called from the abort() callback after the DMA controller has been + * stopped. This will set bytes_used to 0 for each block in the list and then + * hand the blocks back to the queue. + */ +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list) +{ + struct iio_dma_buffer_block *block, *_block; + unsigned long flags; + + spin_lock_irqsave(&queue->list_lock, flags); + list_for_each_entry_safe(block, _block, list, head) { + list_del(&block->head); + block->bytes_used = 0; + _iio_dma_buffer_block_done(block); + iio_buffer_block_put_atomic(block); + } + spin_unlock_irqrestore(&queue->list_lock, flags); + + wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); + +static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) +{ + /* + * If the core owns the block it can be re-used. This should be the + * default case when enabling the buffer, unless the DMA controller does + * not support abort and has not given back the block yet. + */ + switch (block->state) { + case IIO_BLOCK_STATE_DEQUEUED: + case IIO_BLOCK_STATE_QUEUED: + case IIO_BLOCK_STATE_DONE: + return true; + default: + return false; + } +} + +/** + * iio_dma_buffer_request_update() - DMA buffer request_update callback + * @buffer: The buffer which to request an update + * + * Should be used as the iio_dma_buffer_request_update() callback for + * iio_buffer_access_ops struct for DMA buffers. + */ +int iio_dma_buffer_request_update(struct iio_buffer *buffer) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + struct iio_dma_buffer_block *block; + bool try_reuse = false; + size_t size; + int ret = 0; + int i; + + /* + * Split the buffer into two even parts. This is used as a double + * buffering scheme with usually one block at a time being used by the + * DMA and the other one by the application. 
+ */ + size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * + queue->buffer.length, 2); + + mutex_lock(&queue->lock); + + /* Allocations are page aligned */ + if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) + try_reuse = true; + + queue->fileio.block_size = size; + queue->fileio.active_block = NULL; + + spin_lock_irq(&queue->list_lock); + for (i = 0; i < 2; i++) { + block = queue->fileio.blocks[i]; + + /* If we can't re-use it, free it */ + if (block && (!iio_dma_block_reusable(block) || !try_reuse)) + block->state = IIO_BLOCK_STATE_DEAD; + } + + /* + * At this point all blocks are either owned by the core or marked as + * dead. This means we can reset the lists without having to fear + * corruption. + */ + INIT_LIST_HEAD(&queue->outgoing); + spin_unlock_irq(&queue->list_lock); + + INIT_LIST_HEAD(&queue->incoming); + + for (i = 0; i < 2; i++) { + if (queue->fileio.blocks[i]) { + block = queue->fileio.blocks[i]; + if (block->state == IIO_BLOCK_STATE_DEAD) { + /* Could not reuse it */ + iio_buffer_block_put(block); + block = NULL; + } else { + block->size = size; + } + } else { + block = NULL; + } + + if (!block) { + block = iio_dma_buffer_alloc_block(queue, size); + if (!block) { + ret = -ENOMEM; + goto out_unlock; + } + queue->fileio.blocks[i] = block; + } + + block->state = IIO_BLOCK_STATE_QUEUED; + list_add_tail(&block->head, &queue->incoming); + } + +out_unlock: + mutex_unlock(&queue->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update); + +static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block) +{ + int ret; + + /* + * If the hardware has already been removed we put the block into + * limbo. It will neither be on the incoming nor outgoing list, nor will + * it ever complete. It will just wait to be freed eventually. + */ + if (!queue->ops) + return; + + block->state = IIO_BLOCK_STATE_ACTIVE; + iio_buffer_block_get(block); + ret = queue->ops->submit(queue, block); + if (ret) { + /* + * This is a bit of a problem and there is not much we can do + * other than to wait for the buffer to be disabled and re-enabled + * and try again. But it should not really happen unless we run + * out of memory or something similar. + * + * TODO: Implement support in the IIO core to allow buffers to + * notify consumers that something went wrong and the buffer + * should be disabled. + */ + iio_buffer_block_put(block); + } +} + +/** + * iio_dma_buffer_enable() - Enable DMA buffer + * @buffer: IIO buffer to enable + * @indio_dev: IIO device the buffer is attached to + * + * Needs to be called when the device that the buffer is attached to starts + * sampling. Typically should be the iio_buffer_access_ops enable callback. + * + * This will allocate the DMA buffers and start the DMA transfers. + */ +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + struct iio_dma_buffer_block *block, *_block; + + mutex_lock(&queue->lock); + queue->active = true; + list_for_each_entry_safe(block, _block, &queue->incoming, head) { + list_del(&block->head); + iio_dma_buffer_submit_block(queue, block); + } + mutex_unlock(&queue->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_enable); + +/** + * iio_dma_buffer_disable() - Disable DMA buffer + * @buffer: IIO DMA buffer to disable + * @indio_dev: IIO device the buffer is attached to + * + * Needs to be called when the device that the buffer is attached to stops + * sampling.
Typically should be the iio_buffer_access_ops disable callback. + */ +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + + mutex_lock(&queue->lock); + queue->active = false; + + if (queue->ops && queue->ops->abort) + queue->ops->abort(queue); + mutex_unlock(&queue->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_disable); + +static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block) +{ + if (block->state == IIO_BLOCK_STATE_DEAD) { + iio_buffer_block_put(block); + } else if (queue->active) { + iio_dma_buffer_submit_block(queue, block); + } else { + block->state = IIO_BLOCK_STATE_QUEUED; + list_add_tail(&block->head, &queue->incoming); + } +} + +static struct iio_dma_buffer_block *iio_dma_buffer_dequeue( + struct iio_dma_buffer_queue *queue) +{ + struct iio_dma_buffer_block *block; + + spin_lock_irq(&queue->list_lock); + block = list_first_entry_or_null(&queue->outgoing, struct + iio_dma_buffer_block, head); + if (block != NULL) { + list_del(&block->head); + block->state = IIO_BLOCK_STATE_DEQUEUED; + } + spin_unlock_irq(&queue->list_lock); + + return block; +} + +/** + * iio_dma_buffer_read() - DMA buffer read callback + * @buffer: Buffer to read from + * @n: Number of bytes to read + * @user_buffer: Userspace buffer to copy the data to + * + * Should be used as the read_first_n callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); + struct iio_dma_buffer_block *block; + int ret; + + if (n < buffer->bytes_per_datum) + return -EINVAL; + + mutex_lock(&queue->lock); + + if (!queue->fileio.active_block) { + block = iio_dma_buffer_dequeue(queue); + if (block == NULL) { + ret = 0; + goto out_unlock; + } + queue->fileio.pos = 0; + queue->fileio.active_block = block; + } else { + block = queue->fileio.active_block; + } + + n = rounddown(n, buffer->bytes_per_datum); + if (n > block->bytes_used - queue->fileio.pos) + n = block->bytes_used - queue->fileio.pos; + + if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { + ret = -EFAULT; + goto out_unlock; + } + + queue->fileio.pos += n; + + if (queue->fileio.pos == block->bytes_used) { + queue->fileio.active_block = NULL; + iio_dma_buffer_enqueue(queue, block); + } + + ret = n; + +out_unlock: + mutex_unlock(&queue->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_read); + +/** + * iio_dma_buffer_data_available() - DMA buffer data_available callback + * @buf: Buffer to check for data availability + * + * Should be used as the data_available callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +size_t iio_dma_buffer_data_available(struct iio_buffer *buf) +{ + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); + struct iio_dma_buffer_block *block; + size_t data_available = 0; + + /* + * For counting the available bytes we'll use the size of the block not + * the number of actual bytes available in the block. Otherwise it is + * possible that we end up with a value that is lower than the watermark + * but won't increase since all blocks are in use.
+ */ + + mutex_lock(&queue->lock); + if (queue->fileio.active_block) + data_available += queue->fileio.active_block->size; + + spin_lock_irq(&queue->list_lock); + list_for_each_entry(block, &queue->outgoing, head) + data_available += block->size; + spin_unlock_irq(&queue->list_lock); + mutex_unlock(&queue->lock); + + return data_available; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available); + +/** + * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback + * @buffer: Buffer to set the bytes-per-datum for + * @bpd: The new bytes-per-datum value + * + * Should be used as the set_bytes_per_datum callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd) +{ + buffer->bytes_per_datum = bpd; + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); + +/** + * iio_dma_buffer_set_length - DMA buffer set_length callback + * @buffer: Buffer to set the length for + * @length: The new buffer length + * + * Should be used as the set_length callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) +{ + /* Avoid an invalid state */ + if (length < 2) + length = 2; + buffer->length = length; + buffer->watermark = length / 2; + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length); + +/** + * iio_dma_buffer_init() - Initialize DMA buffer queue + * @queue: Buffer to initialize + * @dev: DMA device + * @ops: DMA buffer queue callback operations + * + * The DMA device will be used by the queue to do DMA memory allocations. So it + * should refer to the device that will perform the DMA to ensure that + * allocations are done from a memory region that can be accessed by the device. + */ +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dev, const struct iio_dma_buffer_ops *ops) +{ + iio_buffer_init(&queue->buffer); + queue->buffer.length = PAGE_SIZE; + queue->buffer.watermark = queue->buffer.length / 2; + queue->dev = dev; + queue->ops = ops; + + INIT_LIST_HEAD(&queue->incoming); + INIT_LIST_HEAD(&queue->outgoing); + + mutex_init(&queue->lock); + spin_lock_init(&queue->list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_init); + +/** + * iio_dma_buffer_exit() - Cleanup DMA buffer queue + * @queue: Buffer to cleanup + * + * After this function has completed it is safe to free any resources that are + * associated with the buffer and are accessed inside the callback operations. + */ +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) +{ + unsigned int i; + + mutex_lock(&queue->lock); + + spin_lock_irq(&queue->list_lock); + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + if (!queue->fileio.blocks[i]) + continue; + queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; + } + INIT_LIST_HEAD(&queue->outgoing); + spin_unlock_irq(&queue->list_lock); + + INIT_LIST_HEAD(&queue->incoming); + + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + if (!queue->fileio.blocks[i]) + continue; + iio_buffer_block_put(queue->fileio.blocks[i]); + queue->fileio.blocks[i] = NULL; + } + queue->fileio.active_block = NULL; + queue->ops = NULL; + + mutex_unlock(&queue->lock); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_exit); + +/** + * iio_dma_buffer_release() - Release final buffer resources + * @queue: Buffer to release + * + * Frees resources that can't yet be freed in iio_dma_buffer_exit(). 
Should be + * called in the buffer's release callback implementation right before freeing + * the memory associated with the buffer. + */ +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) +{ + mutex_destroy(&queue->lock); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_release); + +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_DESCRIPTION("DMA buffer for the IIO framework"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h new file mode 100644 index 000000000000..767467d886de --- /dev/null +++ b/include/linux/iio/buffer-dma.h @@ -0,0 +1,152 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#ifndef __INDUSTRIALIO_DMA_BUFFER_H__ +#define __INDUSTRIALIO_DMA_BUFFER_H__ + +#include +#include +#include +#include +#include + +struct iio_dma_buffer_queue; +struct iio_dma_buffer_ops; +struct device; + +struct iio_buffer_block { + u32 size; + u32 bytes_used; +}; + +/** + * enum iio_block_state - State of a struct iio_dma_buffer_block + * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued + * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue + * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA + * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue + * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed + */ +enum iio_block_state { + IIO_BLOCK_STATE_DEQUEUED, + IIO_BLOCK_STATE_QUEUED, + IIO_BLOCK_STATE_ACTIVE, + IIO_BLOCK_STATE_DONE, + IIO_BLOCK_STATE_DEAD, +}; + +/** + * struct iio_dma_buffer_block - IIO buffer block + * @head: List head + * @size: Total size of the block in bytes + * @bytes_used: Number of bytes that contain valid data + * @vaddr: Virtual address of the block's memory + * @phys_addr: Physical address of the block's memory + * @queue: Parent DMA buffer queue + * @kref: kref used to manage the lifetime of the block + * @state: Current state of the block + */ +struct iio_dma_buffer_block { + /* May only be accessed by the owner of the block */ + struct list_head head; + size_t bytes_used; + + /* + * Set during allocation, constant thereafter. May be accessed read-only + * by anybody holding a reference to the block. + */ + void *vaddr; + dma_addr_t phys_addr; + size_t size; + struct iio_dma_buffer_queue *queue; + + /* Must not be accessed outside the core. */ + struct kref kref; + /* + * Must not be accessed outside the core. Access needs to hold + * queue->list_lock if the block is not owned by the core. + */ + enum iio_block_state state; +}; + +/** + * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer + * @blocks: Buffer blocks used for fileio + * @active_block: Block being used in read() + * @pos: Read offset in the active block + * @block_size: Size of each block + */ +struct iio_dma_buffer_queue_fileio { + struct iio_dma_buffer_block *blocks[2]; + struct iio_dma_buffer_block *active_block; + size_t pos; + size_t block_size; +}; + +/** + * struct iio_dma_buffer_queue - DMA buffer base structure + * @buffer: IIO buffer base structure + * @dev: Parent device + * @ops: DMA buffer callbacks + * @lock: Protects the incoming list, active and the fields in the fileio + * substruct + * @list_lock: Protects lists that contain blocks which can be modified in + * atomic context as well as blocks on those lists.
This is the outgoing queue + * list and typically also a list of active blocks in the part that handles + * the DMA controller + * @incoming: List of buffers on the incoming queue + * @outgoing: List of buffers on the outgoing queue + * @active: Whether the buffer is currently active + * @fileio: FileIO state + */ +struct iio_dma_buffer_queue { + struct iio_buffer buffer; + struct device *dev; + const struct iio_dma_buffer_ops *ops; + + struct mutex lock; + spinlock_t list_lock; + struct list_head incoming; + struct list_head outgoing; + + bool active; + + struct iio_dma_buffer_queue_fileio fileio; +}; + +/** + * struct iio_dma_buffer_ops - DMA buffer callback operations + * @submit: Called when a block is submitted to the DMA controller + * @abort: Should abort all pending transfers + */ +struct iio_dma_buffer_ops { + int (*submit)(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block); + void (*abort)(struct iio_dma_buffer_queue *queue); +}; + +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block); +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list); + +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer); +size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); +int iio_dma_buffer_request_update(struct iio_buffer *buffer); + +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dma_dev, const struct iio_dma_buffer_ops *ops); +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); + +#endif -- cgit v1.3-6-gb490 From 2d6ca60f328450ff5c7802d0857d12e3711348ce Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 13 Oct 2015 18:10:29 +0200 Subject: iio: Add a DMAengine framework based buffer Add a generic, fully device-independent DMA buffer implementation that uses the DMAengine framework to perform the DMA transfers. This can be used by converter drivers that wish to provide a DMA buffer for converters that are connected to a DMA core that implements the DMAengine API. Apart from allocating the buffer using iio_dmaengine_buffer_alloc() and freeing it using iio_dmaengine_buffer_free(), no additional converter-driver-specific code is required when using this DMA buffer implementation. Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/Kconfig | 11 ++ drivers/iio/buffer/Makefile | 1 + drivers/iio/buffer/industrialio-buffer-dmaengine.c | 213 +++++++++++++++++++++ include/linux/iio/buffer-dmaengine.h | 18 ++ 4 files changed, 243 insertions(+) create mode 100644 drivers/iio/buffer/industrialio-buffer-dmaengine.c create mode 100644 include/linux/iio/buffer-dmaengine.h (limited to 'include/linux') diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig index b2fda1afc03e..4ffd3db7817f 100644 --- a/drivers/iio/buffer/Kconfig +++ b/drivers/iio/buffer/Kconfig @@ -18,6 +18,17 @@ config IIO_BUFFER_DMA Should be selected by drivers that want to use the generic DMA buffer infrastructure.
+config IIO_BUFFER_DMAENGINE + tristate + select IIO_BUFFER_DMA + help + Provides a bonding of the generic IIO DMA buffer infrastructure with the + DMAengine framework. This can be used by converter drivers with a DMA port + connected to an external DMA controller which is supported by the + DMAengine framework. + + Should be selected by drivers that want to use this functionality. + config IIO_KFIFO_BUF tristate "Industrial I/O buffering based on kfifo" help diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile index bda3f1143e72..85beaae831ae 100644 --- a/drivers/iio/buffer/Makefile +++ b/drivers/iio/buffer/Makefile @@ -5,5 +5,6 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o +obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c new file mode 100644 index 000000000000..ebdb838d3a1c --- /dev/null +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -0,0 +1,213 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure + * with the DMAengine framework. The generic IIO DMA buffer infrastructure is + * used to manage the buffer memory and implement the IIO buffer operations + * while the DMAengine framework is used to perform the DMA transfers. Combined + * this results in a device independent fully functional DMA buffer + * implementation that can be used by device drivers for peripherals which are + * connected to a DMA controller which has a DMAengine driver implementation. 
+ */ + +struct dmaengine_buffer { + struct iio_dma_buffer_queue queue; + + struct dma_chan *chan; + struct list_head active; + + size_t align; + size_t max_size; +}; + +static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer( + struct iio_buffer *buffer) +{ + return container_of(buffer, struct dmaengine_buffer, queue.buffer); +} + +static void iio_dmaengine_buffer_block_done(void *data) +{ + struct iio_dma_buffer_block *block = data; + unsigned long flags; + + spin_lock_irqsave(&block->queue->list_lock, flags); + list_del(&block->head); + spin_unlock_irqrestore(&block->queue->list_lock, flags); + iio_dma_buffer_block_done(block); +} + +static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(&queue->buffer); + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + + block->bytes_used = min(block->size, dmaengine_buffer->max_size); + block->bytes_used = rounddown(block->bytes_used, + dmaengine_buffer->align); + + desc = dmaengine_prep_slave_single(dmaengine_buffer->chan, + block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!desc) + return -ENOMEM; + + desc->callback = iio_dmaengine_buffer_block_done; + desc->callback_param = block; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + return dma_submit_error(cookie); + + spin_lock_irq(&dmaengine_buffer->queue.list_lock); + list_add_tail(&block->head, &dmaengine_buffer->active); + spin_unlock_irq(&dmaengine_buffer->queue.list_lock); + + dma_async_issue_pending(dmaengine_buffer->chan); + + return 0; +} + +static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(&queue->buffer); + + dmaengine_terminate_all(dmaengine_buffer->chan); + /* FIXME: There is a slight chance of a race condition here. + * dmaengine_terminate_all() does not guarantee that all transfer + * callbacks have finished running. Need to introduce a + * dmaengine_terminate_all_sync(). + */ + iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active); +} + +static void iio_dmaengine_buffer_release(struct iio_buffer *buf) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(buf); + + iio_dma_buffer_release(&dmaengine_buffer->queue); + kfree(dmaengine_buffer); +} + +static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = { + .read_first_n = iio_dma_buffer_read, + .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum, + .set_length = iio_dma_buffer_set_length, + .request_update = iio_dma_buffer_request_update, + .enable = iio_dma_buffer_enable, + .disable = iio_dma_buffer_disable, + .data_available = iio_dma_buffer_data_available, + .release = iio_dmaengine_buffer_release, + + .modes = INDIO_BUFFER_HARDWARE, + .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK, +}; + +static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = { + .submit = iio_dmaengine_buffer_submit_block, + .abort = iio_dmaengine_buffer_abort, +}; + +/** + * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine + * @dev: Parent device for the buffer + * @channel: DMA channel name, typically "rx". + * + * This allocates a new IIO buffer which internally uses the DMAengine framework + * to perform its transfers. The parent device will be used to request the DMA + * channel. 
+ * + * Once done using the buffer iio_dmaengine_buffer_free() should be used to + * release it. + */ +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel) +{ + struct dmaengine_buffer *dmaengine_buffer; + unsigned int width, src_width, dest_width; + struct dma_slave_caps caps; + struct dma_chan *chan; + int ret; + + dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL); + if (!dmaengine_buffer) + return ERR_PTR(-ENOMEM); + + chan = dma_request_slave_channel_reason(dev, channel); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); + goto err_free; + } + + ret = dma_get_slave_caps(chan, &caps); + if (ret < 0) + goto err_free; + + /* Needs to be aligned to the maximum of the minimums */ + if (caps.src_addr_widths) + src_width = __ffs(caps.src_addr_widths); + else + src_width = 1; + if (caps.dst_addr_widths) + dest_width = __ffs(caps.dst_addr_widths); + else + dest_width = 1; + width = max(src_width, dest_width); + + INIT_LIST_HEAD(&dmaengine_buffer->active); + dmaengine_buffer->chan = chan; + dmaengine_buffer->align = width; + dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev); + + iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev, + &iio_dmaengine_default_ops); + + dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops; + + return &dmaengine_buffer->queue.buffer; + +err_free: + kfree(dmaengine_buffer); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(iio_dmaengine_buffer_alloc); + +/** + * iio_dmaengine_buffer_free() - Free dmaengine buffer + * @buffer: Buffer to free + * + * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc(). + */ +void iio_dmaengine_buffer_free(struct iio_buffer *buffer) +{ + struct dmaengine_buffer *dmaengine_buffer = + iio_buffer_to_dmaengine_buffer(buffer); + + iio_dma_buffer_exit(&dmaengine_buffer->queue); + dma_release_channel(dmaengine_buffer->chan); + + iio_buffer_put(buffer); +} +EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free); diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h new file mode 100644 index 000000000000..5dcddf427bb0 --- /dev/null +++ b/include/linux/iio/buffer-dmaengine.h @@ -0,0 +1,18 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __IIO_DMAENGINE_H__ +#define __IIO_DMAENGINE_H__ + +struct iio_buffer; +struct device; + +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); +void iio_dmaengine_buffer_free(struct iio_buffer *buffer); + +#endif -- cgit v1.3-6-gb490 From 28b8b26b308e656edfa9467867d5f79212da2ec3 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:20 -0700 Subject: mtd: add get/set of_node/flash_node helpers We are going to begin using the mtd->dev.of_node field for MTD device nodes, so let's add helpers for it. Also, we'll be making some conversions on spi_nor (and nand_chip eventually) too, so get that ready with their own helpers. 
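[Editorial sketch, not part of the series: how a driver might adopt the helper added below. The my_flash_probe() function and its devm allocation are assumptions for illustration; a real driver fills in many more mtd_info fields before registering.]

#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int my_flash_probe(struct platform_device *pdev)
{
	struct mtd_info *mtd;

	mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return -ENOMEM;

	/* a real driver would also fill in mtd->_read, mtd->size, etc. */
	mtd->dev.parent = &pdev->dev;

	/* hand the DT node to the MTD core so partition parsers can find it */
	mtd_set_of_node(mtd, pdev->dev.of_node);

	return mtd_device_register(mtd, NULL, 0);
}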
Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- include/linux/mtd/mtd.h | 11 +++++++++++ include/linux/mtd/nand.h | 11 +++++++++++ include/linux/mtd/spi-nor.h | 11 +++++++++++ 3 files changed, 33 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index f17fa75809aa..cc84923011c0 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -254,6 +254,17 @@ struct mtd_info { int usecount; }; +static inline void mtd_set_of_node(struct mtd_info *mtd, + struct device_node *np) +{ + mtd->dev.of_node = np; +} + +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return mtd->dev.of_node; +} + int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 5a9d1d4c2487..4f7c9b97982f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -719,6 +719,17 @@ struct nand_chip { void *priv; }; +static inline void nand_set_flash_node(struct nand_chip *chip, + struct device_node *np) +{ + chip->flash_node = np; +} + +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) +{ + return chip->flash_node; +} + /* * NAND Flash Manufacturer ID Codes */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index c8723b62c4cd..6d991df8f986 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -185,6 +185,17 @@ struct spi_nor { void *priv; }; +static inline void spi_nor_set_flash_node(struct spi_nor *nor, + struct device_node *np) +{ + nor->flash_node = np; +} + +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) +{ + return nor->flash_node; +} + /** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure -- cgit v1.3-6-gb490 From 3b6521eab0386a4854d47b1a01947d7dc46ec98d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:21 -0700 Subject: mtd: ofpart: grab device tree node directly from master device node It seems more logical to use a device node directly associated with the MTD master device (i.e., mtd->dev.of_node field) rather than requiring auxiliary partition parser information to be passed in by the driver in a separate struct. This patch supports the mtd->dev.of_node field and deprecates the parser data 'of_node' field. Driver conversions may now follow. An additional side benefit to assigning mtd->dev.of_node rather than using parser data: the driver core will automatically create a device -> node symlink for us. Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- drivers/mtd/ofpart.c | 18 ++++++++++-------- include/linux/mtd/partitions.h | 4 +++- 2 files changed, 13 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 669c3452f278..7bf996a4cf5e 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -37,10 +37,11 @@ static int parse_ofpart_partitions(struct mtd_info *master, bool dedicated = true; - if (!data) - return 0; - - mtd_node = data->of_node; + /* + * of_node can be provided through auxiliary parser data or (preferred) + * by assigning the master device node + */ + mtd_node = data && data->of_node ?
data->of_node : mtd_get_of_node(master); if (!mtd_node) return 0; @@ -149,10 +150,11 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, } *part; const char *names; - if (!data) - return 0; - - dp = data->of_node; + /* + * of_node can be provided through auxiliary parser data or (preferred) + * by assigning the master device node + */ + dp = data && data->of_node ? data->of_node : mtd_get_of_node(master); if (!dp) return 0; diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 6a35e6de5da1..e742f34b67eb 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -56,7 +56,9 @@ struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers. * @origin: for RedBoot, start address of MTD device - * @of_node: for OF parsers, device node containing partitioning information + * @of_node: for OF parsers, device node containing partitioning information. + * This field is deprecated, as the device node should simply be + * assigned to the master struct device. */ struct mtd_part_parser_data { unsigned long origin; -- cgit v1.3-6-gb490 From 30069af7348b56eb8c5e1dda7788a531c5f24ca2 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:27 -0700 Subject: mtd: spi-nor: drop flash_node field We can just alias to the MTD of_node. Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- drivers/mtd/spi-nor/spi-nor.c | 1 - include/linux/mtd/spi-nor.h | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 924d455dadb5..12041e181630 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1258,7 +1258,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) mtd->flags |= MTD_NO_ERASE; mtd->dev.parent = dev; - mtd_set_of_node(mtd, np); nor->page_size = info->page_size; mtd->writebufsize = nor->page_size; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 6d991df8f986..955f268d159a 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -124,7 +124,6 @@ struct mtd_info; * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. - * @flash_node: point to a device node describing this flash instance. * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -155,7 +154,6 @@ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; - struct device_node *flash_node; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -188,12 +186,12 @@ struct spi_nor { static inline void spi_nor_set_flash_node(struct spi_nor *nor, struct device_node *np) { - nor->flash_node = np; + mtd_set_of_node(&nor->mtd, np); } static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) { - return nor->flash_node; + return mtd_get_of_node(&nor->mtd); } /** -- cgit v1.3-6-gb490 From e270bca531b40cd0a143176eb093d173b9c6f418 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 30 Oct 2015 20:33:29 -0700 Subject: mtd: ofpart: drop 'of_node' partition parser data This field is no longer used anywhere, as it is superseded by mtd->dev.of_node. 
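[Editorial sketch of the SPI NOR controller side after this series; my_nor_probe() and the omitted nor->read/write/read_reg/write_reg hooks are assumptions, not code from the patches.]

#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>

static int my_nor_probe(struct spi_device *spi)
{
	struct spi_nor *nor;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->dev = &spi->dev;
	/* a real driver must also set nor->read/write/read_reg/write_reg */

	/* after this series, simply an alias for nor->mtd.dev.of_node */
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	ret = spi_nor_scan(nor, NULL, SPI_NOR_NORMAL);
	if (ret)
		return ret;

	/* ofpart now finds the node via mtd_get_of_node(master) on its own */
	return mtd_device_register(&nor->mtd, NULL, 0);
}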
Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- drivers/mtd/ofpart.c | 14 ++++---------- include/linux/mtd/partitions.h | 4 ---- 2 files changed, 4 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 7bf996a4cf5e..f78d2aea5545 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -37,11 +37,8 @@ static int parse_ofpart_partitions(struct mtd_info *master, bool dedicated = true; - /* - * of_node can be provided through auxiliary parser data or (preferred) - * by assigning the master device node - */ - mtd_node = data && data->of_node ? data->of_node : mtd_get_of_node(master); + /* Pull of_node from the master device node */ + mtd_node = mtd_get_of_node(master); if (!mtd_node) return 0; @@ -150,11 +147,8 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, } *part; const char *names; - /* - * of_node can be provided through auxiliary parser data or (preferred) - * by assigning the master device node - */ - dp = data && data->of_node ? data->of_node : mtd_get_of_node(master); + /* Pull of_node from the master device node */ + dp = mtd_get_of_node(master); if (!dp) return 0; diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index e742f34b67eb..773975a3c9e6 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -56,13 +56,9 @@ struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers. * @origin: for RedBoot, start address of MTD device - * @of_node: for OF parsers, device node containing partitioning information. - * This field is deprecated, as the device node should simply be - * assigned to the master struct device. */ struct mtd_part_parser_data { unsigned long origin; - struct device_node *of_node; }; -- cgit v1.3-6-gb490 From 26add94cd535d1e000e7871fe69c7bb89e942d67 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 11 Nov 2015 17:05:56 -0800 Subject: mtd: partitions: kill unused ecclayout struct This field is not used. Reported here: http://lists.infradead.org/pipermail/linux-mtd/2015-October/062417.html Reported-by: Brian Foster Cc: Brian Foster Signed-off-by: Brian Norris Reviewed-by: Boris Brezillon --- include/linux/mtd/partitions.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 773975a3c9e6..8421520c10eb 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -41,7 +41,6 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ }; #define MTDPART_OFS_RETAIN (-3) -- cgit v1.3-6-gb490 From b36f09c3c441a6e59eab9315032e7d546571de3f Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 20 Oct 2015 11:46:28 +0200 Subject: dmaengine: Add transfer termination synchronization support The DMAengine API has a long-standing race condition that is inherent to the API itself. Calling dmaengine_terminate_all() is supposed to stop and abort any pending or active transfers that have previously been submitted. Unfortunately, it is possible that this operation races against a currently running (or with some drivers also scheduled) completion callback.
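To make the intended usage concrete, here is a rough sketch of the client-side teardown pattern enabled by the primitives introduced below; the foo_* names and the bounce buffer are hypothetical, the dmaengine_* calls are the ones this patch adds:

    #include <linux/dmaengine.h>
    #include <linux/slab.h>

    struct foo_dev {                        /* hypothetical client state */
            struct dma_chan *chan;
            void *bounce_buf;               /* touched by the completion callback */
    };

    /* Process context: terminate and wait for callbacks in one call. */
    static void foo_shutdown(struct foo_dev *foo)
    {
            dmaengine_terminate_sync(foo->chan);
            kfree(foo->bounce_buf);         /* safe: no callback is running */
    }

    /* Atomic context: only schedule the termination... */
    static void foo_stop_from_irq(struct foo_dev *foo)
    {
            dmaengine_terminate_async(foo->chan);
    }

    /* ...and synchronize later, from process context, before freeing. */
    static void foo_cleanup(struct foo_dev *foo)
    {
            dmaengine_synchronize(foo->chan);
            kfree(foo->bounce_buf);
    }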
Since the API allows dmaengine_terminate_all() to be called from atomic context as well as from within a completion callback, it is not possible to synchronize to the execution of the completion callback from within dmaengine_terminate_all() itself. This means that a user of the DMAengine API does not know when it is safe to free resources used in the completion callback, which can result in a use-after-free race condition. This patch addresses the issue by introducing an explicit synchronization primitive to the DMAengine API called dmaengine_synchronize(). The existing dmaengine_terminate_all() is deprecated in favor of dmaengine_terminate_sync() and dmaengine_terminate_async(). The former aborts all pending and active transfers and synchronizes to the current context, meaning it will wait until all running completion callbacks have finished. This means it is only possible to call this function from non-atomic context. The latter function does not synchronize, but can still be used in atomic context or from within a complete callback. It has to be followed up by dmaengine_synchronize() before a client can free the resources used in a completion callback. In addition to this, the semantics of the device_terminate_all() callback are slightly relaxed by this patch. It is now OK for a driver to only schedule the termination of the active transfer, but it does not necessarily have to wait until the DMA controller has completely stopped. The driver must ensure, though, that the controller has stopped and no longer accesses any memory when the device_synchronize() callback returns. This was done in part because most drivers do not pay attention to this at the moment anyway, and in part to emphasize that this needs to be done when the device_synchronize() callback is implemented. But it also helps with implementing support for devices where stopping the controller can require operations that may sleep. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- Documentation/dmaengine/client.txt | 38 ++++++++++++++- Documentation/dmaengine/provider.txt | 20 +++++++- drivers/dma/dmaengine.c | 5 +- include/linux/dmaengine.h | 90 ++++++++++++++++++++++++++++++++++++ 4 files changed, 148 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt index 11fb87ff6cd0..d9f9f461102a 100644 --- a/Documentation/dmaengine/client.txt +++ b/Documentation/dmaengine/client.txt @@ -128,7 +128,7 @@ The slave DMA usage consists of following steps: transaction. For cyclic DMA, a callback function may wish to terminate the - DMA via dmaengine_terminate_all(). + DMA via dmaengine_terminate_async(). Therefore, it is important that DMA engine drivers drop any locks before calling the callback function which may cause a @@ -166,12 +166,29 @@ The slave DMA usage consists of following steps: Further APIs: -1. int dmaengine_terminate_all(struct dma_chan *chan) +1. int dmaengine_terminate_sync(struct dma_chan *chan) + int dmaengine_terminate_async(struct dma_chan *chan) + int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */ This causes all activity for the DMA channel to be stopped, and may discard data in the DMA FIFO which hasn't been fully transferred. No callback functions will be called for any incomplete transfers. + Two variants of this function are available. + + dmaengine_terminate_async() might not wait until the DMA has been fully + stopped or until any running complete callbacks have finished.
But it is + possible to call dmaengine_terminate_async() from atomic context or from + within a complete callback. dmaengine_synchronize() must be called before it + is safe to free the memory accessed by the DMA transfer or free resources + accessed from within the complete callback. + + dmaengine_terminate_sync() will wait for the transfer and any running + complete callbacks to finish before it returns. But the function must not be + called from atomic context or from within a complete callback. + + dmaengine_terminate_all() is deprecated and should not be used in new code. + 2. int dmaengine_pause(struct dma_chan *chan) This pauses activity on the DMA channel without data loss. @@ -197,3 +214,20 @@ Further APIs: a running DMA channel. It is recommended that DMA engine users pause or stop (via dmaengine_terminate_all()) the channel before using this API. + +5. void dmaengine_synchronize(struct dma_chan *chan) + + Synchronize the termination of the DMA channel to the current context. + + This function should be used after dmaengine_terminate_async() to synchronize + the termination of the DMA channel to the current context. The function will + wait for the transfer and any running complete callbacks to finish before it + returns. + + If dmaengine_terminate_async() is used to stop the DMA channel this function + must be called before it is safe to free memory accessed by previously + submitted descriptors or to free any resources accessed within the complete + callback of previously submitted descriptors. + + The behavior of this function is undefined if dma_async_issue_pending() has + been called between dmaengine_terminate_async() and this function. diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index 67d4ce4df109..122b7f4876bb 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt @@ -327,8 +327,24 @@ supported. * device_terminate_all - Aborts all the pending and ongoing transfers on the channel - - This command should operate synchronously on the channel, - terminating right away all the channels + - For aborted transfers the complete callback should not be called + - Can be called from atomic context or from within a complete + callback of a descriptor. Must not sleep. Drivers must be able + to handle this correctly. + - Termination may be asynchronous. The driver does not have to + wait until the currently active transfer has completely stopped. + See device_synchronize. + + * device_synchronize + - Must synchronize the termination of a channel to the current + context. + - Must make sure that memory for previously submitted + descriptors is no longer accessed by the DMA controller. + - Must make sure that all complete callbacks for previously + submitted descriptors have finished running and none are + scheduled to run. + - May sleep. 
+ Misc notes (stuff that should be documented, but don't really know where to put them) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ecec1445adf..d6fc82e3986f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -265,8 +265,11 @@ static void dma_chan_put(struct dma_chan *chan) module_put(dma_chan_to_owner(chan)); /* This channel is not in use anymore, free it */ - if (!chan->client_count && chan->device->device_free_chan_resources) + if (!chan->client_count && chan->device->device_free_chan_resources) { + /* Make sure all operations have completed */ + dmaengine_synchronize(chan); chan->device->device_free_chan_resources(chan); + } /* If the channel is used via a DMA request router, free the mapping */ if (chan->router && chan->router->route_free) { diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..4662d9aa6d5a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -654,6 +654,8 @@ enum dmaengine_alignment { * paused. Returns 0 or an error code * @device_terminate_all: Aborts all transfers on a channel. Returns 0 * or an error code + * @device_synchronize: Synchronizes the termination of a transfer to the + * current context. * @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call @@ -737,6 +739,7 @@ struct dma_device { int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); int (*device_terminate_all)(struct dma_chan *chan); + void (*device_synchronize)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, @@ -828,6 +831,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( src_sg, src_nents, flags); } +/** + * dmaengine_terminate_all() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * This function is DEPRECATED; use either dmaengine_terminate_sync() or + * dmaengine_terminate_async() instead. + */ static inline int dmaengine_terminate_all(struct dma_chan *chan) { if (chan->device->device_terminate_all) @@ -836,6 +846,86 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) return -ENOSYS; } +/** + * dmaengine_terminate_async() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending descriptors + * that have previously been submitted to the channel. It is not guaranteed, + * though, that the transfer for the active descriptor has stopped when the + * function returns. Furthermore, it is possible that the complete callback of a + * submitted transfer is still running when this function returns. + * + * dmaengine_synchronize() needs to be called before it is safe to free + * any memory that is accessed by previously submitted descriptors or before + * freeing any resources accessed from within the completion callback of any + * previously submitted descriptors. + * + * This function can be called from atomic context as well as from within a + * complete callback of a descriptor submitted on the same channel. + * + * If neither of the two conditions above applies, consider using + * dmaengine_terminate_sync() instead.
+ */ +static inline int dmaengine_terminate_async(struct dma_chan *chan) +{ + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + + return -EINVAL; +} + +/** + * dmaengine_synchronize() - Synchronize DMA channel termination + * @chan: The channel to synchronize + * + * Synchronizes the DMA channel termination to the current context. When this + * function returns, it is guaranteed that all transfers for previously issued + * descriptors have stopped and it is safe to free the memory associated + * with them. Furthermore, it is guaranteed that all complete callback functions + * for a previously submitted descriptor have finished running and it is safe to + * free resources accessed from within the complete callbacks. + * + * The behavior of this function is undefined if dma_async_issue_pending() has + * been called between dmaengine_terminate_async() and this function. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline void dmaengine_synchronize(struct dma_chan *chan) +{ + if (chan->device->device_synchronize) + chan->device->device_synchronize(chan); +} + +/** + * dmaengine_terminate_sync() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending transfers + * that have previously been submitted to the channel. It is similar to + * dmaengine_terminate_async() but guarantees that the DMA transfer has actually + * stopped and that all complete callbacks have finished running when the + * function returns. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline int dmaengine_terminate_sync(struct dma_chan *chan) +{ + int ret; + + ret = dmaengine_terminate_async(chan); + if (ret) + return ret; + + dmaengine_synchronize(chan); + + return 0; +} + static inline int dmaengine_pause(struct dma_chan *chan) { if (chan->device->device_pause) -- cgit v1.3-6-gb490 From 9eeacd3a2f17438d9d286ff2f78c4709a4148be7 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Tue, 13 Oct 2015 21:54:29 +0200 Subject: dmaengine: enable DMA_CTRL_REUSE In the current state, the capability of transfer reuse can neither be set by a slave dmaengine driver, nor used by a client driver, because the capability is not available to dma_get_slave_caps(). Fix this by adding a way to declare the capability. Fixes: 272420214d26 ("dmaengine: Add DMA_CTRL_REUSE") Signed-off-by: Robert Jarzmik Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 1 + include/linux/dmaengine.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ecec1445adf..4aced6689734 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -493,6 +493,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->dst_addr_widths = device->dst_addr_widths; caps->directions = device->directions; caps->residue_granularity = device->residue_granularity; + caps->descriptor_reuse = device->descriptor_reuse; /* * Some devices implement only pause (e.g.
to get residuum) but no diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..6f94b5cbd97c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -659,6 +659,7 @@ enum dmaengine_alignment { * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code * @device_issue_pending: push pending transactions to hardware + * @descriptor_reuse: a submitted transfer can be resubmitted after completion */ struct dma_device { @@ -681,6 +682,7 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); -- cgit v1.3-6-gb490 From 2bb129ebb23d2dfec3cd9c22dc7defd681cfcd58 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 13 Nov 2015 12:46:00 +0100 Subject: dmaengine: ioatdma: constify dca_ops structures The dca_ops structure is never modified, so declare it as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Acked-by: Dan Williams Signed-off-by: Vinod Koul --- drivers/dca/dca-core.c | 3 ++- drivers/dma/ioat/dca.c | 2 +- include/linux/dca.h | 5 +++-- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 819dfda88236..7afbb28d6a0f 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -321,7 +321,8 @@ EXPORT_SYMBOL_GPL(dca_get_tag); * @ops - pointer to struct of dca operation function pointers * @priv_size - size of extra mem to be added for provider's needs */ -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size) +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size) { struct dca_provider *dca; int alloc_size; diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 2cb7c308d5c7..0b9b6b07db9e 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -224,7 +224,7 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, return tag; } -static struct dca_ops ioat_dca_ops = { +static const struct dca_ops ioat_dca_ops = { .add_requester = ioat_dca_add_requester, .remove_requester = ioat_dca_remove_requester, .get_tag = ioat_dca_get_tag, diff --git a/include/linux/dca.h b/include/linux/dca.h index d27a7a05718d..ad956c2e07a8 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifier_block *nb); struct dca_provider { struct list_head node; - struct dca_ops *ops; + const struct dca_ops *ops; struct device *cd; int id; }; @@ -53,7 +53,8 @@ struct dca_ops { int (*dev_managed) (struct dca_provider *, struct device *); }; -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct dca_provider *dca, struct device *dev); void unregister_dca_provider(struct dca_provider *dca, struct device *dev); -- cgit v1.3-6-gb490 From c0a13aa6da5da19f9eedb562b226ec585aabdca9 Mon Sep 17 00:00:00 2001 From: Vince Hsu Date: Mon, 13 Jul 2015 13:39:39 +0100 Subject: reset: add of_reset_control_get_by_index() Add of_reset_control_get_by_index() to allow drivers to get a reset device without knowing its name.
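As a usage sketch, a consumer can now walk all reset lines of a node without naming them. The foo_ function is hypothetical; of_count_phandle_with_args() is assumed here as the generic OF helper for counting "resets" entries:

    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/reset.h>

    static int foo_assert_all_resets(struct device_node *np)
    {
            struct reset_control *rstc;
            int i, err, count;

            count = of_count_phandle_with_args(np, "resets", "#reset-cells");
            for (i = 0; i < count; i++) {
                    rstc = of_reset_control_get_by_index(np, i);
                    if (IS_ERR(rstc))
                            return PTR_ERR(rstc);

                    err = reset_control_assert(rstc);
                    reset_control_put(rstc);
                    if (err)
                            return err;
            }

            return 0;
    }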
Signed-off-by: Vince Hsu [jonathanh@nvidia.com: Updated stub function to return -ENOTSUPP instead of -ENOSYS which should only be used for system calls.] Signed-off-by: Jon Hunter Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 40 +++++++++++++++++++++++++++++----------- include/linux/reset.h | 9 +++++++++ 2 files changed, 38 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 7955e00d04d4..81ae17d15480 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -141,27 +141,24 @@ int reset_control_status(struct reset_control *rstc) EXPORT_SYMBOL_GPL(reset_control_status); /** - * of_reset_control_get - Lookup and obtain a reference to a reset controller. + * of_reset_control_get_by_index - Lookup and obtain a reference to a reset + * controller by index. * @node: device to be reset by the controller - * @id: reset line name - * - * Returns a struct reset_control or IS_ERR() condition containing errno. + * @index: index of the reset controller * - * Use of id names is optional. + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. */ -struct reset_control *of_reset_control_get(struct device_node *node, - const char *id) +struct reset_control *of_reset_control_get_by_index(struct device_node *node, + int index) { struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER); struct reset_controller_dev *r, *rcdev; struct of_phandle_args args; - int index = 0; int rstc_id; int ret; - if (id) - index = of_property_match_string(node, - "reset-names", id); ret = of_parse_phandle_with_args(node, "resets", "#reset-cells", index, &args); if (ret) @@ -202,6 +199,27 @@ struct reset_control *of_reset_control_get(struct device_node *node, return rstc; } +EXPORT_SYMBOL_GPL(of_reset_control_get_by_index); + +/** + * of_reset_control_get - Lookup and obtain a reference to a reset controller. + * @node: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id) +{ + int index = 0; + + if (id) + index = of_property_match_string(node, + "reset-names", id); + return of_reset_control_get_by_index(node, index); +} EXPORT_SYMBOL_GPL(of_reset_control_get); /** diff --git a/include/linux/reset.h b/include/linux/reset.h index 7f65f9cff951..6db74ad3dec7 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -38,6 +38,9 @@ static inline struct reset_control *devm_reset_control_get_optional( struct reset_control *of_reset_control_get(struct device_node *node, const char *id); +struct reset_control *of_reset_control_get_by_index( + struct device_node *node, int index); + #else static inline int reset_control_reset(struct reset_control *rstc) @@ -106,6 +109,12 @@ static inline struct reset_control *of_reset_control_get( return ERR_PTR(-ENOSYS); } +static inline struct reset_control *of_reset_control_get_by_index( + struct device_node *node, int index) +{ + return ERR_PTR(-ENOTSUPP); +} + #endif /* CONFIG_RESET_CONTROLLER */ #endif -- cgit v1.3-6-gb490 From 39b4da71ca334354f30941067f214ea2f2b92f3e Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 29 Oct 2015 09:55:00 +0100 Subject: reset: use ENOTSUPP instead of ENOSYS ENOSYS is reserved to report invalid syscalls to userspace. 
Consistently return ENOTSUPP to indicate that the driver doesn't support the functionality or the reset framework is not enabled at all. Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 8 ++++---- include/linux/reset.h | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 77cfc49218c6..9ab929049b9d 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -95,7 +95,7 @@ int reset_control_reset(struct reset_control *rstc) if (rstc->rcdev->ops->reset) return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_reset); @@ -108,7 +108,7 @@ int reset_control_assert(struct reset_control *rstc) if (rstc->rcdev->ops->assert) return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_assert); @@ -121,7 +121,7 @@ int reset_control_deassert(struct reset_control *rstc) if (rstc->rcdev->ops->deassert) return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_deassert); @@ -136,7 +136,7 @@ int reset_control_status(struct reset_control *rstc) if (rstc->rcdev->ops->status) return rstc->rcdev->ops->status(rstc->rcdev, rstc->id); - return -ENOSYS; + return -ENOTSUPP; } EXPORT_SYMBOL_GPL(reset_control_status); diff --git a/include/linux/reset.h b/include/linux/reset.h index 6db74ad3dec7..c4c097de0ba9 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -74,7 +74,7 @@ static inline void reset_control_put(struct reset_control *rstc) static inline int device_reset_optional(struct device *dev) { - return -ENOSYS; + return -ENOTSUPP; } static inline struct reset_control *__must_check reset_control_get( @@ -94,19 +94,19 @@ static inline struct reset_control *__must_check devm_reset_control_get( static inline struct reset_control *reset_control_get_optional( struct device *dev, const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *devm_reset_control_get_optional( struct device *dev, const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *of_reset_control_get_by_index( -- cgit v1.3-6-gb490 From 9e8925b67a809bb27ce4b7d352d67f25cf1d7fc5 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 16 Nov 2015 09:49:34 -0500 Subject: locks: Allow disabling mandatory locking at compile time Mandatory locking appears to be almost unused and buggy, and there appears to be no real interest in doing anything with it. Since effectively no one uses the code and since the code is buggy, let's allow it to be disabled at compile time. I would just suggest removing the code, but undoubtedly that will break some piece of userspace code somewhere. For the distributions that don't care about this piece of code, this gives a nice starting point to make mandatory locking go away. Cc: Benjamin Coddington Cc: Dmitry Vyukov Cc: Jeff Layton Cc: J. Bruce Fields Signed-off-by: "Eric W.
Biederman" Signed-off-by: Jeff Layton --- fs/Kconfig | 10 ++++++++ fs/locks.c | 2 ++ fs/namespace.c | 10 ++++++++ include/linux/fs.h | 74 +++++++++++++++++++++++++++++------------------------- 4 files changed, 62 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/fs/Kconfig b/fs/Kconfig index da3f32f1a4e4..59322e6e76f4 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -67,6 +67,16 @@ config FILE_LOCKING for filesystems like NFS and for the flock() system call. Disabling this option saves about 11k. +config MANDATORY_FILE_LOCKING + bool "Enable Mandatory file locking" + depends on FILE_LOCKING + default y + help + This option allows appropriately marked files on appropriately + mounted filesystems to support mandatory locking. + + To the best of my knowledge this is dead code that no one cares about. + source "fs/notify/Kconfig" source "fs/quota/Kconfig" diff --git a/fs/locks.c b/fs/locks.c index 0d2b3267e2a3..86c94674ab22 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1191,6 +1191,7 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) return error; } +#ifdef CONFIG_MANDATORY_FILE_LOCKING /** * locks_mandatory_locked - Check for an active lock * @file: the file to check @@ -1289,6 +1290,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, } EXPORT_SYMBOL(locks_mandatory_area); +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ static void lease_clear_pending(struct file_lock *fl, int arg) { diff --git a/fs/namespace.c b/fs/namespace.c index 0570729c87fd..4219885e9681 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1584,6 +1584,14 @@ static inline bool may_mount(void) return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } +static inline bool may_mandlock(void) +{ +#ifndef CONFIG_MANDATORY_FILE_LOCKING + return false; +#endif + return true; +} + /* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices.
@@ -2677,6 +2685,8 @@ long do_mount(const char *dev_name, const char __user *dir_name, type_page, flags, data_page); if (!retval && !may_mount()) retval = -EPERM; + if (!retval && (flags & MS_MANDLOCK) && !may_mandlock()) + retval = -EPERM; if (retval) goto dput_out; diff --git a/include/linux/fs.h b/include/linux/fs.h index 3aa514254161..cbf08d5c246e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2030,7 +2030,7 @@ extern struct kobject *fs_kobj; #define FLOCK_VERIFY_READ 1 #define FLOCK_VERIFY_WRITE 2 -#ifdef CONFIG_FILE_LOCKING +#ifdef CONFIG_MANDATORY_FILE_LOCKING extern int locks_mandatory_locked(struct file *); extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); @@ -2075,6 +2075,45 @@ static inline int locks_verify_truncate(struct inode *inode, return 0; } +#else /* !CONFIG_MANDATORY_FILE_LOCKING */ + +static inline int locks_mandatory_locked(struct file *file) +{ + return 0; +} + +static inline int locks_mandatory_area(int rw, struct inode *inode, + struct file *filp, loff_t offset, + size_t count) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct file *file) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ + + +#ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { /* @@ -2136,39 +2175,6 @@ static inline int break_layout(struct inode *inode, bool wait) } #else /* !CONFIG_FILE_LOCKING */ -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(int rw, struct inode *inode, - struct file *filp, loff_t offset, - size_t count) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - static inline int break_lease(struct inode *inode, unsigned int mode) { return 0; -- cgit v1.3-6-gb490 From 4d4142696e18cf30af319031d47bba46853a4605 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Wed, 23 Sep 2015 12:34:30 +0200 Subject: percpu: Remove unneeded return from void function Signed-off-by: Guillaume Gomez Acked-by: Christoph Lameter Signed-off-by: Tejun Heo --- include/linux/percpu-refcount.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 12c9b485beb7..84f542df7ff5 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -116,7 +116,7 @@ void percpu_ref_reinit(struct percpu_ref *ref); */ static inline void percpu_ref_kill(struct percpu_ref *ref) { - return percpu_ref_kill_and_confirm(ref, NULL); + percpu_ref_kill_and_confirm(ref, NULL); } /* -- cgit v1.3-6-gb490 From b916b785af99088916a122cb37de1bda3fa7f70e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 28 Oct 2015 12:32:17 +0000 Subject: drivers/perf: kill armpmu_register Nothing outside of drivers/perf/arm_pmu.c should call armpmu_register any more, so it no longer needs to be in include/linux/perf/arm_pmu.h. 
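As a rough sketch of what a PMU driver looks like after this change (the foo_* names are hypothetical, and the arm_pmu_device_probe() signature is assumed from this era of the framework):

    #include <linux/of.h>
    #include <linux/perf/arm_pmu.h>
    #include <linux/platform_device.h>

    /*
     * Hypothetical per-SoC init callback, run by arm_pmu_device_probe()
     * after armpmu_init(), so fields set here are no longer overwritten.
     */
    static int foo_pmu_init(struct arm_pmu *cpu_pmu)
    {
            cpu_pmu->name = "foo_pmu";
            /* illustrative override of a struct pmu field */
            cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
            return 0;
    }

    static const struct of_device_id foo_pmu_of_ids[] = {
            { .compatible = "foo,foo-pmu", .data = foo_pmu_init },
            { /* sentinel */ },
    };

    static int foo_pmu_probe(struct platform_device *pdev)
    {
            /* Registration with core perf now happens inside the probe helper. */
            return arm_pmu_device_probe(pdev, foo_pmu_of_ids, NULL);
    }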
Additionally, by folding it in to arm_pmu_device_probe we can allow drivers to override struct pmu fields without getting blatted by the armpmu code. This patch folds armpmu_register into arm_pmu_device_probe. The logging to the console is moved to after the PMU is successfully registered with the core perf code. Signed-off-by: Mark Rutland Suggested-by: Will Deacon Cc: Drew Richardson Cc: Pawel Moll Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 15 ++++++--------- include/linux/perf/arm_pmu.h | 2 -- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index be3755c973e9..166637f2917c 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -551,14 +551,6 @@ static void armpmu_init(struct arm_pmu *armpmu) }; } -int armpmu_register(struct arm_pmu *armpmu, int type) -{ - armpmu_init(armpmu); - pr_info("enabled with %s PMU driver, %d counters available\n", - armpmu->name, armpmu->num_events); - return perf_pmu_register(&armpmu->pmu, armpmu->name, type); -} - /* Set at runtime when we know what CPU type we are. */ static struct arm_pmu *__oprofile_cpu_pmu; @@ -887,6 +879,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, return -ENOMEM; } + armpmu_init(pmu); + if (!__oprofile_cpu_pmu) __oprofile_cpu_pmu = pmu; @@ -912,10 +906,13 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_free; - ret = armpmu_register(pmu, -1); + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (ret) goto out_destroy; + pr_info("enabled with %s PMU driver, %d counters available\n", + pmu->name, pmu->num_events); + return 0; out_destroy: diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bfa673bb822d..83b5e34c6580 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -111,8 +111,6 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -int armpmu_register(struct arm_pmu *armpmu, int type); - u64 armpmu_event_update(struct perf_event *event); int armpmu_event_set_period(struct perf_event *event); -- cgit v1.3-6-gb490 From 18fc93fd64129c96432812cb44f59c963871889b Mon Sep 17 00:00:00 2001 From: Jungseok Lee Date: Wed, 4 Nov 2015 13:26:07 +0000 Subject: percpu: remove PERCPU_ENOUGH_ROOM which is stale definition As pure cleanup, this patch removes PERCPU_ENOUGH_ROOM which is not used any more. That is, no code refers to the definition. Acked-by: Christoph Lameter Signed-off-by: Jungseok Lee Signed-off-by: Tejun Heo --- arch/ia64/include/asm/percpu.h | 2 -- include/linux/percpu.h | 6 ------ 2 files changed, 8 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 0ec484d2dcbc..b9295793a5e2 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -6,8 +6,6 @@ * David Mosberger-Tang */ -#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE - #ifdef __ASSEMBLY__ # define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... 
*/ #else /* !__ASSEMBLY__ */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index caebf2a758dc..4bc6dafb703e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -18,12 +18,6 @@ #define PERCPU_MODULE_RESERVE 0 #endif -#ifndef PERCPU_ENOUGH_ROOM -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) -#endif - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) -- cgit v1.3-6-gb490 From 34c06254ff82a815fdccdfae7517a06c9b768cee Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 5 Nov 2015 00:12:24 -0500 Subject: cgroup: fix cftype->file_offset handling 6f60eade2433 ("cgroup: generalize obtaining the handles of and notifying cgroup files") introduced cftype->file_offset so that the handles for per-css file instances can be recorded. These handles then can be used, for example, to generate file modified notifications. Unfortunately, it made the wrong assumption that files are created once for a given css and removed on its destruction. Due to the dependencies among subsystems, a css may be hidden from userland and then later shown again. This is implemented by removing and re-creating the affected files, so the associated kernfs_node for a given cgroup file may change over time. This incorrect assumption led to the corruption of css->files lists. Reimplement cftype->file_offset handling so that cgroup_file->kn is protected by a lock and updated as files are created and destroyed. This also makes keeping them on per-cgroup list unnecessary. Signed-off-by: Tejun Heo Reported-by: James Sedgwick Fixes: 6f60eade2433 ("cgroup: generalize obtaining the handles of and notifying cgroup files") Acked-by: Johannes Weiner Acked-by: Zefan Li --- include/linux/cgroup-defs.h | 4 ---- include/linux/cgroup.h | 14 +------------- kernel/cgroup.c | 42 ++++++++++++++++++++++++++++++++++-------- 3 files changed, 35 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 60d44b26276d..869fd4a3d28e 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -90,7 +90,6 @@ enum { */ struct cgroup_file { /* do not access any fields from outside cgroup core */ - struct list_head node; /* anchored at css->files */ struct kernfs_node *kn; }; @@ -134,9 +133,6 @@ struct cgroup_subsys_state { */ u64 serial_nr; - /* all cgroup_files associated with this css */ - struct list_head files; - /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 22e3754f89c5..f64083030ad5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_rm_cftypes(struct cftype *cfts); +void cgroup_file_notify(struct cgroup_file *cfile); char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); @@ -516,19 +517,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } -/** - * cgroup_file_notify - generate a file modified event for a cgroup_file - * @cfile: target cgroup_file - * - * @cfile must have been obtained 
by setting cftype->file_offset. - */ -static inline void cgroup_file_notify(struct cgroup_file *cfile) -{ - /* might not have been created due to one of the CFTYPE selector flags */ - if (cfile->kn) - kernfs_notify(cfile->kn); -} - #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index f1603c153890..b316debadeb3 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -97,6 +97,12 @@ static DEFINE_SPINLOCK(css_set_lock); */ static DEFINE_SPINLOCK(cgroup_idr_lock); +/* + * Protects cgroup_file->kn for !self csses. It synchronizes notifications + * against file removal/re-creation across css hiding. + */ +static DEFINE_SPINLOCK(cgroup_file_kn_lock); + /* * Protects cgroup_subsys->release_agent_path. Modifying it also requires * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. @@ -1393,6 +1399,16 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) char name[CGROUP_FILE_NAME_MAX]; lockdep_assert_held(&cgroup_mutex); + + if (cft->file_offset) { + struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss); + struct cgroup_file *cfile = (void *)css + cft->file_offset; + + spin_lock_irq(&cgroup_file_kn_lock); + cfile->kn = NULL; + spin_unlock_irq(&cgroup_file_kn_lock); + } + kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); } @@ -1856,7 +1872,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->self.sibling); INIT_LIST_HEAD(&cgrp->self.children); - INIT_LIST_HEAD(&cgrp->self.files); INIT_LIST_HEAD(&cgrp->cset_links); INIT_LIST_HEAD(&cgrp->pidlists); mutex_init(&cgrp->pidlist_mutex); @@ -3313,9 +3328,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, if (cft->file_offset) { struct cgroup_file *cfile = (void *)css + cft->file_offset; - kernfs_get(kn); + spin_lock_irq(&cgroup_file_kn_lock); cfile->kn = kn; - list_add(&cfile->node, &css->files); + spin_unlock_irq(&cgroup_file_kn_lock); } return 0; @@ -3552,6 +3567,22 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) return cgroup_add_cftypes(ss, cfts); } +/** + * cgroup_file_notify - generate a file modified event for a cgroup_file + * @cfile: target cgroup_file + * + * @cfile must have been obtained by setting cftype->file_offset. + */ +void cgroup_file_notify(struct cgroup_file *cfile) +{ + unsigned long flags; + + spin_lock_irqsave(&cgroup_file_kn_lock, flags); + if (cfile->kn) + kernfs_notify(cfile->kn); + spin_unlock_irqrestore(&cgroup_file_kn_lock, flags); +} + /** * cgroup_task_count - count the number of tasks in a cgroup. 
* @cgrp: the cgroup in question @@ -4613,13 +4644,9 @@ static void css_free_work_fn(struct work_struct *work) container_of(work, struct cgroup_subsys_state, destroy_work); struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; - struct cgroup_file *cfile; percpu_ref_exit(&css->refcnt); - list_for_each_entry(cfile, &css->files, node) - kernfs_put(cfile->kn); - if (ss) { /* css free path */ int id = css->id; @@ -4724,7 +4751,6 @@ static void init_and_link_css(struct cgroup_subsys_state *css, css->ss = ss; INIT_LIST_HEAD(&css->sibling); INIT_LIST_HEAD(&css->children); - INIT_LIST_HEAD(&css->files); css->serial_nr = css_serial_nr_next++; if (cgroup_parent(cgrp)) { -- cgit v1.3-6-gb490 From 67e9c74b8a873408c27ac9a8e4c1d1c8d72c93ff Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 16 Nov 2015 11:13:34 -0500 Subject: cgroup: replace __DEVEL__sane_behavior with cgroup2 fs type With major controllers - cpu, memory and io - shaping up for the unified hierarchy, cgroup2 is about ready to be, gradually, released into the wild. Replace __DEVEL__sane_behavior flag which was used to select the unified hierarchy with a separate filesystem type "cgroup2" so that unified hierarchy can be mounted as follows. mount -t cgroup2 none $MOUNT_POINT The cgroup2 fs has its own magic number - 0x63677270 ("cgrp"). v2: Assign a different magic number to cgroup2 fs. Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Johannes Weiner --- Documentation/cgroups/unified-hierarchy.txt | 6 ++-- include/linux/cgroup-defs.h | 1 - include/uapi/linux/magic.h | 1 + kernel/cgroup.c | 47 ++++++++++++++--------------- 4 files changed, 26 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt index 781b1d475bcf..c1f0e8780960 100644 --- a/Documentation/cgroups/unified-hierarchy.txt +++ b/Documentation/cgroups/unified-hierarchy.txt @@ -94,11 +94,9 @@ the process. 2-1. Mounting -Currently, unified hierarchy can be mounted with the following mount -command. Note that this is still under development and scheduled to -change soon. +Unified hierarchy can be mounted with the following mount command. - mount -t cgroup -o __DEVEL__sane_behavior cgroup $MOUNT_POINT + mount -t cgroup2 none $MOUNT_POINT All controllers which support the unified hierarchy and are not bound to other hierarchies are automatically bound to unified hierarchy and diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 869fd4a3d28e..80e2ae655208 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -66,7 +66,6 @@ enum { /* cgroup_root->flags */ enum { - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ }; diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index accb036bbc9c..b283d56c1db9 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -54,6 +54,7 @@ #define SMB_SUPER_MAGIC 0x517B #define CGROUP_SUPER_MAGIC 0x27e0eb +#define CGROUP2_SUPER_MAGIC 0x63677270 #define STACK_END_MAGIC 0x57AC6E9D diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b316debadeb3..af0886262f58 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -211,6 +211,7 @@ static unsigned long have_free_callback __read_mostly; /* Ditto for the can_fork callback. 
*/ static unsigned long have_canfork_callback __read_mostly; +static struct file_system_type cgroup2_fs_type; static struct cftype cgroup_dfl_base_files[]; static struct cftype cgroup_legacy_base_files[]; @@ -1641,10 +1642,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) all_ss = true; continue; } - if (!strcmp(token, "__DEVEL__sane_behavior")) { - opts->flags |= CGRP_ROOT_SANE_BEHAVIOR; - continue; - } if (!strcmp(token, "noprefix")) { opts->flags |= CGRP_ROOT_NOPREFIX; continue; @@ -1711,15 +1708,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) return -ENOENT; } - if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { - pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); - if (nr_opts != 1) { - pr_err("sane_behavior: no other mount options allowed\n"); - return -EINVAL; - } - return 0; - } - /* * If the 'all' option was specified select all the subsystems, * otherwise if 'none', 'name=' and a subsystem name options were @@ -1998,6 +1986,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { + bool is_v2 = fs_type == &cgroup2_fs_type; struct super_block *pinned_sb = NULL; struct cgroup_subsys *ss; struct cgroup_root *root; @@ -2014,6 +2003,17 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (!use_task_css_set_links) cgroup_enable_task_cg_lists(); + if (is_v2) { + if (data) { + pr_err("cgroup2: unknown option \"%s\"\n", (char *)data); + return ERR_PTR(-EINVAL); + } + cgrp_dfl_root_visible = true; + root = &cgrp_dfl_root; + cgroup_get(&root->cgrp); + goto out_mount; + } + mutex_lock(&cgroup_mutex); /* First find the desired set of subsystems */ @@ -2021,15 +2021,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto out_unlock; - /* look for a matching existing root */ - if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) { - cgrp_dfl_root_visible = true; - root = &cgrp_dfl_root; - cgroup_get(&root->cgrp); - ret = 0; - goto out_unlock; - } - /* * Destruction of cgroup root is asynchronous, so subsystems may * still be dying after the previous unmount. Let's drain the @@ -2140,9 +2131,10 @@ out_free: if (ret) return ERR_PTR(ret); - +out_mount: dentry = kernfs_mount(fs_type, flags, root->kf_root, - CGROUP_SUPER_MAGIC, &new_sb); + is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC, + &new_sb); if (IS_ERR(dentry) || !new_sb) cgroup_put(&root->cgrp); @@ -2185,6 +2177,12 @@ static struct file_system_type cgroup_fs_type = { .kill_sb = cgroup_kill_sb, }; +static struct file_system_type cgroup2_fs_type = { + .name = "cgroup2", + .mount = cgroup_mount, + .kill_sb = cgroup_kill_sb, +}; + /** * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy * @task: target task @@ -5315,6 +5313,7 @@ int __init cgroup_init(void) WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup")); WARN_ON(register_filesystem(&cgroup_fs_type)); + WARN_ON(register_filesystem(&cgroup2_fs_type)); WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations)); return 0; -- cgit v1.3-6-gb490 From eb4b0ec75ec34e90bd1594c665f16de0cb4e3bf9 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 16 Nov 2015 08:35:53 -0200 Subject: [media] include/media: move platform_data to linux/platform_data/media Let's not mix platform_data headers with the core headers. 
Instead, let's create a subdir at linux/platform_data and move the headers to that common place, adding it to MAINTAINERS. The headers were moved with: mkdir include/linux/platform_data/media/; git mv include/media/gpio-ir-recv.h include/media/ir-rx51.h include/media/mmp-camera.h include/media/omap1_camera.h include/media/omap4iss.h include/media/s5p_hdmi.h include/media/si4713.h include/media/sii9234.h include/media/smiapp.h include/media/soc_camera.h include/media/soc_camera_platform.h include/media/timb_radio.h include/media/timb_video.h include/linux/platform_data/media/ And the references fixed with this script: MAIN_DIR="linux/platform_data/" PREV_DIR="media/" DIRS="media/" echo "Checking affected files" >&2 for i in $DIRS; do for j in $(find include/$MAIN_DIR/$i -type f -name '*.h'); do n=`basename $j` git grep -l $n done done|sort|uniq >files && ( echo "Handling files..." >&2; echo "for i in \$(cat files|grep -v Documentation); do cat \$i | \\"; ( cd include/$MAIN_DIR; for j in $DIRS; do for i in $(ls $j); do echo "perl -ne 's,(include [\\\"\\<])$PREV_DIR($i)([\\\"\\>]),\1$MAIN_DIR$j\2\3,; print \$_' |\\"; done; done; echo "cat > a && mv a \$i; done"; ); echo "Handling documentation..." >&2; echo "for i in MAINTAINERS \$(cat files); do cat \$i | \\"; ( cd include/$MAIN_DIR; for j in $DIRS; do for i in $(ls $j); do echo " perl -ne 's,include/$PREV_DIR($i)\b,include/$MAIN_DIR$j\1,; print \$_' |\\"; done; done; echo "cat > a && mv a \$i; done" ); ) >script && . ./script Signed-off-by: Mauro Carvalho Chehab Acked-by: Arnd Bergmann Acked-by: Lee Jones Acked-by: Krzysztof Kozlowski --- Documentation/video4linux/omap4_camera.txt | 2 +- Documentation/video4linux/si4713.txt | 2 +- MAINTAINERS | 1 + arch/arm/mach-omap1/include/mach/camera.h | 2 +- arch/arm/mach-omap2/board-rx51-peripherals.c | 4 +- arch/arm/plat-samsung/devs.c | 2 +- arch/sh/boards/mach-ap325rxa/setup.c | 2 +- drivers/media/platform/marvell-ccic/mmp-driver.c | 2 +- drivers/media/platform/s5p-tv/hdmi_drv.c | 2 +- drivers/media/platform/s5p-tv/sii9234_drv.c | 2 +- drivers/media/platform/soc_camera/omap1_camera.c | 2 +- .../platform/soc_camera/soc_camera_platform.c | 2 +- drivers/media/platform/timblogiw.c | 2 +- drivers/media/radio/radio-timb.c | 2 +- drivers/media/radio/si4713/radio-usb-si4713.c | 2 +- drivers/media/radio/si4713/si4713.h | 2 +- drivers/media/rc/gpio-ir-recv.c | 2 +- drivers/media/rc/ir-rx51.c | 2 +- drivers/mfd/timberdale.c | 4 +- drivers/staging/media/omap4iss/iss.h | 2 +- drivers/staging/media/omap4iss/iss_csiphy.h | 2 +- include/linux/platform_data/media/gpio-ir-recv.h | 23 ++++++ include/linux/platform_data/media/ir-rx51.h | 10 +++ include/linux/platform_data/media/mmp-camera.h | 9 +++ include/linux/platform_data/media/omap1_camera.h | 35 +++++++++ include/linux/platform_data/media/omap4iss.h | 65 +++++++++++++++++ include/linux/platform_data/media/s5p_hdmi.h | 36 ++++++++++ include/linux/platform_data/media/si4713.h | 48 +++++++++++++ include/linux/platform_data/media/sii9234.h | 24 +++++++ .../platform_data/media/soc_camera_platform.h | 83 ++++++++++++++++++++++ include/linux/platform_data/media/timb_radio.h | 30 ++++++++ include/linux/platform_data/media/timb_video.h | 33 +++++++++ include/media/gpio-ir-recv.h | 24 ------- include/media/ir-rx51.h | 10 --- include/media/mmp-camera.h | 9 --- include/media/omap1_camera.h | 35 --------- include/media/omap4iss.h | 65 ----------------- include/media/s5p_hdmi.h | 37 ---------- include/media/si4713.h | 48 ------------- include/media/sii9234.h | 24 ------- 
include/media/soc_camera_platform.h | 83 ---------------------- include/media/timb_radio.h | 30 -------- include/media/timb_video.h | 33 --------- 43 files changed, 419 insertions(+), 420 deletions(-) create mode 100644 include/linux/platform_data/media/gpio-ir-recv.h create mode 100644 include/linux/platform_data/media/ir-rx51.h create mode 100644 include/linux/platform_data/media/mmp-camera.h create mode 100644 include/linux/platform_data/media/omap1_camera.h create mode 100644 include/linux/platform_data/media/omap4iss.h create mode 100644 include/linux/platform_data/media/s5p_hdmi.h create mode 100644 include/linux/platform_data/media/si4713.h create mode 100644 include/linux/platform_data/media/sii9234.h create mode 100644 include/linux/platform_data/media/soc_camera_platform.h create mode 100644 include/linux/platform_data/media/timb_radio.h create mode 100644 include/linux/platform_data/media/timb_video.h delete mode 100644 include/media/gpio-ir-recv.h delete mode 100644 include/media/ir-rx51.h delete mode 100644 include/media/mmp-camera.h delete mode 100644 include/media/omap1_camera.h delete mode 100644 include/media/omap4iss.h delete mode 100644 include/media/s5p_hdmi.h delete mode 100644 include/media/si4713.h delete mode 100644 include/media/sii9234.h delete mode 100644 include/media/soc_camera_platform.h delete mode 100644 include/media/timb_radio.h delete mode 100644 include/media/timb_video.h (limited to 'include/linux') diff --git a/Documentation/video4linux/omap4_camera.txt b/Documentation/video4linux/omap4_camera.txt index 25d9b40a4651..a6734aa77242 100644 --- a/Documentation/video4linux/omap4_camera.txt +++ b/Documentation/video4linux/omap4_camera.txt @@ -47,7 +47,7 @@ Tested platforms File list --------- drivers/staging/media/omap4iss/ -include/media/omap4iss.h +include/linux/platform_data/media/omap4iss.h References ---------- diff --git a/Documentation/video4linux/si4713.txt b/Documentation/video4linux/si4713.txt index 2e7392a4fee1..2ddc6b095a76 100644 --- a/Documentation/video4linux/si4713.txt +++ b/Documentation/video4linux/si4713.txt @@ -157,7 +157,7 @@ int main (int argc, char *argv[]) } The struct si4713_rnl and SI4713_IOC_MEASURE_RNL are defined under -include/media/si4713.h. +include/linux/platform_data/media/si4713.h. Stereo/Mono and RDS subchannels =============================== diff --git a/MAINTAINERS b/MAINTAINERS index dc1787719c2a..96521eb39270 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6895,6 +6895,7 @@ F: Documentation/video4linux/ F: Documentation/DocBook/media/ F: drivers/media/ F: drivers/staging/media/ +F: include/linux/platform_data/media/ F: include/media/ F: include/uapi/linux/dvb/ F: include/uapi/linux/videodev2.h diff --git a/arch/arm/mach-omap1/include/mach/camera.h b/arch/arm/mach-omap1/include/mach/camera.h index 847d00f0bb0a..caa6c0d6f0ac 100644 --- a/arch/arm/mach-omap1/include/mach/camera.h +++ b/arch/arm/mach-omap1/include/mach/camera.h @@ -1,7 +1,7 @@ #ifndef __ASM_ARCH_CAMERA_H_ #define __ASM_ARCH_CAMERA_H_ -#include +#include void omap1_camera_init(void *); diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 14edcd7a2a1d..0a0567f8e8a0 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include @@ -48,7 +48,7 @@ #include