From ebdc02dd4c4b7ef16abfb44acad36010fa31ee0f Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 21 Feb 2020 18:33:13 +0100 Subject: drm: Add DRM_MODE_TYPE_USERDEF flag to probed modes matching a video= argument drm_helper_probe_add_cmdline_mode() prefers using a probed mode matching a video= argument over calculating our own timings for the user specified mode using CVT or GTF. But userspace code which is auto-configuring the mode may want to know that the user has specified that mode on the kernel commandline so that it can pick that mode over the mode which is marked as DRM_MODE_TYPE_PREFERRED. This commit sets the DRM_MODE_TYPE_USERDEF flag on the matching mode, just as we would do on the user-specified mode when no matching probed mode is found. Signed-off-by: Hans de Goede Reviewed-by: Emil Velikov Signed-off-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20200221173313.510235-2-hdegoede@redhat.com --- include/drm/drm_modes.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 730fc31de4fb..6a4c4b26caad 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -256,7 +256,8 @@ struct drm_display_mode { * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of * them really. Drivers must set this bit for all modes they create * and expose to userspace. - * - DRM_MODE_TYPE_USERDEF: Mode defined via kernel command line + * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel + * command line. * * Plus a big list of flags which shouldn't be used at all, but are * still around since these flags are also used in the userspace ABI. -- cgit v1.2.3-59-g8ed1b From e33f4234263da26154be0b1e5ffc7a2c5e52c3e3 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:50:45 +0100 Subject: drm/doc: add WARNING for drm_device::struct_mutex The mutex should be used, only by legacy drivers. Add a big warning to deter people from using it. Suggested-by: Daniel Vetter Reviewed-by: Daniel Vetter Signed-off-by: Emil Velikov Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-6-emil.l.velikov@gmail.com --- include/drm/drm_device.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index a55874db9dd4..0988351d743c 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -146,6 +146,9 @@ struct drm_device { * @struct_mutex: * * Lock for others (not &drm_minor.master and &drm_file.is_master) + * + * WARNING: + * Only drivers annotated with DRIVER_LEGACY should be using this. */ struct mutex struct_mutex; -- cgit v1.2.3-59-g8ed1b From 1a9458aeb8eb48bfa5f9b3e7682bddc28fd0b85e Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:50:49 +0100 Subject: drm: remove drm_driver::gem_free_object No drivers set the callback, so remove it all together. 
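For reference, a minimal sketch (illustrative only; the my_* names are hypothetical) of the per-object &drm_gem_object_funcs.free callback that replaces the removed driver-wide hook:

	static void my_gem_free(struct drm_gem_object *obj)
	{
		struct my_bo *bo = container_of(obj, struct my_bo, base);

		drm_gem_object_release(obj);
		kfree(bo);
	}

	static const struct drm_gem_object_funcs my_gem_funcs = {
		.free = my_gem_free,
	};

Objects carrying such funcs are freed through obj->funcs->free(); only drivers still relying on the gem_free_object_unlocked fallback go through the driver struct.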
Signed-off-by: Emil Velikov Acked-by: Sam Ravnborg Reviewed-by: Thomas Zimmermann Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-10-emil.l.velikov@gmail.com --- drivers/gpu/drm/drm_gem.c | 22 +++------------------- include/drm/drm_drv.h | 8 -------- include/drm/drm_gem.h | 5 +++-- 3 files changed, 6 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index eb0017985d91..21c69e71d685 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -975,15 +975,10 @@ drm_gem_object_free(struct kref *kref) container_of(kref, struct drm_gem_object, refcount); struct drm_device *dev = obj->dev; - if (obj->funcs) { + if (obj->funcs) obj->funcs->free(obj); - } else if (dev->driver->gem_free_object_unlocked) { + else if (dev->driver->gem_free_object_unlocked) dev->driver->gem_free_object_unlocked(obj); - } else if (dev->driver->gem_free_object) { - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - dev->driver->gem_free_object(obj); - } } EXPORT_SYMBOL(drm_gem_object_free); @@ -999,21 +994,10 @@ EXPORT_SYMBOL(drm_gem_object_free); void drm_gem_object_put_unlocked(struct drm_gem_object *obj) { - struct drm_device *dev; - if (!obj) return; - dev = obj->dev; - - if (dev->driver->gem_free_object) { - might_lock(&dev->struct_mutex); - if (kref_put_mutex(&obj->refcount, drm_gem_object_free, - &dev->struct_mutex)) - mutex_unlock(&dev->struct_mutex); - } else { - kref_put(&obj->refcount, drm_gem_object_free); - } + kref_put(&obj->refcount, drm_gem_object_free); } EXPORT_SYMBOL(drm_gem_object_put_unlocked); diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 6d457652f199..e6eff508f687 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -327,14 +327,6 @@ struct drm_driver { */ void (*debugfs_init)(struct drm_minor *minor); - /** - * @gem_free_object: deconstructor for drm_gem_objects - * - * This is deprecated and should not be used by new drivers. Use - * &drm_gem_object_funcs.free instead. - */ - void (*gem_free_object) (struct drm_gem_object *obj); - /** * @gem_free_object_unlocked: deconstructor for drm_gem_objects * diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 0b375069cd48..ec2d24a60a76 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -272,8 +272,9 @@ struct drm_gem_object { * attachment point for the device. This is invariant over the lifetime * of a gem object. * - * The &drm_driver.gem_free_object callback is responsible for cleaning - * up the dma_buf attachment and references acquired at import time. + * The &drm_driver.gem_free_object_unlocked callback is responsible for + * cleaning up the dma_buf attachment and references acquired at import + * time. * * Note that the drm gem/prime core does not depend upon drivers setting * this field any more. So for drivers where this doesn't make sense -- cgit v1.2.3-59-g8ed1b From b5d250744cccfb40024de663ea1f4da04e6d959c Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:50:50 +0100 Subject: drm/gem: fold drm_gem_object_put_unlocked and __drm_gem_object_put() With earlier patch we removed the overhead so now we can lift the helper into the header effectively folding it with __drm_object_put. 
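In other words, the helper becomes a plain kref_put(); a rough illustration (not part of the patch) of what dropping the last reference now does:

	kref_put(&obj->refcount, drm_gem_object_free);
	/* once the refcount hits zero, kref_put() calls drm_gem_object_free(),
	 * which dispatches to obj->funcs->free() or the driver's
	 * gem_free_object_unlocked() callback -- no struct_mutex involved. */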
v2: drop struct_mutex references (Daniel) Signed-off-by: Emil Velikov Acked-by: Sam Ravnborg (v1) Reviewed-by: Daniel Vetter Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-11-emil.l.velikov@gmail.com --- drivers/gpu/drm/drm_gem.c | 19 ------------------- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2 +- include/drm/drm_drv.h | 2 -- include/drm/drm_gem.h | 16 +++------------- 4 files changed, 4 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 21c69e71d685..9ba4b1520d48 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -982,25 +982,6 @@ drm_gem_object_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_free); -/** - * drm_gem_object_put_unlocked - drop a GEM buffer object reference - * @obj: GEM buffer object - * - * This releases a reference to @obj. Callers must not hold the - * &drm_device.struct_mutex lock when calling this function. - * - * See also __drm_gem_object_put(). - */ -void -drm_gem_object_put_unlocked(struct drm_gem_object *obj) -{ - if (!obj) - return; - - kref_put(&obj->refcount, drm_gem_object_free); -} -EXPORT_SYMBOL(drm_gem_object_put_unlocked); - /** * drm_gem_object_put - release a GEM buffer object reference * @obj: GEM buffer object diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 2faa481cc18f..41351cbf31b5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -105,7 +105,7 @@ __attribute__((nonnull)) static inline void i915_gem_object_put(struct drm_i915_gem_object *obj) { - __drm_gem_object_put(&obj->base); + drm_gem_object_put_unlocked(&obj->base); } #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index e6eff508f687..bb924cddc09c 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -332,8 +332,6 @@ struct drm_driver { * * This is deprecated and should not be used by new drivers. Use * &drm_gem_object_funcs.free instead. - * Compared to @gem_free_object this is not encumbered with - * &drm_device.struct_mutex legacy locking schemes. */ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index ec2d24a60a76..c3bdade093ae 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -364,27 +364,17 @@ static inline void drm_gem_object_get(struct drm_gem_object *obj) } /** - * __drm_gem_object_put - raw function to release a GEM buffer object reference + * drm_gem_object_put_unlocked - drop a GEM buffer object reference * @obj: GEM buffer object * - * This function is meant to be used by drivers which are not encumbered with - * &drm_device.struct_mutex legacy locking and which are using the - * gem_free_object_unlocked callback. It avoids all the locking checks and - * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked(). - * - * Drivers should never call this directly in their code. Instead they should - * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)`` - * wrapper function, and use that. Shared code should never call this, to - * avoid breaking drivers by accident which still depend upon - * &drm_device.struct_mutex locking. + * This releases a reference to @obj. 
*/ static inline void -__drm_gem_object_put(struct drm_gem_object *obj) +drm_gem_object_put_unlocked(struct drm_gem_object *obj) { kref_put(&obj->refcount, drm_gem_object_free); } -void drm_gem_object_put_unlocked(struct drm_gem_object *obj); void drm_gem_object_put(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, -- cgit v1.2.3-59-g8ed1b From eecd7fd8bf58d5d59f948d2655e41760d7cf17d9 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:50:51 +0100 Subject: drm/gem: add _locked suffix to drm_gem_object_put Vast majority of DRM (core and drivers) are struct_mutex free. As such we have only a handful of cases where the locked helper should be used. Make that stand out a little bit better. Done via the following script: __from=drm_gem_object_put __to=drm_gem_object_put_locked for __file in $(git grep --name-only --word-regexp $__from); do sed -i "s/\<$__from\>/$__to/g" $__file; done Cc: Rob Clark Cc: Sean Paul Cc: linux-arm-msm@vger.kernel.org Signed-off-by: Emil Velikov Acked-by: Sam Ravnborg Reviewed-by: Steven Price Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-12-emil.l.velikov@gmail.com --- drivers/gpu/drm/drm_gem.c | 6 +++--- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4 ++-- drivers/gpu/drm/msm/msm_drv.c | 2 +- drivers/gpu/drm/msm/msm_gem.c | 6 +++--- drivers/gpu/drm/msm/msm_gem_submit.c | 2 +- drivers/gpu/drm/msm/msm_gpu.c | 2 +- include/drm/drm_gem.h | 4 ++-- 7 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 9ba4b1520d48..d1a7f1844128 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -983,7 +983,7 @@ drm_gem_object_free(struct kref *kref) EXPORT_SYMBOL(drm_gem_object_free); /** - * drm_gem_object_put - release a GEM buffer object reference + * drm_gem_object_put_locked - release a GEM buffer object reference * @obj: GEM buffer object * * This releases a reference to @obj. Callers must hold the @@ -994,7 +994,7 @@ EXPORT_SYMBOL(drm_gem_object_free); * drm_gem_object_put_unlocked() instead. 
*/ void -drm_gem_object_put(struct drm_gem_object *obj) +drm_gem_object_put_locked(struct drm_gem_object *obj) { if (obj) { WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); @@ -1002,7 +1002,7 @@ drm_gem_object_put(struct drm_gem_object *obj) kref_put(&obj->refcount, drm_gem_object_free); } } -EXPORT_SYMBOL(drm_gem_object_put); +EXPORT_SYMBOL(drm_gem_object_put_locked); /** * drm_gem_vm_open - vma->ops->open implementation for GEM diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index 8cae2ca4af6b..68eddac7771c 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -124,13 +124,13 @@ reset_set(void *data, u64 val) if (a5xx_gpu->pm4_bo) { msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace); - drm_gem_object_put(a5xx_gpu->pm4_bo); + drm_gem_object_put_locked(a5xx_gpu->pm4_bo); a5xx_gpu->pm4_bo = NULL; } if (a5xx_gpu->pfp_bo) { msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace); - drm_gem_object_put(a5xx_gpu->pfp_bo); + drm_gem_object_put_locked(a5xx_gpu->pfp_bo); a5xx_gpu->pfp_bo = NULL; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 29295dee2a2e..6baed5b43ea3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -932,7 +932,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, ret = 0; } - drm_gem_object_put(obj); + drm_gem_object_put_locked(obj); unlock: mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5a6a79fbc9d6..8696c405f709 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -879,7 +879,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) } #endif -/* don't call directly! Use drm_gem_object_put() and friends */ +/* don't call directly! 
Use drm_gem_object_put_locked() and friends */ void msm_gem_free_object(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -1183,7 +1183,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, return vaddr; err: if (locked) - drm_gem_object_put(obj); + drm_gem_object_put_locked(obj); else drm_gem_object_put_unlocked(obj); @@ -1215,7 +1215,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo, msm_gem_unpin_iova(bo, aspace); if (locked) - drm_gem_object_put(bo); + drm_gem_object_put_locked(bo); else drm_gem_object_put_unlocked(bo); } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 385d4965a8d0..8f450a245cfb 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -387,7 +387,7 @@ static void submit_cleanup(struct msm_gem_submit *submit) struct msm_gem_object *msm_obj = submit->bos[i].obj; submit_unlock_unpin_bo(submit, i, false); list_del_init(&msm_obj->submit_entry); - drm_gem_object_put(&msm_obj->base); + drm_gem_object_put_locked(&msm_obj->base); } } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 615c5cda5389..86a68f96c48d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -694,7 +694,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, /* move to inactive: */ msm_gem_move_to_inactive(&msm_obj->base); msm_gem_unpin_iova(&msm_obj->base, submit->aspace); - drm_gem_object_put(&msm_obj->base); + drm_gem_object_put_locked(&msm_obj->base); } pm_runtime_mark_last_busy(&gpu->pdev->dev); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index c3bdade093ae..a231a2b3f5ac 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -187,7 +187,7 @@ struct drm_gem_object { * * Reference count of this object * - * Please use drm_gem_object_get() to acquire and drm_gem_object_put() + * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked() * or drm_gem_object_put_unlocked() to release a reference to a GEM * buffer object. */ @@ -375,7 +375,7 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj) kref_put(&obj->refcount, drm_gem_object_free); } -void drm_gem_object_put(struct drm_gem_object *obj); +void drm_gem_object_put_locked(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, -- cgit v1.2.3-59-g8ed1b From 2f4dd13d4bb8a85f6d5b66a18989509924e4f5e9 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:50:52 +0100 Subject: drm/gem: add drm_gem_object_put helper Spelling out _unlocked for each and every driver is a annoying. Especially if we consider how many drivers, do not know (or need to) about the horror stories involving struct_mutex. Add helper, which will allow us to transition the drivers one by one, dropping the suffix. 
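To illustrate the transition (hypothetical call site, not taken from any driver): while the compat #define added below is in place, both spellings resolve to the same inline helper, so call sites can be converted one driver at a time:

	drm_gem_object_put_unlocked(&bo->base);	/* old spelling, via the #define */
	drm_gem_object_put(&bo->base);		/* new spelling */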
v2: add missing space after function name (Jani) Signed-off-by: Emil Velikov Acked-by: Sam Ravnborg (v1) Reviewed-by: Steven Price Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-13-emil.l.velikov@gmail.com --- include/drm/drm_gem.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index a231a2b3f5ac..2f7b86c0649c 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -364,17 +364,19 @@ static inline void drm_gem_object_get(struct drm_gem_object *obj) } /** - * drm_gem_object_put_unlocked - drop a GEM buffer object reference + * drm_gem_object_put - drop a GEM buffer object reference * @obj: GEM buffer object * * This releases a reference to @obj. */ static inline void -drm_gem_object_put_unlocked(struct drm_gem_object *obj) +drm_gem_object_put(struct drm_gem_object *obj) { kref_put(&obj->refcount, drm_gem_object_free); } +#define drm_gem_object_put_unlocked drm_gem_object_put + void drm_gem_object_put_locked(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, -- cgit v1.2.3-59-g8ed1b From be6ee102341bc4d07e050dda119ecb91229bc654 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:50:53 +0100 Subject: drm: remove _unlocked suffix in drm_gem_object_put_unlocked Spelling out _unlocked for each and every driver is a annoying. Especially if we consider how many drivers, do not know (or need to) about the horror stories involving struct_mutex. Just drop the suffix. It makes the API cleaner. Done via the following script: __from=drm_gem_object_put_unlocked __to=drm_gem_object_put for __file in $(git grep --name-only $__from); do sed -i "s/$__from/$__to/g" $__file; done Pay special attention to the compat #define v2: keep sed and #define removal separate Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Emil Velikov Acked-by: Sam Ravnborg (v1) Reviewed-by: Steven Price Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-14-emil.l.velikov@gmail.com --- Documentation/gpu/drm-mm.rst | 2 +- drivers/gpu/drm/drm_client.c | 2 +- drivers/gpu/drm/drm_gem.c | 26 +++++++++++++------------- drivers/gpu/drm/drm_gem_cma_helper.c | 8 ++++---- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 6 +++--- drivers/gpu/drm/drm_gem_shmem_helper.c | 4 ++-- drivers/gpu/drm/drm_gem_ttm_helper.c | 2 +- drivers/gpu/drm/drm_gem_vram_helper.c | 10 +++++----- drivers/gpu/drm/drm_prime.c | 6 +++--- include/drm/drm_gem.h | 2 +- 10 files changed, 34 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index 5ba2ead8f317..8c8540ee859c 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -178,7 +178,7 @@ GEM Objects Lifetime -------------------- All GEM objects are reference-counted by the GEM core. References can be -acquired and release by calling drm_gem_object_get() and drm_gem_object_put_unlocked() +acquired and release by calling drm_gem_object_get() and drm_gem_object_put() respectively. 
When the last reference to a GEM object is released the GEM core calls diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 8cb93f5209a4..536a22747b51 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -237,7 +237,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer) drm_gem_vunmap(buffer->gem, buffer->vaddr); if (buffer->gem) - drm_gem_object_put_unlocked(buffer->gem); + drm_gem_object_put(buffer->gem); if (buffer->handle) drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d1a7f1844128..efc0367841e2 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -235,7 +235,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) mutex_unlock(&dev->object_name_lock); if (final) - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); } /* @@ -331,7 +331,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&obj->vma_node); out: - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -690,7 +690,7 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, * Returns: * * @objs filled in with GEM object pointers. Returned GEM objects need to be - * released with drm_gem_object_put_unlocked(). -ENOENT is returned on a lookup + * released with drm_gem_object_put(). -ENOENT is returned on a lookup * failure. 0 is returned on success. * */ @@ -785,7 +785,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, else if (ret > 0) ret = 0; - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -860,7 +860,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, err: mutex_unlock(&dev->object_name_lock); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -898,7 +898,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); if (ret) return ret; @@ -991,7 +991,7 @@ EXPORT_SYMBOL(drm_gem_object_free); * driver doesn't use &drm_device.struct_mutex for anything. * * For drivers not encumbered with legacy locking use - * drm_gem_object_put_unlocked() instead. + * drm_gem_object_put() instead. 
*/ void drm_gem_object_put_locked(struct drm_gem_object *obj) @@ -1030,7 +1030,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); } EXPORT_SYMBOL(drm_gem_vm_close); @@ -1079,7 +1079,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, if (obj->funcs && obj->funcs->mmap) { ret = obj->funcs->mmap(obj, vma); if (ret) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); @@ -1089,7 +1089,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, else if (dev->driver->gem_vm_ops) vma->vm_ops = dev->driver->gem_vm_ops; else { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return -EINVAL; } @@ -1155,13 +1155,13 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) return -EINVAL; if (!drm_vma_node_is_allowed(node, priv)) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return -EACCES; } if (node->readonly) { if (vma->vm_flags & VM_WRITE) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return -EINVAL; } @@ -1171,7 +1171,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 12e98fb28229..b3db3ca7bd7a 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -114,7 +114,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, return cma_obj; error: - drm_gem_object_put_unlocked(&cma_obj->base); + drm_gem_object_put(&cma_obj->base); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_create); @@ -156,7 +156,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, */ ret = drm_gem_handle_create(file_priv, gem_obj, handle); /* drop reference from allocate - handle holds it now. */ - drm_gem_object_put_unlocked(gem_obj); + drm_gem_object_put(gem_obj); if (ret) return ERR_PTR(ret); @@ -380,13 +380,13 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, return -EINVAL; if (!drm_vma_node_is_allowed(node, priv)) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return -EACCES; } cma_obj = to_drm_gem_cma_obj(obj); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return cma_obj->vaddr ? 
(unsigned long)cma_obj->vaddr : -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index ccc2c71fa491..109d11fb4cd4 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -95,7 +95,7 @@ void drm_gem_fb_destroy(struct drm_framebuffer *fb) int i; for (i = 0; i < 4; i++) - drm_gem_object_put_unlocked(fb->obj[i]); + drm_gem_object_put(fb->obj[i]); drm_framebuffer_cleanup(fb); kfree(fb); @@ -175,7 +175,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, + mode_cmd->offsets[i]; if (objs[i]->size < min_size) { - drm_gem_object_put_unlocked(objs[i]); + drm_gem_object_put(objs[i]); ret = -EINVAL; goto err_gem_object_put; } @@ -189,7 +189,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, err_gem_object_put: for (i--; i >= 0; i--) - drm_gem_object_put_unlocked(objs[i]); + drm_gem_object_put(objs[i]); return ret; } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index df31e5782eed..339eee79ea52 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -360,7 +360,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, */ ret = drm_gem_handle_create(file_priv, &shmem->base, handle); /* drop reference from allocate - handle holds it now. */ - drm_gem_object_put_unlocked(&shmem->base); + drm_gem_object_put(&shmem->base); if (ret) return ERR_PTR(ret); @@ -684,7 +684,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, err_free_array: kvfree(shmem->pages); err_free_gem: - drm_gem_object_put_unlocked(&shmem->base); + drm_gem_object_put(&shmem->base); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 605a8a3da7f9..892b2288a104 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -74,7 +74,7 @@ int drm_gem_ttm_mmap(struct drm_gem_object *gem, * ttm has its own object refcounting, so drop gem reference * to avoid double accounting counting. 
*/ - drm_gem_object_put_unlocked(gem); + drm_gem_object_put(gem); return 0; } diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 8b2d5c945c95..0023ce1d2cf7 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -618,9 +618,9 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file, ret = drm_gem_handle_create(file, &gbo->bo.base, &handle); if (ret) - goto err_drm_gem_object_put_unlocked; + goto err_drm_gem_object_put; - drm_gem_object_put_unlocked(&gbo->bo.base); + drm_gem_object_put(&gbo->bo.base); args->pitch = pitch; args->size = size; @@ -628,8 +628,8 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file, return 0; -err_drm_gem_object_put_unlocked: - drm_gem_object_put_unlocked(&gbo->bo.base); +err_drm_gem_object_put: + drm_gem_object_put(&gbo->bo.base); return ret; } EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb); @@ -737,7 +737,7 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, gbo = drm_gem_vram_of_gem(gem); *offset = drm_gem_vram_mmap_offset(gbo); - drm_gem_object_put_unlocked(gem); + drm_gem_object_put(gem); return 0; } diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 282774e469ac..bbfc713bfdc3 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -270,7 +270,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) struct drm_device *dev = obj->dev; /* drop the reference on the export fd holds */ - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); drm_dev_put(dev); } @@ -329,7 +329,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, handle); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); if (ret) goto out_put; @@ -500,7 +500,7 @@ out_have_handle: fail_put_dmabuf: dma_buf_put(dmabuf); out: - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); out_unlock: mutex_unlock(&file_priv->prime.lock); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 2f7b86c0649c..10c5d561eb18 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -188,7 +188,7 @@ struct drm_gem_object { * Reference count of this object * * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked() - * or drm_gem_object_put_unlocked() to release a reference to a GEM + * or drm_gem_object_put() to release a reference to a GEM * buffer object. */ struct kref refcount; -- cgit v1.2.3-59-g8ed1b From ab15d56e27be47b7feebffd6e8319ece01959fbf Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 15 May 2020 10:51:18 +0100 Subject: drm: remove transient drm_gem_object_put_unlocked() As of last commit, all the drivers have been updated away from the _unlocked helper. As such we can now remove the transient #define. 
v2: keep sed and #define removal separate Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Emil Velikov Acked-by: Sam Ravnborg (v1) Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-39-emil.l.velikov@gmail.com --- include/drm/drm_gem.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 10c5d561eb18..52173abdf500 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -375,8 +375,6 @@ drm_gem_object_put(struct drm_gem_object *obj) kref_put(&obj->refcount, drm_gem_object_free); } -#define drm_gem_object_put_unlocked drm_gem_object_put - void drm_gem_object_put_locked(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, -- cgit v1.2.3-59-g8ed1b From 123f62de419f2a49449629ef822ed2c393a4781c Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 4 May 2020 15:52:06 -0700 Subject: drm/i915/rkl: Add RKL platform info and PCI ids Introduce the basic platform definition, macros, and PCI IDs. Bspec: 44501 Cc: Lucas De Marchi Cc: Caz Yokoyama Cc: Aditya Swarup Signed-off-by: Matt Roper Acked-by: Caz Yokoyama Reviewed-by: Anusha Srivatsa Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20200504225227.464666-2-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 8 ++++++++ drivers/gpu/drm/i915/i915_pci.c | 10 ++++++++++ drivers/gpu/drm/i915/intel_device_info.c | 1 + drivers/gpu/drm/i915/intel_device_info.h | 1 + include/drm/i915_pciids.h | 9 +++++++++ 5 files changed, 29 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0abbefa457f8..1fd7fdbed553 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1414,6 +1414,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) #define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) +#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1527,6 +1528,13 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_TGL_REVID(p, since, until) \ (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) +#define RKL_REVID_A0 0x0 +#define RKL_REVID_B0 0x1 +#define RKL_REVID_C0 0x4 + +#define IS_RKL_REVID(p, since, until) \ + (IS_ROCKETLAKE(p) && IS_REVID(p, since, until)) + #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 193048ce3c3a..eb6d4a0c9196 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -863,6 +863,15 @@ static const struct intel_device_info tgl_info = { BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; +static const struct intel_device_info rkl_info = { + GEN12_FEATURES, + PLATFORM(INTEL_ROCKETLAKE), + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .require_force_probe = 1, + .engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), +}; + #define GEN12_DGFX_FEATURES \ GEN12_FEATURES, \ .is_dgfx = 1 @@ -941,6 +950,7 @@ static const struct pci_device_id 
pciidlist[] = { INTEL_ICL_11_IDS(&icl_info), INTEL_EHL_IDS(&ehl_info), INTEL_TGL_12_IDS(&tgl_info), + INTEL_RKL_IDS(&rkl_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 8a635bd4d5d8..e5e6836f8fa0 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -61,6 +61,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(ICELAKE), PLATFORM_NAME(ELKHARTLAKE), PLATFORM_NAME(TIGERLAKE), + PLATFORM_NAME(ROCKETLAKE), }; #undef PLATFORM_NAME diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 62e03ffa377e..c912acd06109 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -80,6 +80,7 @@ enum intel_platform { INTEL_ELKHARTLAKE, /* gen12 */ INTEL_TIGERLAKE, + INTEL_ROCKETLAKE, INTEL_MAX_PLATFORMS }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 662d8351c87a..bc989de2aac2 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -605,4 +605,13 @@ INTEL_VGA_DEVICE(0x9AD9, info), \ INTEL_VGA_DEVICE(0x9AF8, info) +/* RKL */ +#define INTEL_RKL_IDS(info) \ + INTEL_VGA_DEVICE(0x4C80, info), \ + INTEL_VGA_DEVICE(0x4C8A, info), \ + INTEL_VGA_DEVICE(0x4C8B, info), \ + INTEL_VGA_DEVICE(0x4C8C, info), \ + INTEL_VGA_DEVICE(0x4C90, info), \ + INTEL_VGA_DEVICE(0x4C9A, info) + #endif /* _I915_PCIIDS_H */ -- cgit v1.2.3-59-g8ed1b From 0e799e840a07e9cd843149be6811fd895d20a5a0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 20 May 2020 15:23:47 +0100 Subject: drm: Restore the NULL check for drm_gem_object_put() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some users want to pass NULL to drm_gem_object_put(), but those using __drm_gem_object_put() did not. Compromise, have both and let the compiler sort it out. drm_gem_fb_destroy() calls drm_gem_object_put() with NULL obj causing: [ 11.584209] BUG: kernel NULL pointer dereference, address: 0000000000000000 [ 11.584213] #PF: supervisor write access in kernel mode [ 11.584215] #PF: error_code(0x0002) - not-present page [ 11.584216] PGD 0 P4D 0 [ 11.584220] Oops: 0002 [#1] SMP NOPTI [ 11.584223] CPU: 7 PID: 1571 Comm: gnome-shell Tainted: G E 5.7.0-rc1-1-default+ #27 [ 11.584225] Hardware name: Micro-Star International Co., Ltd. MS-7A31/X370 XPOWER GAMING TITANIUM (MS-7A31), BIOS 1.MR 12/03/2019 [ 11.584237] RIP: 0010:drm_gem_fb_destroy+0x28/0x70 [drm_kms_helper] [ 11.584256] Call Trace: [ 11.584279] drm_mode_rmfb+0x189/0x1c0 [drm] [ 11.584299] ? drm_mode_rmfb+0x1c0/0x1c0 [drm] [ 11.584314] drm_ioctl_kernel+0xaa/0xf0 [drm] [ 11.584329] drm_ioctl+0x1ff/0x3b0 [drm] [ 11.584347] ? drm_mode_rmfb+0x1c0/0x1c0 [drm] [ 11.584421] amdgpu_drm_ioctl+0x49/0x80 [amdgpu] [ 11.584427] ksys_ioctl+0x87/0xc0 [ 11.584430] __x64_sys_ioctl+0x16/0x20 [ 11.584434] do_syscall_64+0x5f/0x240 [ 11.584438] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 11.584440] RIP: 0033:0x7f0ef80f7227 Reported-by: Nirmoy Das Fixes: b5d250744ccc ("drm/gem: fold drm_gem_object_put_unlocked and __drm_gem_object_put()") Signed-off-by: Chris Wilson Cc: Nirmoy Das Cc: Emil Velikov Cc: Christian König . 
Acked-by: Nirmoy Das Reviewed-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20200520142347.29060-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2 +- include/drm/drm_gem.h | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index aba7517c2837..2faa481cc18f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -105,7 +105,7 @@ __attribute__((nonnull)) static inline void i915_gem_object_put(struct drm_i915_gem_object *obj) { - drm_gem_object_put(&obj->base); + __drm_gem_object_put(&obj->base); } #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 52173abdf500..2410ff0a8e86 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -363,6 +363,13 @@ static inline void drm_gem_object_get(struct drm_gem_object *obj) kref_get(&obj->refcount); } +__attribute__((nonnull)) +static inline void +__drm_gem_object_put(struct drm_gem_object *obj) +{ + kref_put(&obj->refcount, drm_gem_object_free); +} + /** * drm_gem_object_put - drop a GEM buffer object reference * @obj: GEM buffer object @@ -372,7 +379,8 @@ static inline void drm_gem_object_get(struct drm_gem_object *obj) static inline void drm_gem_object_put(struct drm_gem_object *obj) { - kref_put(&obj->refcount, drm_gem_object_free); + if (obj) + __drm_gem_object_put(obj); } void drm_gem_object_put_locked(struct drm_gem_object *obj); -- cgit v1.2.3-59-g8ed1b From c9c03e3cf07299bf635e6fadad8d09106d26252f Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Sat, 9 May 2020 16:16:12 +0200 Subject: drm/client: Add drm_client_framebuffer_flush() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some drivers need explicit flushing of buffer changes, add a function that does that. v2: - Put all clip rect stuff inside if statement (Sam) Reviewed-by: Sam Ravnborg Signed-off-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/msgid/20200509141619.32970-4-noralf@tronnes.org --- drivers/gpu/drm/drm_client.c | 33 +++++++++++++++++++++++++++++++++ include/drm/drm_client.h | 1 + 2 files changed, 34 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 536a22747b51..495f47d23d87 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -437,6 +437,39 @@ void drm_client_framebuffer_delete(struct drm_client_buffer *buffer) } EXPORT_SYMBOL(drm_client_framebuffer_delete); +/** + * drm_client_framebuffer_flush - Manually flush client framebuffer + * @buffer: DRM client buffer (can be NULL) + * @rect: Damage rectangle (if NULL flushes all) + * + * This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes + * for drivers that need it. + * + * Returns: + * Zero on success or negative error code on failure. 
+ */ +int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) +{ + if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty) + return 0; + + if (rect) { + struct drm_clip_rect clip = { + .x1 = rect->x1, + .y1 = rect->y1, + .x2 = rect->x2, + .y2 = rect->y2, + }; + + return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, + 0, 0, &clip, 1); + } + + return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, + 0, 0, NULL, 0); +} +EXPORT_SYMBOL(drm_client_framebuffer_flush); + #ifdef CONFIG_DEBUG_FS static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) { diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index eb259c2547af..565e83358d3c 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -154,6 +154,7 @@ struct drm_client_buffer { struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); +int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); void *drm_client_buffer_vmap(struct drm_client_buffer *buffer); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); -- cgit v1.2.3-59-g8ed1b From 64593f2a6fc933bb9a410bc3f8c261f3e57a9601 Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Sat, 9 May 2020 16:16:13 +0200 Subject: drm/client: Add drm_client_modeset_check() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a way for client to check the configuration before comitting. v2: - Fix docs (Sam) Reviewed-by: Sam Ravnborg Signed-off-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/msgid/20200509141619.32970-5-noralf@tronnes.org --- drivers/gpu/drm/drm_client_modeset.c | 35 +++++++++++++++++++++++++++++++---- include/drm/drm_client.h | 1 + 2 files changed, 32 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 66e785b738eb..d0778bb159bc 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -969,7 +969,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) } EXPORT_SYMBOL(drm_client_rotation); -static int drm_client_modeset_commit_atomic(struct drm_client_dev *client, bool active) +static int drm_client_modeset_commit_atomic(struct drm_client_dev *client, bool active, bool check) { struct drm_device *dev = client->dev; struct drm_plane *plane; @@ -1036,7 +1036,10 @@ retry: } } - ret = drm_atomic_commit(state); + if (check) + ret = drm_atomic_check_only(state); + else + ret = drm_atomic_commit(state); out_state: if (ret == -EDEADLK) @@ -1097,6 +1100,30 @@ out: return ret; } +/** + * drm_client_modeset_check() - Check modeset configuration + * @client: DRM client + * + * Check modeset configuration. + * + * Returns: + * Zero on success or negative error code on failure. 
+ */ +int drm_client_modeset_check(struct drm_client_dev *client) +{ + int ret; + + if (!drm_drv_uses_atomic_modeset(client->dev)) + return 0; + + mutex_lock(&client->modeset_mutex); + ret = drm_client_modeset_commit_atomic(client, true, true); + mutex_unlock(&client->modeset_mutex); + + return ret; +} +EXPORT_SYMBOL(drm_client_modeset_check); + /** * drm_client_modeset_commit_locked() - Force commit CRTC configuration * @client: DRM client @@ -1115,7 +1142,7 @@ int drm_client_modeset_commit_locked(struct drm_client_dev *client) mutex_lock(&client->modeset_mutex); if (drm_drv_uses_atomic_modeset(dev)) - ret = drm_client_modeset_commit_atomic(client, true); + ret = drm_client_modeset_commit_atomic(client, true, false); else ret = drm_client_modeset_commit_legacy(client); mutex_unlock(&client->modeset_mutex); @@ -1191,7 +1218,7 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode) mutex_lock(&client->modeset_mutex); if (drm_drv_uses_atomic_modeset(dev)) - ret = drm_client_modeset_commit_atomic(client, mode == DRM_MODE_DPMS_ON); + ret = drm_client_modeset_commit_atomic(client, mode == DRM_MODE_DPMS_ON, false); else drm_client_modeset_dpms_legacy(client, mode); mutex_unlock(&client->modeset_mutex); diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 565e83358d3c..7aaea665bfc2 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -162,6 +162,7 @@ int drm_client_modeset_create(struct drm_client_dev *client); void drm_client_modeset_free(struct drm_client_dev *client); int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height); bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation); +int drm_client_modeset_check(struct drm_client_dev *client); int drm_client_modeset_commit_locked(struct drm_client_dev *client); int drm_client_modeset_commit(struct drm_client_dev *client); int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); -- cgit v1.2.3-59-g8ed1b From bd34cea2a0e4b0c7a8e73d6cbaf694b233769d9d Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Sat, 9 May 2020 16:16:16 +0200 Subject: drm/format-helper: Add drm_fb_swab() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This replaces drm_fb_swab16() with drm_fb_swab() supporting 16 and 32-bit. Also make pixel line caching optional. v2: - Bail out on cpp != 2 && 4 (Sam) Reviewed-by: Sam Ravnborg Signed-off-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/msgid/20200509141619.32970-8-noralf@tronnes.org --- drivers/gpu/drm/drm_format_helper.c | 61 +++++++++++++++++++++++++------------ drivers/gpu/drm/drm_mipi_dbi.c | 2 +- include/drm/drm_format_helper.h | 4 +-- 3 files changed, 44 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 3b818f2b2392..c043ca364c86 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -79,39 +79,60 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr, EXPORT_SYMBOL(drm_fb_memcpy_dstclip); /** - * drm_fb_swab16 - Swap bytes into clip buffer - * @dst: RGB565 destination buffer - * @vaddr: RGB565 source buffer + * drm_fb_swab - Swap bytes into clip buffer + * @dst: Destination buffer + * @src: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @cached: Source buffer is mapped cached (eg. 
not write-combined) + * + * If @cached is false a temporary buffer is used to cache one pixel line at a + * time to speed up slow uncached reads. + * + * This function does not apply clipping on dst, i.e. the destination + * is a small buffer containing the clip rect only. */ -void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, + struct drm_rect *clip, bool cached) { - size_t len = (clip->x2 - clip->x1) * sizeof(u16); + u8 cpp = fb->format->cpp[0]; + size_t len = drm_rect_width(clip) * cpp; + u16 *src16, *dst16 = dst; + u32 *src32, *dst32 = dst; unsigned int x, y; - u16 *src, *buf; + void *buf = NULL; - /* - * The cma memory is write-combined so reads are uncached. - * Speed up by fetching one line at a time. - */ - buf = kmalloc(len, GFP_KERNEL); - if (!buf) + if (WARN_ON_ONCE(cpp != 2 && cpp != 4)) return; + if (!cached) + buf = kmalloc(len, GFP_KERNEL); + + src += clip_offset(clip, fb->pitches[0], cpp); + for (y = clip->y1; y < clip->y2; y++) { - src = vaddr + (y * fb->pitches[0]); - src += clip->x1; - memcpy(buf, src, len); - src = buf; - for (x = clip->x1; x < clip->x2; x++) - *dst++ = swab16(*src++); + if (buf) { + memcpy(buf, src, len); + src16 = buf; + src32 = buf; + } else { + src16 = src; + src32 = src; + } + + for (x = clip->x1; x < clip->x2; x++) { + if (cpp == 4) + *dst32++ = swab32(*src32++); + else + *dst16++ = swab16(*src16++); + } + + src += fb->pitches[0]; } kfree(buf); } -EXPORT_SYMBOL(drm_fb_swab16); +EXPORT_SYMBOL(drm_fb_swab); static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, unsigned int pixels, diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index bb27c82757f1..fd8d672972a9 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -217,7 +217,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, switch (fb->format->format) { case DRM_FORMAT_RGB565: if (swap) - drm_fb_swab16(dst, src, fb, clip); + drm_fb_swab(dst, src, fb, clip, !import_attach); else drm_fb_memcpy(dst, src, fb, clip); break; diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index ac220aa1a245..5f9e37032468 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -14,8 +14,8 @@ void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); -void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip); +void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, + struct drm_rect *clip, bool cached); void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip, bool swab); -- cgit v1.2.3-59-g8ed1b From 0425662fdf05665235e768e2fbcb4ced12432b43 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 Apr 2020 20:19:27 +0300 Subject: drm: Nuke mode->vrefresh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get rid of mode->vrefresh and just calculate it on demand. Saves a bit of space and avoids the cached value getting out of sync with reality. 
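For reference, the on-demand value is essentially the pixel clock divided by the total timings; a simplified sketch (name hypothetical, ignoring the interlace/doublescan/vscan adjustments that drm_mode_vrefresh() applies):

	/* mode->clock is in kHz, so this yields Hz */
	static int example_vrefresh(const struct drm_display_mode *mode)
	{
		if (!mode->htotal || !mode->vtotal)
			return 0;

		return DIV_ROUND_CLOSEST(mode->clock * 1000,
					 mode->htotal * mode->vtotal);
	}

e.g. for the 1920x1080@60 CEA mode: 148500 kHz * 1000 / (2200 * 1125) = 60 Hz.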
Mostly done with cocci, with the following manual fixups: - Remove the now empty loop in drm_helper_probe_single_connector_modes() - Fix __MODE() macro in ch7006_mode.c - Fix DRM_MODE_ARG() macro in drm_modes.h - Remove leftover comment from samsung_s6d16d0_mode - Drop the TODO @@ @@ struct drm_display_mode { ... - int vrefresh; ... }; @@ identifier N; expression E; @@ struct drm_display_mode N = { - .vrefresh = E }; @@ identifier N; expression E; @@ struct drm_display_mode N[...] = { ..., { - .vrefresh = E } ,... }; @@ expression E; @@ { DRM_MODE(...), - .vrefresh = E, } @@ identifier M, R; @@ int drm_mode_vrefresh(const struct drm_display_mode *M) { ... - if (M->vrefresh > 0) - R = M->vrefresh; - else if (...) { ... } ... } @@ struct drm_display_mode *p; expression E; @@ ( - p->vrefresh = E; | - p->vrefresh + drm_mode_vrefresh(p) ) @@ struct drm_display_mode s; expression E; @@ ( - s.vrefresh = E; | - s.vrefresh + drm_mode_vrefresh(&s) ) @@ expression E; @@ - drm_mode_vrefresh(E) ? drm_mode_vrefresh(E) : drm_mode_vrefresh(E) + drm_mode_vrefresh(E) @find_substruct@ identifier X; identifier S; @@ struct X { ... struct drm_display_mode S; ... }; @@ identifier find_substruct.S; expression E; identifier I; @@ { .S = { - .vrefresh = E } } @@ identifier find_substruct.S; identifier find_substruct.X; expression E; identifier I; @@ struct X I[...] = { ..., .S = { - .vrefresh = E } ,... }; v2: Drop TODO v3: Rebase v4: Rebase Cc: Andrzej Hajda Cc: Neil Armstrong Cc: Laurent Pinchart Cc: Jonas Karlman Cc: Jernej Skrabec Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park Cc: Linus Walleij Cc: CK Hu Cc: Philipp Zabel Cc: Ben Skeggs Cc: Thierry Reding Cc: Sam Ravnborg Cc: Jerry Han Cc: Icenowy Zheng Cc: Jagan Teki Cc: Stefan Mavrodiev Cc: Robert Chiras Cc: "Guido Günther" Cc: Purism Kernel Team Cc: Benjamin Gaignard Cc: Vincent Abriou Cc: VMware Graphics Cc: Thomas Hellstrom Cc: linux-amlogic@lists.infradead.org Cc: nouveau@lists.freedesktop.org Reviewed-by: Laurent Pinchart Reviewed-by: Emil Velikov Reviewed-by: Sam Ravnborg Acked-by: Linus Walleij Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200428171940.19552-4-ville.syrjala@linux.intel.com --- Documentation/gpu/todo.rst | 20 -- drivers/gpu/drm/bridge/sii902x.c | 2 +- drivers/gpu/drm/drm_client_modeset.c | 2 +- drivers/gpu/drm/drm_edid.c | 328 ++++++++++----------- drivers/gpu/drm/drm_modes.c | 9 +- drivers/gpu/drm/drm_probe_helper.c | 3 - drivers/gpu/drm/exynos/exynos_hdmi.c | 5 +- drivers/gpu/drm/exynos/exynos_mixer.c | 2 +- drivers/gpu/drm/i2c/ch7006_mode.c | 1 - drivers/gpu/drm/i915/display/intel_display.c | 1 - .../gpu/drm/i915/display/intel_display_debugfs.c | 4 +- drivers/gpu/drm/i915/display/intel_dp.c | 10 +- drivers/gpu/drm/i915/display/intel_tv.c | 3 - drivers/gpu/drm/mcde/mcde_dsi.c | 6 +- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 4 +- drivers/gpu/drm/mediatek/mtk_hdmi.c | 2 +- drivers/gpu/drm/meson/meson_venc_cvbs.c | 2 - drivers/gpu/drm/nouveau/nouveau_connector.c | 5 +- drivers/gpu/drm/panel/panel-arm-versatile.c | 4 - .../gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c | 1 - drivers/gpu/drm/panel/panel-boe-himax8279d.c | 3 +- drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 7 +- drivers/gpu/drm/panel/panel-elida-kd35t133.c | 3 +- drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c | 3 +- .../gpu/drm/panel/panel-feiyang-fy07024di26a30d.c | 3 +- drivers/gpu/drm/panel/panel-ilitek-ili9322.c | 7 - drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 3 +- 
drivers/gpu/drm/panel/panel-innolux-p079zca.c | 4 +- drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 3 +- drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c | 3 +- drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c | 3 +- drivers/gpu/drm/panel/panel-lg-lb035q02.c | 1 - drivers/gpu/drm/panel/panel-lg-lg4573.c | 3 +- drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 1 - drivers/gpu/drm/panel/panel-novatek-nt35510.c | 1 - drivers/gpu/drm/panel/panel-novatek-nt39016.c | 2 - drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 1 - drivers/gpu/drm/panel/panel-orisetech-otm8009a.c | 3 +- drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c | 3 +- .../gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 3 +- .../gpu/drm/panel/panel-raspberrypi-touchscreen.c | 4 +- drivers/gpu/drm/panel/panel-raydium-rm67191.c | 3 +- drivers/gpu/drm/panel/panel-raydium-rm68200.c | 3 +- drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c | 5 +- drivers/gpu/drm/panel/panel-ronbo-rb070d30.c | 1 - drivers/gpu/drm/panel/panel-samsung-s6d16d0.c | 6 - drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 4 +- drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c | 3 +- drivers/gpu/drm/panel/panel-samsung-s6e63m0.c | 3 +- .../drm/panel/panel-samsung-s6e88a0-ams452ef01.c | 1 - drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 3 +- drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c | 3 +- drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c | 1 - drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 3 +- drivers/gpu/drm/panel/panel-simple.c | 91 +----- drivers/gpu/drm/panel/panel-sitronix-st7701.c | 2 +- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 3 +- drivers/gpu/drm/panel/panel-sony-acx424akp.c | 2 - drivers/gpu/drm/panel/panel-sony-acx565akm.c | 1 - drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 1 - drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 1 - drivers/gpu/drm/panel/panel-tpo-tpg110.c | 5 - drivers/gpu/drm/panel/panel-truly-nt35597.c | 1 - drivers/gpu/drm/panel/panel-visionox-rm69299.c | 1 - drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c | 3 +- drivers/gpu/drm/sti/sti_hda.c | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 - include/drm/drm_modes.h | 12 +- 68 files changed, 218 insertions(+), 425 deletions(-) (limited to 'include') diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 2ce52c5917f8..4c2b72f14316 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -327,26 +327,6 @@ Contact: Laurent Pinchart, Daniel Vetter Level: Intermediate (mostly because it is a huge tasks without good partial milestones, not technically itself that challenging) -Convert direct mode.vrefresh accesses to use drm_mode_vrefresh() ----------------------------------------------------------------- - -drm_display_mode.vrefresh isn't guaranteed to be populated. As such, using it -is risky and has been known to cause div-by-zero bugs. Fortunately, drm core -has helper which will use mode.vrefresh if it's !0 and will calculate it from -the timings when it's 0. - -Use simple search/replace, or (more fun) cocci to replace instances of direct -vrefresh access with a call to the helper. Check out -https://lists.freedesktop.org/archives/dri-devel/2019-January/205186.html for -inspiration. - -Once all instances of vrefresh have been converted, remove vrefresh from -drm_display_mode to avoid future use. 
- -Contact: Sean Paul - -Level: Starter - connector register/unregister fixes ----------------------------------- diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 6dad025f8da7..19d8ae59ea03 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -360,7 +360,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, buf[0] = pixel_clock_10kHz & 0xff; buf[1] = pixel_clock_10kHz >> 8; - buf[2] = adj->vrefresh; + buf[2] = drm_mode_vrefresh(adj); buf[3] = 0x00; buf[4] = adj->hdisplay; buf[5] = adj->hdisplay >> 8; diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index d0778bb159bc..b7e9e1c2564c 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -186,7 +186,7 @@ again: continue; if (cmdline_mode->refresh_specified) { - if (mode->vrefresh != cmdline_mode->refresh) + if (drm_mode_vrefresh(mode) != cmdline_mode->refresh) continue; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3bd95c4b02eb..57cac677269d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -719,662 +719,662 @@ static const struct drm_display_mode edid_cea_modes_1[] = { { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 2 - 720x480@60Hz 4:3 */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 3 - 720x480@60Hz 16:9 */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 4 - 1280x720@60Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 5 - 1920x1080i@60Hz 16:9 */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 6 - 720(1440)x480i@60Hz 4:3 */ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 7 - 720(1440)x480i@60Hz 16:9 */ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 8 - 720(1440)x240@60Hz 4:3 */ { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, 
801, 858, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 9 - 720(1440)x240@60Hz 16:9 */ { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, 801, 858, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 10 - 2880x480i@60Hz 4:3 */ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 11 - 2880x480i@60Hz 16:9 */ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 12 - 2880x240@60Hz 4:3 */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 13 - 2880x240@60Hz 16:9 */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 14 - 1440x480@60Hz 4:3 */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 15 - 1440x480@60Hz 16:9 */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 16 - 1920x1080@60Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 17 - 720x576@50Hz 4:3 */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 18 - 720x576@50Hz 16:9 */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 19 - 1280x720@50Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = 
HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 20 - 1920x1080i@50Hz 16:9 */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 21 - 720(1440)x576i@50Hz 4:3 */ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 22 - 720(1440)x576i@50Hz 16:9 */ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 23 - 720(1440)x288@50Hz 4:3 */ { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, 795, 864, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 24 - 720(1440)x288@50Hz 16:9 */ { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, 795, 864, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 25 - 2880x576i@50Hz 4:3 */ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 26 - 2880x576i@50Hz 16:9 */ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 27 - 2880x288@50Hz 4:3 */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 28 - 2880x288@50Hz 16:9 */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 29 - 1440x576@50Hz 4:3 */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 30 - 1440x576@50Hz 16:9 */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, 
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 31 - 1920x1080@50Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 32 - 1920x1080@24Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 33 - 1920x1080@25Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 34 - 1920x1080@30Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 35 - 2880x480@60Hz 4:3 */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 36 - 2880x480@60Hz 16:9 */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 37 - 2880x576@50Hz 4:3 */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 38 - 2880x576@50Hz 16:9 */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 39 - 1920x1080i@50Hz 16:9 */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 40 - 1920x1080i@100Hz 16:9 */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 41 - 1280x720@100Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 42 
- 720x576@100Hz 4:3 */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 43 - 720x576@100Hz 16:9 */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 44 - 720(1440)x576i@100Hz 4:3 */ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 45 - 720(1440)x576i@100Hz 16:9 */ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 46 - 1920x1080i@120Hz 16:9 */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 47 - 1280x720@120Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 48 - 720x480@120Hz 4:3 */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 49 - 720x480@120Hz 16:9 */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 50 - 720(1440)x480i@120Hz 4:3 */ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 51 - 720(1440)x480i@120Hz 16:9 */ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 52 - 720x576@200Hz 4:3 */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, 
}, /* 53 - 720x576@200Hz 16:9 */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 54 - 720(1440)x576i@200Hz 4:3 */ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 55 - 720(1440)x576i@200Hz 16:9 */ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 56 - 720x480@240Hz 4:3 */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 57 - 720x480@240Hz 16:9 */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), - .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 58 - 720(1440)x480i@240Hz 4:3 */ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, /* 59 - 720(1440)x480i@240Hz 16:9 */ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), - .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 60 - 1280x720@24Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 61 - 1280x720@25Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 3740, 3960, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 62 - 1280x720@30Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 63 - 1920x1080@120Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 
64 - 1920x1080@100Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 65 - 1280x720@24Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 66 - 1280x720@25Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 3740, 3960, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 67 - 1280x720@30Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 68 - 1280x720@50Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 69 - 1280x720@60Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 70 - 1280x720@100Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 71 - 1280x720@120Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 72 - 1920x1080@24Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 73 - 1920x1080@25Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 74 - 1920x1080@30Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 75 - 1920x1080@50Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, 
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 76 - 1920x1080@60Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 77 - 1920x1080@100Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 78 - 1920x1080@120Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 79 - 1680x720@24Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 80 - 1680x720@25Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908, 2948, 3168, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 81 - 1680x720@30Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380, 2420, 2640, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 82 - 1680x720@50Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940, 1980, 2200, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 83 - 1680x720@60Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940, 1980, 2200, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 84 - 1680x720@100Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740, 1780, 2000, 0, 720, 725, 730, 825, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 85 - 1680x720@120Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740, 1780, 2000, 0, 720, 725, 730, 825, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 86 - 2560x1080@24Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558, 3602, 3750, 0, 1080, 1084, 1089, 1100, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + 
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 87 - 2560x1080@25Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008, 3052, 3200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 88 - 2560x1080@30Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328, 3372, 3520, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 89 - 2560x1080@50Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108, 3152, 3300, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 90 - 2560x1080@60Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808, 2852, 3000, 0, 1080, 1084, 1089, 1100, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 91 - 2560x1080@100Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778, 2822, 2970, 0, 1080, 1084, 1089, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 92 - 2560x1080@120Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108, 3152, 3300, 0, 1080, 1084, 1089, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 93 - 3840x2160@24Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 94 - 3840x2160@25Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 95 - 3840x2160@30Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 96 - 3840x2160@50Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 97 - 3840x2160@60Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 98 - 4096x2160@24Hz 256:135 */ { DRM_MODE("4096x2160", 
DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, /* 99 - 4096x2160@25Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064, 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, /* 100 - 4096x2160@30Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184, 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, /* 101 - 4096x2160@50Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064, 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, /* 102 - 4096x2160@60Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184, 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, /* 103 - 3840x2160@24Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 104 - 3840x2160@25Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 105 - 3840x2160@30Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 106 - 3840x2160@50Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 107 - 3840x2160@60Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 108 - 1280x720@48Hz 16:9 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240, 2280, 2500, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 109 - 1280x720@48Hz 64:27 */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240, 2280, 2500, 0, 720, 725, 730, 
750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 110 - 1680x720@48Hz 64:27 */ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490, 2530, 2750, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 111 - 1920x1080@48Hz 16:9 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 112 - 1920x1080@48Hz 64:27 */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 113 - 2560x1080@48Hz 64:27 */ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558, 3602, 3750, 0, 1080, 1084, 1089, 1100, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 114 - 3840x2160@48Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 115 - 4096x2160@48Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, /* 116 - 3840x2160@48Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 117 - 3840x2160@100Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 118 - 3840x2160@120Hz 16:9 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 119 - 3840x2160@100Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 120 - 3840x2160@120Hz 64:27 */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, 
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 121 - 5120x2160@24Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116, 7204, 7500, 0, 2160, 2168, 2178, 2200, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 122 - 5120x2160@25Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816, 6904, 7200, 0, 2160, 2168, 2178, 2200, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 123 - 5120x2160@30Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784, 5872, 6000, 0, 2160, 2168, 2178, 2200, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 124 - 5120x2160@48Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866, 5954, 6250, 0, 2160, 2168, 2178, 2475, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 125 - 5120x2160@50Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216, 6304, 6600, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 126 - 5120x2160@60Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284, 5372, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 127 - 5120x2160@100Hz 64:27 */ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216, 6304, 6600, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, }; /* @@ -1387,137 +1387,137 @@ static const struct drm_display_mode edid_cea_modes_193[] = { { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 5284, 5372, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 194 - 7680x4320@24Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232, 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 195 - 7680x4320@25Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032, 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 196 - 7680x4320@30Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232, 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio 
= HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 197 - 7680x4320@48Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232, 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 198 - 7680x4320@50Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032, 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 199 - 7680x4320@60Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232, 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 200 - 7680x4320@100Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792, 9968, 10560, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 201 - 7680x4320@120Hz 16:9 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032, 8208, 8800, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 202 - 7680x4320@24Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232, 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 203 - 7680x4320@25Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032, 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 204 - 7680x4320@30Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232, 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 205 - 7680x4320@48Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232, 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 206 - 7680x4320@50Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032, 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 207 - 7680x4320@60Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232, 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = 
HDMI_PICTURE_ASPECT_64_27, }, /* 208 - 7680x4320@100Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792, 9968, 10560, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 209 - 7680x4320@120Hz 64:27 */ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032, 8208, 8800, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 210 - 10240x4320@24Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 11732, 11908, 12500, 0, 4320, 4336, 4356, 4950, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 211 - 10240x4320@25Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 12732, 12908, 13500, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 212 - 10240x4320@30Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 10528, 10704, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 213 - 10240x4320@48Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 11732, 11908, 12500, 0, 4320, 4336, 4356, 4950, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 214 - 10240x4320@50Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 12732, 12908, 13500, 0, 4320, 4336, 4356, 4400, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 215 - 10240x4320@60Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 10528, 10704, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 216 - 10240x4320@100Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 12432, 12608, 13200, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 217 - 10240x4320@120Hz 64:27 */ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 10528, 10704, 11000, 0, 4320, 4336, 4356, 4500, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, /* 218 - 4096x2160@100Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = 
HDMI_PICTURE_ASPECT_256_135, }, /* 219 - 4096x2160@120Hz 256:135 */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4184, 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, }; /* @@ -1531,25 +1531,25 @@ static const struct drm_display_mode edid_4k_modes[] = { 3840, 4016, 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 2 - 3840x2160@25Hz */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 3 - 3840x2160@24Hz */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 4 - 4096x2160@24Hz (SMPTE) */ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116, 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), - .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, }; /*** DDC fetch and block validation ***/ @@ -2145,10 +2145,8 @@ static void edid_fixup_preferred(struct drm_connector *connector, if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) preferred_mode = cur_mode; - cur_vrefresh = cur_mode->vrefresh ? - cur_mode->vrefresh : drm_mode_vrefresh(cur_mode); - preferred_vrefresh = preferred_mode->vrefresh ? 
- preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode); + cur_vrefresh = drm_mode_vrefresh(cur_mode); + preferred_vrefresh = drm_mode_vrefresh(preferred_mode); /* At a given size, try to get closest to target refresh */ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) < @@ -2653,7 +2651,6 @@ set_size: } mode->type = DRM_MODE_TYPE_DRIVER; - mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); return mode; @@ -3298,7 +3295,7 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode) { unsigned int clock = cea_mode->clock; - if (cea_mode->vrefresh % 6 != 0) + if (drm_mode_vrefresh(cea_mode) % 6 != 0) return clock; /* @@ -3625,8 +3622,6 @@ drm_display_mode_from_vic_index(struct drm_connector *connector, if (!newmode) return NULL; - newmode->vrefresh = 0; - return newmode; } @@ -5161,7 +5156,6 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d if (timings->flags & 0x80) mode->type |= DRM_MODE_TYPE_PREFERRED; - mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); return mode; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index fec1c33b3045..e3d5f011f7bd 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -759,9 +759,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode) { int refresh = 0; - if (mode->vrefresh > 0) - refresh = mode->vrefresh; - else if (mode->htotal > 0 && mode->vtotal > 0) { + if (mode->htotal > 0 && mode->vtotal > 0) { unsigned int num, den; num = mode->clock * 1000; @@ -1308,7 +1306,7 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head if (diff) return diff; - diff = b->vrefresh - a->vrefresh; + diff = drm_mode_vrefresh(b) - drm_mode_vrefresh(a); if (diff) return diff; @@ -1921,7 +1919,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; - out->vrefresh = in->vrefresh; + out->vrefresh = drm_mode_vrefresh(in); out->flags = in->flags; out->type = in->type; @@ -1981,7 +1979,6 @@ int drm_mode_convert_umode(struct drm_device *dev, out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; - out->vrefresh = in->vrefresh; out->flags = in->flags; /* * Old xf86-video-vmware (possibly others too) used to diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 466dfbba8256..26e997f1524f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -534,9 +534,6 @@ prune: if (list_empty(&connector->modes)) return 0; - list_for_each_entry(mode, &connector->modes, head) - mode->vrefresh = drm_mode_vrefresh(mode); - drm_mode_sort(&connector->modes); DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 95dd399aa9cc..8c3f5b21eff4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -921,7 +921,8 @@ static int hdmi_mode_valid(struct drm_connector *connector, DRM_DEV_DEBUG_KMS(hdata->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", - mode->hdisplay, mode->vdisplay, mode->vrefresh, + mode->hdisplay, mode->vdisplay, + drm_mode_vrefresh(mode), (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 
true : false, mode->clock * 1000); @@ -1020,7 +1021,7 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, DRM_DEV_DEBUG_KMS(dev->dev, "Adjusted Mode: [%d]x[%d] [%d]Hz\n", m->hdisplay, m->vdisplay, - m->vrefresh); + drm_mode_vrefresh(m)); drm_mode_copy(adjusted_mode, m); break; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 21b726baedea..72f890529c12 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1046,7 +1046,7 @@ static int mixer_mode_valid(struct exynos_drm_crtc *crtc, u32 w = mode->hdisplay, h = mode->vdisplay; DRM_DEV_DEBUG_KMS(ctx->dev, "xres=%d, yres=%d, refresh=%d, intl=%d\n", - w, h, mode->vrefresh, + w, h, drm_mode_vrefresh(mode), !!(mode->flags & DRM_MODE_FLAG_INTERLACE)); if (ctx->mxr_ver == MXR_VER_128_0_0_184) diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c index bb5f67f10edb..6afe6d0ee630 100644 --- a/drivers/gpu/drm/i2c/ch7006_mode.c +++ b/drivers/gpu/drm/i2c/ch7006_mode.c @@ -121,7 +121,6 @@ const struct ch7006_tv_norm_info ch7006_tv_norms[] = { .vscan = 0, \ .flags = DRM_MODE_FLAG_##hsynp##HSYNC | \ DRM_MODE_FLAG_##vsynp##VSYNC, \ - .vrefresh = 0, \ }, \ .enc_hdisp = e_hd, \ .enc_vdisp = e_vd, \ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ec7e943fd877..a9a5e619fda8 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -8891,7 +8891,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; - mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); } diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index bdeea2e02642..bbf92ae69407 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -1098,10 +1098,10 @@ static void drrs_status_per_crtc(struct seq_file *m, seq_puts(m, "\n\t\t"); if (drrs->refresh_rate_type == DRRS_HIGH_RR) { seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); - vrefresh = panel->fixed_mode->vrefresh; + vrefresh = drm_mode_vrefresh(panel->fixed_mode); } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); - vrefresh = panel->downclock_mode->vrefresh; + vrefresh = drm_mode_vrefresh(panel->downclock_mode); } else { seq_printf(m, "DRRS_State: Unknown(%d)\n", drrs->refresh_rate_type); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d5aa85e57514..571009f158b7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -7356,7 +7356,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == + if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == refresh_rate) index = DRRS_LOW_RR; @@ -7469,7 +7469,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp, if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) intel_dp_set_drrs_state(dev_priv, old_crtc_state, - intel_dp->attached_connector->panel.fixed_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); dev_priv->drrs.dp = NULL; mutex_unlock(&dev_priv->drrs.mutex); @@ -7502,7 +7502,7 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work) struct 
drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - intel_dp->attached_connector->panel.downclock_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); } unlock: @@ -7548,7 +7548,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, /* invalidate means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - intel_dp->attached_connector->panel.fixed_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); mutex_unlock(&dev_priv->drrs.mutex); } @@ -7594,7 +7594,7 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, /* flush means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - intel_dp->attached_connector->panel.fixed_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); /* * flush also means no more activity hence schedule downclock, if all diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index d2e3a3a323e9..193067fd1b2b 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1036,9 +1036,6 @@ intel_tv_mode_to_mode(struct drm_display_mode *mode, /* TV has it's own notion of sync and other mode flags, so clear them. */ mode->flags = 0; - mode->vrefresh = 0; - mode->vrefresh = drm_mode_vrefresh(mode); - snprintf(mode->name, sizeof(mode->name), "%dx%d%c (%s)", mode->hdisplay, mode->vdisplay, diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index f303369305a3..ed28eaa42251 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -538,7 +538,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d, */ /* (ps/s) / (pixels/s) = ps/pixels */ pclk = DIV_ROUND_UP_ULL(1000000000000, - (mode->vrefresh * mode->htotal * mode->vtotal)); + (drm_mode_vrefresh(mode) * mode->htotal * mode->vtotal)); dev_dbg(d->dev, "picoseconds between two pixels: %llu\n", pclk); @@ -568,7 +568,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d, bpl *= d->mdsi->lanes; dev_dbg(d->dev, "calculated bytes per line: %llu @ %d Hz with HS %lu Hz\n", - bpl, mode->vrefresh, d->mdsi->hs_rate); + bpl, drm_mode_vrefresh(mode), d->mdsi->hs_rate); /* * 6 is header + checksum, header = 4 bytes, checksum = 2 bytes @@ -644,7 +644,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d, dev_err(d->dev, "video block does not fit on line!\n"); dev_err(d->dev, "calculated bytes per line: %llu @ %d Hz\n", - bpl, mode->vrefresh); + bpl, drm_mode_vrefresh(mode)); dev_err(d->dev, "bytes per line (blkline_pck) %u bytes\n", blkline_pck); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index fe85e487e477..a7dba4ced902 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -164,7 +164,7 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) state->pending_width = crtc->mode.hdisplay; state->pending_height = crtc->mode.vdisplay; - state->pending_vrefresh = crtc->mode.vrefresh; + state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode); wmb(); /* Make sure the above parameters are set before update */ state->pending_config = true; } @@ -263,7 +263,7 @@ static 
int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) width = crtc->state->adjusted_mode.hdisplay; height = crtc->state->adjusted_mode.vdisplay; - vrefresh = crtc->state->adjusted_mode.vrefresh; + vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode); drm_for_each_encoder(encoder, crtc->dev) { if (encoder->crtc != crtc) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index ff43a3d80410..86cf19f5c9ca 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1258,7 +1258,7 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn, struct drm_bridge *next_bridge; dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", - mode->hdisplay, mode->vdisplay, mode->vrefresh, + mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode), !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000); next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge); diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index 541f9eb2a135..f1747fde1fe0 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -48,7 +48,6 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_INTERLACE), - .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, }, @@ -58,7 +57,6 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_INTERLACE), - .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, }, diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 9a9a7f5003d3..ac80b1ac459c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -59,7 +59,6 @@ nouveau_conn_native_mode(struct drm_connector *connector) int high_w = 0, high_h = 0, high_v = 0; list_for_each_entry(mode, &connector->probed_modes, head) { - mode->vrefresh = drm_mode_vrefresh(mode); if (helper->mode_valid(connector, mode) != MODE_OK || (mode->flags & DRM_MODE_FLAG_INTERLACE)) continue; @@ -80,12 +79,12 @@ nouveau_conn_native_mode(struct drm_connector *connector) continue; if (mode->hdisplay == high_w && mode->vdisplay == high_h && - mode->vrefresh < high_v) + drm_mode_vrefresh(mode) < high_v) continue; high_w = mode->hdisplay; high_h = mode->vdisplay; - high_v = mode->vrefresh; + high_v = drm_mode_vrefresh(mode); largest = mode; } diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index 41444a73c980..47b37fef7ee8 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -143,7 +143,6 @@ static const struct versatile_panel_type versatile_panels[] = { .vsync_start = 240 + 5, .vsync_end = 240 + 5 + 6, .vtotal = 240 + 5 + 6 + 5, - .vrefresh = 116, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, }, @@ -167,7 +166,6 @@ static const struct versatile_panel_type versatile_panels[] = { .vsync_start = 480 + 11, .vsync_end = 480 + 11 + 2, .vtotal = 480 + 11 + 2 + 32, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, }, @@ -190,7 +188,6 @@ static const struct versatile_panel_type versatile_panels[] = { .vsync_start = 220 + 0, .vsync_end = 220 + 0 + 2, .vtotal = 220 + 0 + 2 + 1, 
- .vrefresh = 390, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, @@ -214,7 +211,6 @@ static const struct versatile_panel_type versatile_panels[] = { .vsync_start = 320 + 2, .vsync_end = 320 + 2 + 2, .vtotal = 320 + 2 + 2 + 2, - .vrefresh = 116, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c index 39e0f0373f3c..9a5b7644d756 100644 --- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c +++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c @@ -183,7 +183,6 @@ static const struct drm_display_mode tm5p5_nt35596_mode = { .vsync_start = 1920 + 4, .vsync_end = 1920 + 4 + 2, .vtotal = 1920 + 4 + 2 + 4, - .vrefresh = 60, .width_mm = 68, .height_mm = 121, }; diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c index 74d58ee7d04c..7c27bd5e3486 100644 --- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c +++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c @@ -229,7 +229,7 @@ static int boe_panel_get_modes(struct drm_panel *panel, mode = drm_mode_duplicate(connector->dev, m); if (!mode) { DRM_DEV_ERROR(pinfo->base.dev, "failed to add mode %ux%u@%u\n", - m->hdisplay, m->vdisplay, m->vrefresh); + m->hdisplay, m->vdisplay, drm_mode_vrefresh(m)); return -ENOMEM; } @@ -262,7 +262,6 @@ static const struct drm_display_mode default_display_mode = { .vsync_start = 1920 + 10, .vsync_end = 1920 + 10 + 14, .vtotal = 1920 + 10 + 14 + 4, - .vrefresh = 60, }; /* 8 inch */ diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 46fe1805c588..db5b866357f2 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -594,7 +594,6 @@ static const struct drm_display_mode boe_tv101wum_nl6_default_mode = { .vsync_start = 1920 + 10, .vsync_end = 1920 + 10 + 14, .vtotal = 1920 + 10 + 14 + 4, - .vrefresh = 60, }; static const struct panel_desc boe_tv101wum_nl6_desc = { @@ -622,7 +621,6 @@ static const struct drm_display_mode auo_kd101n80_45na_default_mode = { .vsync_start = 1920 + 16, .vsync_end = 1920 + 16 + 4, .vtotal = 1920 + 16 + 4 + 16, - .vrefresh = 60, }; static const struct panel_desc auo_kd101n80_45na_desc = { @@ -650,7 +648,6 @@ static const struct drm_display_mode boe_tv101wum_n53_default_mode = { .vsync_start = 1920 + 20, .vsync_end = 1920 + 20 + 4, .vtotal = 1920 + 20 + 4 + 10, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; @@ -678,7 +675,6 @@ static const struct drm_display_mode auo_b101uan08_3_default_mode = { .vsync_start = 1920 + 34, .vsync_end = 1920 + 34 + 2, .vtotal = 1920 + 34 + 2 + 24, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; @@ -706,7 +702,6 @@ static const struct drm_display_mode boe_tv105wum_nw0_default_mode = { .vsync_start = 1920 + 20, .vsync_end = 1920 + 20 + 4, .vtotal = 1920 + 20 + 4 + 10, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; @@ -734,7 +729,7 @@ static int boe_panel_get_modes(struct drm_panel *panel, mode = drm_mode_duplicate(connector->dev, m); if (!mode) { dev_err(panel->dev, "failed to add mode %ux%u@%u\n", - m->hdisplay, m->vdisplay, m->vrefresh); + m->hdisplay, m->vdisplay, drm_mode_vrefresh(m)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c 
b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index 711ded453c44..2338d22e23b1 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -197,7 +197,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 480 + 2, .vsync_end = 480 + 2 + 1, .vtotal = 480 + 2 + 1 + 2, - .vrefresh = 60, .clock = 17000, .width_mm = 42, .height_mm = 82, @@ -213,7 +212,7 @@ static int kd35t133_get_modes(struct drm_panel *panel, if (!mode) { DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c index fddbfddf6566..54610651ecdb 100644 --- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c +++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c @@ -392,7 +392,6 @@ static int k101_im2ba02_unprepare(struct drm_panel *panel) static const struct drm_display_mode k101_im2ba02_default_mode = { .clock = 70000, - .vrefresh = 60, .hdisplay = 800, .hsync_start = 800 + 20, @@ -420,7 +419,7 @@ static int k101_im2ba02_get_modes(struct drm_panel *panel, DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n", k101_im2ba02_default_mode.hdisplay, k101_im2ba02_default_mode.vdisplay, - k101_im2ba02_default_mode.vrefresh); + drm_mode_vrefresh(&k101_im2ba02_default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c index 95b789ab9d29..19a6274b10f5 100644 --- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c @@ -153,7 +153,6 @@ static const struct drm_display_mode feiyang_default_mode = { .vsync_start = 600 + 12, .vsync_end = 600 + 12 + 2, .vtotal = 600 + 12 + 2 + 21, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; @@ -169,7 +168,7 @@ static int feiyang_get_modes(struct drm_panel *panel, DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n", feiyang_default_mode.hdisplay, feiyang_default_mode.vdisplay, - feiyang_default_mode.vrefresh); + drm_mode_vrefresh(&feiyang_default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 873b1c7059bd..67a64d1999f6 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -549,7 +549,6 @@ static const struct drm_display_mode srgb_320x240_mode = { .vsync_start = 240 + 4, .vsync_end = 240 + 4 + 1, .vtotal = 262, - .vrefresh = 60, .flags = 0, }; @@ -563,7 +562,6 @@ static const struct drm_display_mode srgb_360x240_mode = { .vsync_start = 240 + 21, .vsync_end = 240 + 21 + 1, .vtotal = 262, - .vrefresh = 60, .flags = 0, }; @@ -578,7 +576,6 @@ static const struct drm_display_mode prgb_320x240_mode = { .vsync_start = 240 + 4, .vsync_end = 240 + 4 + 1, .vtotal = 262, - .vrefresh = 60, .flags = 0, }; @@ -593,7 +590,6 @@ static const struct drm_display_mode yuv_640x320_mode = { .vsync_start = 320 + 4, .vsync_end = 320 + 4 + 1, .vtotal = 320 + 4 + 1 + 18, - .vrefresh = 60, .flags = 0, }; @@ -607,7 +603,6 @@ static const struct drm_display_mode yuv_720x360_mode = { .vsync_start = 360 + 4, .vsync_end = 360 + 4 + 1, .vtotal = 360 + 4 + 1 + 18, - .vrefresh = 60, .flags = 0, }; @@ -622,7 +617,6 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = { 
.vsync_start = 480 + 4, .vsync_end = 480 + 4 + 1, .vtotal = 500, - .vrefresh = 60, .flags = 0, }; @@ -637,7 +631,6 @@ static const struct drm_display_mode itu_r_bt_656_720_mode = { .vsync_start = 480 + 4, .vsync_end = 480 + 4 + 1, .vtotal = 500, - .vrefresh = 60, .flags = 0, }; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index f54077c216a3..3ed8635a6fbd 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -370,7 +370,6 @@ static int ili9881c_unprepare(struct drm_panel *panel) static const struct drm_display_mode bananapi_default_mode = { .clock = 62000, - .vrefresh = 60, .hdisplay = 720, .hsync_start = 720 + 10, @@ -394,7 +393,7 @@ static int ili9881c_get_modes(struct drm_panel *panel, dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n", bananapi_default_mode.hdisplay, bananapi_default_mode.vdisplay, - bananapi_default_mode.vrefresh); + drm_mode_vrefresh(&bananapi_default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index 7419f1f0acee..fdf030f4cf92 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -223,7 +223,6 @@ static const struct drm_display_mode innolux_p079zca_mode = { .vsync_start = 1024 + 20, .vsync_end = 1024 + 20 + 4, .vtotal = 1024 + 20 + 4 + 20, - .vrefresh = 60, }; static const struct panel_desc innolux_p079zca_panel_desc = { @@ -257,7 +256,6 @@ static const struct drm_display_mode innolux_p097pfg_mode = { .vsync_start = 2048 + 100, .vsync_end = 2048 + 100 + 2, .vtotal = 2048 + 100 + 2 + 18, - .vrefresh = 60, }; /* @@ -401,7 +399,7 @@ static int innolux_panel_get_modes(struct drm_panel *panel, mode = drm_mode_duplicate(connector->dev, m); if (!mode) { DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n", - m->hdisplay, m->vdisplay, m->vrefresh); + m->hdisplay, m->vdisplay, drm_mode_vrefresh(m)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 4bfd8c877c8e..1e3fd6633981 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -296,7 +296,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1920 + 3, .vsync_end = 1920 + 3 + 5, .vtotal = 1920 + 3 + 5 + 6, - .vrefresh = 60, .flags = 0, }; @@ -311,7 +310,7 @@ static int jdi_panel_get_modes(struct drm_panel *panel, if (!mode) { dev_err(dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index bac1a2a06c92..0d397af23afe 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -318,7 +318,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 2048 + 95, .vsync_end = 2048 + 95 + 2, .vtotal = 2048 + 95 + 2 + 23, - .vrefresh = 60, }; static int kingdisplay_panel_get_modes(struct drm_panel *panel, @@ -330,7 +329,7 @@ static int kingdisplay_panel_get_modes(struct drm_panel *panel, if (!mode) { DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff 
--git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c index 113ab9c0396b..0f6a248c47a5 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c @@ -376,7 +376,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1280 + 30, .vsync_end = 1280 + 30 + 4, .vtotal = 1280 + 30 + 4 + 12, - .vrefresh = 60, .clock = 69217, .width_mm = 62, .height_mm = 110, @@ -392,7 +391,7 @@ static int ltk500hd1829_get_modes(struct drm_panel *panel, if (!mode) { DRM_DEV_ERROR(ctx->dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c index e90efeaba4ad..14456b9cd5c0 100644 --- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c +++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c @@ -134,7 +134,6 @@ static const struct drm_display_mode lb035q02_mode = { .vsync_start = 240 + 4, .vsync_end = 240 + 4 + 2, .vtotal = 240 + 4 + 2 + 18, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 70, diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c index 5907f2503755..aedc485d0727 100644 --- a/drivers/gpu/drm/panel/panel-lg-lg4573.c +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -206,7 +206,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 800 + 15, .vsync_end = 800 + 15 + 15, .vtotal = 800 + 15 + 15 + 15, - .vrefresh = 60, }; static int lg4573_get_modes(struct drm_panel *panel, @@ -218,7 +217,7 @@ static int lg4573_get_modes(struct drm_panel *panel, if (!mode) { dev_err(panel->dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c index c4f83f6384e1..f894971c1c7c 100644 --- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c @@ -116,7 +116,6 @@ static const struct drm_display_mode nl8048_mode = { .vsync_start = 480 + 3, .vsync_end = 480 + 3 + 1, .vtotal = 480 + 3 + 1 + 4, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 89, diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index 4a8fa908a2cf..e98d54df00e7 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -1028,7 +1028,6 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = { .vsync_start = 800 + 2, /* VFP = 2 */ .vsync_end = 800 + 2 + 0, /* VSync = 0 */ .vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */ - .vrefresh = 60, /* Calculated */ .flags = 0, }, /* 0x09: AVDD = 5.6V */ diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index 05cae8d62d56..79be3dc4e817 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -335,7 +335,6 @@ static const struct drm_display_mode kd035g6_display_modes[] = { .vsync_start = 240 + 5, .vsync_end = 240 + 5 + 1, .vtotal = 240 + 5 + 1 + 4, - .vrefresh = 60, .flags = 
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, { /* 50 Hz */ @@ -348,7 +347,6 @@ static const struct drm_display_mode kd035g6_display_modes[] = { .vsync_start = 240 + 5, .vsync_end = 240 + 5 + 1, .vtotal = 240 + 5 + 1 + 4, - .vrefresh = 50, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, }; diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c index 09deb99981a4..ecd76b5391d3 100644 --- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c +++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c @@ -170,7 +170,6 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel, lcd_mode->vpw; mode->vtotal = lcd_mode->vactive + lcd_mode->vfp + lcd_mode->vpw + lcd_mode->vbp; - mode->vrefresh = lcd_mode->refresh; /* Always make the first mode preferred */ if (i == 0) diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c index bb0c992171e8..895ee3d1371e 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c @@ -81,7 +81,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 800 + 15, .vsync_end = 800 + 15 + 10, .vtotal = 800 + 15 + 10 + 14, - .vrefresh = 50, .flags = 0, .width_mm = 52, .height_mm = 86, @@ -358,7 +357,7 @@ static int otm8009a_get_modes(struct drm_panel *panel, if (!mode) { DRM_ERROR("failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c index 3a0229d60095..11b3d01aca56 100644 --- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c @@ -102,7 +102,6 @@ static const struct drm_display_mode default_mode_osd101t2587 = { .vsync_start = 1200 + 24, .vsync_end = 1200 + 24 + 6, .vtotal = 1200 + 24 + 6 + 48, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -117,7 +116,7 @@ static int osd101t2587_panel_get_modes(struct drm_panel *panel, dev_err(panel->dev, "failed to add mode %ux%ux@%u\n", osd101t2587->default_mode->hdisplay, osd101t2587->default_mode->vdisplay, - osd101t2587->default_mode->vrefresh); + drm_mode_vrefresh(osd101t2587->default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c index 69693451462e..627dfcf8adb4 100644 --- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c @@ -149,7 +149,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1200 + 24, .vsync_end = 1200 + 24 + 6, .vtotal = 1200 + 24 + 6 + 48, - .vrefresh = 60, }; static int wuxga_nt_panel_get_modes(struct drm_panel *panel, @@ -161,7 +160,7 @@ static int wuxga_nt_panel_get_modes(struct drm_panel *panel, if (!mode) { dev_err(panel->dev, "failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index 8f078b7dd89e..e50ee26474cf 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -209,7 +209,6 @@ static const struct drm_display_mode 
rpi_touchscreen_modes[] = { .vsync_start = 480 + 7, .vsync_end = 480 + 7 + 2, .vtotal = 480 + 7 + 2 + 21, - .vrefresh = 60, }, }; @@ -322,7 +321,8 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel, mode = drm_mode_duplicate(connector->dev, m); if (!mode) { dev_err(panel->dev, "failed to add mode %ux%u@%u\n", - m->hdisplay, m->vdisplay, m->vrefresh); + m->hdisplay, m->vdisplay, + drm_mode_vrefresh(m)); continue; } diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c index 313637d53d28..d001c52e0ca9 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c @@ -218,7 +218,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1920 + 10, .vsync_end = 1920 + 10 + 2, .vtotal = 1920 + 10 + 2 + 4, - .vrefresh = 60, .width_mm = 68, .height_mm = 121, .flags = DRM_MODE_FLAG_NHSYNC | @@ -445,7 +444,7 @@ static int rad_panel_get_modes(struct drm_panel *panel, if (!mode) { DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c index e8982948e0ea..81ae8be62d15 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c @@ -92,7 +92,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1280 + 12, .vsync_end = 1280 + 12 + 4, .vtotal = 1280 + 12 + 4 + 12, - .vrefresh = 50, .flags = 0, .width_mm = 68, .height_mm = 122, @@ -339,7 +338,7 @@ static int rm68200_get_modes(struct drm_panel *panel, if (!mode) { DRM_ERROR("failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c index 38ff742bc120..da4e373291f9 100644 --- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c +++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c @@ -223,7 +223,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1440 + 20, .vsync_end = 1440 + 20 + 4, .vtotal = 1440 + 20 + 4 + 12, - .vrefresh = 60, .clock = 75276, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 65, @@ -240,7 +239,7 @@ static int jh057n_get_modes(struct drm_panel *panel, if (!mode) { DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } @@ -360,7 +359,7 @@ static int jh057n_probe(struct mipi_dsi_device *dsi) DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh, + drm_mode_vrefresh(&default_mode), mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes); jh057n_debugfs_init(ctx); diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index ef18559e237e..a7b0b3e39e1a 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -103,7 +103,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 600 + 12, .vsync_end = 600 + 12 + 10, .vtotal = 600 + 12 + 10 + 13, - .vrefresh = 60, .width_mm = 154, .height_mm = 85, diff --git 
a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c index 2150043dcf6b..f02645d396ac 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c @@ -37,12 +37,6 @@ static const struct drm_display_mode samsung_s6d16d0_mode = { .vsync_start = 480 + 1, .vsync_end = 480 + 1 + 1, .vtotal = 480 + 1 + 1 + 1, - /* - * This depends on the clocking HS vs LP rate, this value - * is calculated as: - * vrefresh = (clock * 1000) / (htotal*vtotal) - */ - .vrefresh = 816, .width_mm = 84, .height_mm = 48, }; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c index 36ebd5a4ac7b..80ef122e7466 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c @@ -617,7 +617,6 @@ static const struct drm_display_mode s6e3ha2_mode = { .vsync_start = 2560 + 1, .vsync_end = 2560 + 1 + 1, .vtotal = 2560 + 1 + 1 + 15, - .vrefresh = 60, .flags = 0, }; @@ -636,7 +635,6 @@ static const struct drm_display_mode s6e3hf2_mode = { .vsync_start = 2560 + 1, .vsync_end = 2560 + 1 + 1, .vtotal = 2560 + 1 + 1 + 15, - .vrefresh = 60, .flags = 0, }; @@ -655,7 +653,7 @@ static int s6e3ha2_get_modes(struct drm_panel *panel, if (!mode) { DRM_ERROR("failed to add mode %ux%ux@%u\n", ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay, - ctx->desc->mode->vrefresh); + drm_mode_vrefresh(ctx->desc->mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index a3570e0a90a8..1247656d73bf 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -52,7 +52,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 320 + 150, .vsync_end = 320 + 150 + 1, .vtotal = 320 + 150 + 1 + 2, - .vrefresh = 30, .flags = 0, }; @@ -409,7 +408,7 @@ static int s6e63j0x03_get_modes(struct drm_panel *panel, if (!mode) { DRM_ERROR("failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index a5f76eb4fa25..64421347bfd4 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -117,7 +117,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 800 + 28, .vsync_end = 800 + 28 + 2, .vtotal = 800 + 28 + 2 + 1, - .vrefresh = 60, .width_mm = 53, .height_mm = 89, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, @@ -371,7 +370,7 @@ static int s6e63m0_get_modes(struct drm_panel *panel, if (!mode) { DRM_ERROR("failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c index 9d843fcc3a22..485eabecfcc9 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c @@ -177,7 +177,6 @@ static const struct drm_display_mode s6e88a0_ams452ef01_mode = { .vsync_start = 960 + 14, .vsync_end = 960 + 14 + 2, .vtotal = 960 + 14 + 2 + 8, - .vrefresh = 60, .width_mm = 56, .height_mm = 100, }; diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 
b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index 40fcbbbacb2c..e417dc4921c2 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -92,7 +92,8 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel, mode = drm_mode_duplicate(connector->dev, m); if (!mode) { dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n", - m->hdisplay, m->vdisplay, m->vrefresh); + m->hdisplay, m->vdisplay, + drm_mode_vrefresh(m)); continue; } diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c index b5d1977221a7..f07324b705b3 100644 --- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c @@ -269,7 +269,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1600 + 4, .vsync_end = 1600 + 4 + 8, .vtotal = 1600 + 4 + 8 + 32, - .vrefresh = 60, }; static int sharp_panel_get_modes(struct drm_panel *panel, @@ -281,7 +280,7 @@ static int sharp_panel_get_modes(struct drm_panel *panel, if (!mode) { dev_err(panel->dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c index 1cf3f02435c1..d7bf13b9e1d6 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c @@ -93,7 +93,6 @@ static const struct drm_display_mode ls037v7dw01_mode = { .vsync_start = 640 + 1, .vsync_end = 640 + 1 + 1, .vtotal = 640 + 1 + 1 + 1, - .vrefresh = 58, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 56, diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index ce586c6d70c7..b2e58935529c 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -201,7 +201,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 960 + 3, .vsync_end = 960 + 3 + 15, .vtotal = 960 + 3 + 15 + 1, - .vrefresh = 60, }; static int sharp_nt_panel_get_modes(struct drm_panel *panel, @@ -213,7 +212,7 @@ static int sharp_nt_panel_get_modes(struct drm_panel *panel, if (!mode) { dev_err(panel->dev, "failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index b6ecd1552132..b067f66cea0e 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -163,7 +163,8 @@ static unsigned int panel_simple_get_display_modes(struct panel_simple *panel, mode = drm_mode_duplicate(connector->dev, m); if (!mode) { dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n", - m->hdisplay, m->vdisplay, m->vrefresh); + m->hdisplay, m->vdisplay, + drm_mode_vrefresh(m)); continue; } @@ -602,7 +603,6 @@ static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 10, .vtotal = 272 + 2 + 10 + 2, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -627,7 +627,6 @@ static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = { .vsync_start = 480 + 2, .vsync_end = 480 + 2 + 45, .vtotal = 480 + 2 + 45 + 0, - .vrefresh = 
60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -678,7 +677,6 @@ static const struct drm_display_mode auo_b101aw03_mode = { .vsync_start = 600 + 16, .vsync_end = 600 + 16 + 6, .vtotal = 600 + 16 + 6 + 16, - .vrefresh = 60, }; static const struct panel_desc auo_b101aw03 = { @@ -723,7 +721,6 @@ static const struct drm_display_mode auo_b101xtn01_mode = { .vsync_start = 768 + 14, .vsync_end = 768 + 14 + 42, .vtotal = 768 + 14 + 42, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -747,7 +744,6 @@ static const struct drm_display_mode auo_b116xak01_mode = { .vsync_start = 768 + 4, .vsync_end = 768 + 4 + 6, .vtotal = 768 + 4 + 6 + 15, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -776,7 +772,6 @@ static const struct drm_display_mode auo_b116xw03_mode = { .vsync_start = 768 + 10, .vsync_end = 768 + 10 + 12, .vtotal = 768 + 10 + 12 + 6, - .vrefresh = 60, }; static const struct panel_desc auo_b116xw03 = { @@ -799,7 +794,6 @@ static const struct drm_display_mode auo_b133xtn01_mode = { .vsync_start = 768 + 3, .vsync_end = 768 + 3 + 6, .vtotal = 768 + 3 + 6 + 13, - .vrefresh = 60, }; static const struct panel_desc auo_b133xtn01 = { @@ -822,7 +816,6 @@ static const struct drm_display_mode auo_b133htn01_mode = { .vsync_start = 1080 + 25, .vsync_end = 1080 + 25 + 10, .vtotal = 1080 + 25 + 10 + 10, - .vrefresh = 60, }; static const struct panel_desc auo_b133htn01 = { @@ -878,7 +871,6 @@ static const struct drm_display_mode auo_g101evn010_mode = { .vsync_start = 800 + 8, .vsync_end = 800 + 8 + 2, .vtotal = 800 + 8 + 2 + 6, - .vrefresh = 60, }; static const struct panel_desc auo_g101evn010 = { @@ -903,7 +895,6 @@ static const struct drm_display_mode auo_g104sn02_mode = { .vsync_start = 600 + 10, .vsync_end = 600 + 10 + 35, .vtotal = 600 + 10 + 35 + 2, - .vrefresh = 60, }; static const struct panel_desc auo_g104sn02 = { @@ -926,7 +917,6 @@ static const struct drm_display_mode auo_g121ean01_mode = { .vsync_start = 800 + 6, .vsync_end = 800 + 6 + 4, .vtotal = 800 + 6 + 4 + 10, - .vrefresh = 60, }; static const struct panel_desc auo_g121ean01 = { @@ -981,7 +971,6 @@ static const struct drm_display_mode auo_g156xtn01_mode = { .vsync_start = 768 + 4, .vsync_end = 768 + 4 + 4, .vtotal = 806, - .vrefresh = 60, }; static const struct panel_desc auo_g156xtn01 = { @@ -1095,7 +1084,6 @@ static const struct drm_display_mode auo_t215hvn01_mode = { .vsync_start = 1080 + 4, .vsync_end = 1080 + 4 + 5, .vtotal = 1080 + 4 + 5 + 36, - .vrefresh = 60, }; static const struct panel_desc auo_t215hvn01 = { @@ -1122,7 +1110,6 @@ static const struct drm_display_mode avic_tm070ddh03_mode = { .vsync_start = 600 + 17, .vsync_end = 600 + 17 + 1, .vtotal = 600 + 17 + 1 + 17, - .vrefresh = 60, }; static const struct panel_desc avic_tm070ddh03 = { @@ -1172,7 +1159,6 @@ static const struct drm_display_mode boe_hv070wsa_mode = { .vsync_start = 600 + 10, .vsync_end = 600 + 10 + 10, .vtotal = 600 + 10 + 10 + 10, - .vrefresh = 60, }; static const struct panel_desc boe_hv070wsa = { @@ -1195,7 +1181,6 @@ static const struct drm_display_mode boe_nv101wxmn51_modes[] = { .vsync_start = 800 + 3, .vsync_end = 800 + 3 + 5, .vtotal = 800 + 3 + 5 + 24, - .vrefresh = 60, }, { .clock = 57500, @@ -1207,7 +1192,6 @@ static const struct drm_display_mode boe_nv101wxmn51_modes[] = { .vsync_start = 800 + 3, .vsync_end = 800 + 3 + 5, .vtotal = 800 + 3 + 5 + 24, - .vrefresh = 48, }, }; @@ -1237,7 +1221,6 @@ static const struct drm_display_mode boe_nv133fhm_n61_modes = { .vsync_start = 1080 
+ 3, .vsync_end = 1080 + 3 + 6, .vtotal = 1080 + 3 + 6 + 31, - .vrefresh = 60, }; /* Also used for boe_nv133fhm_n62 */ @@ -1269,7 +1252,6 @@ static const struct drm_display_mode boe_nv140fhmn49_modes[] = { .vsync_start = 1080 + 3, .vsync_end = 1080 + 3 + 5, .vtotal = 1125, - .vrefresh = 60, }, }; @@ -1300,7 +1282,6 @@ static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = { .vsync_start = 272 + 8, .vsync_end = 272 + 8 + 8, .vtotal = 272 + 8 + 8 + 8, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -1325,7 +1306,6 @@ static const struct drm_display_mode cdtech_s070wv95_ct16_mode = { .vsync_start = 480 + 29, .vsync_end = 480 + 29 + 13, .vtotal = 480 + 29 + 13 + 3, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -1349,7 +1329,6 @@ static const struct drm_display_mode chunghwa_claa070wp03xg_mode = { .vsync_start = 1280 + 1, .vsync_end = 1280 + 1 + 7, .vtotal = 1280 + 1 + 7 + 15, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -1373,7 +1352,6 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = { .vsync_start = 768 + 4, .vsync_end = 768 + 4 + 4, .vtotal = 768 + 4 + 4 + 4, - .vrefresh = 60, }; static const struct panel_desc chunghwa_claa101wa01a = { @@ -1396,7 +1374,6 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = { .vsync_start = 768 + 16, .vsync_end = 768 + 16 + 8, .vtotal = 768 + 16 + 8 + 16, - .vrefresh = 60, }; static const struct panel_desc chunghwa_claa101wb01 = { @@ -1419,7 +1396,6 @@ static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = { .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 2, .vtotal = 480 + 10 + 2 + 33, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -1506,7 +1482,6 @@ static const struct drm_display_mode edt_et035012dm6_mode = { .vsync_start = 240 + 4, .vsync_end = 240 + 4 + 4, .vtotal = 240 + 4 + 4 + 14, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -1538,7 +1513,6 @@ static const struct drm_display_mode edt_etm043080dh6gp_mode = { .vsync_start = 288 + 2, .vsync_end = 288 + 2 + 4, .vtotal = 288 + 2 + 4 + 10, - .vrefresh = 60, }; static const struct panel_desc edt_etm043080dh6gp = { @@ -1563,7 +1537,6 @@ static const struct drm_display_mode edt_etm0430g0dh6_mode = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 10, .vtotal = 272 + 2 + 10 + 2, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -1587,7 +1560,6 @@ static const struct drm_display_mode edt_et057090dhu_mode = { .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 3, .vtotal = 480 + 10 + 3 + 32, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -1613,7 +1585,6 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = { .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 2, .vtotal = 480 + 10 + 2 + 33, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -1678,7 +1649,6 @@ static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { .vsync_start = 480 + 37, .vsync_end = 480 + 37 + 2, .vtotal = 480 + 37 + 2 + 8, - .vrefresh = 60, }; static const struct panel_desc foxlink_fl500wvr00_a0t = { @@ -1702,7 +1672,6 @@ static const struct drm_display_mode frida_frd350h54004_mode = { .vsync_start = 240 + 2, .vsync_end = 240 + 2 + 6, .vtotal = 240 + 2 + 6 + 2, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -1729,7 +1698,6 @@ static const struct drm_display_mode friendlyarm_hd702e_mode = { 
.vsync_start = 1280 + 4, .vsync_end = 1280 + 4 + 8, .vtotal = 1280 + 4 + 8 + 4, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -1752,7 +1720,6 @@ static const struct drm_display_mode giantplus_gpg482739qs5_mode = { .vsync_start = 272 + 8, .vsync_end = 272 + 8 + 1, .vtotal = 272 + 8 + 1 + 8, - .vrefresh = 60, }; static const struct panel_desc giantplus_gpg482739qs5 = { @@ -1856,7 +1823,6 @@ static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = { .vsync_start = 480 + 16, .vsync_end = 480 + 16 + 13, .vtotal = 480 + 16 + 13 + 16, - .vrefresh = 60, }; static const struct panel_desc hitachi_tx23d38vm0caa = { @@ -1883,7 +1849,6 @@ static const struct drm_display_mode innolux_at043tn24_mode = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 10, .vtotal = 272 + 2 + 10 + 2, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -1909,7 +1874,6 @@ static const struct drm_display_mode innolux_at070tn92_mode = { .vsync_start = 480 + 22, .vsync_end = 480 + 22 + 10, .vtotal = 480 + 22 + 23 + 10, - .vrefresh = 60, }; static const struct panel_desc innolux_at070tn92 = { @@ -2020,7 +1984,6 @@ static const struct drm_display_mode innolux_g121x1_l03_mode = { .vsync_start = 768 + 38, .vsync_end = 768 + 38 + 1, .vtotal = 768 + 38 + 1 + 0, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -2082,7 +2045,6 @@ static const struct drm_display_mode innolux_n156bge_l21_mode = { .vsync_start = 768 + 2, .vsync_end = 768 + 2 + 6, .vtotal = 768 + 2 + 6 + 12, - .vrefresh = 60, }; static const struct panel_desc innolux_n156bge_l21 = { @@ -2105,7 +2067,6 @@ static const struct drm_display_mode innolux_p120zdg_bf1_mode = { .vsync_start = 1440 + 3, .vsync_end = 1440 + 3 + 10, .vtotal = 1440 + 3 + 10 + 27, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -2133,7 +2094,6 @@ static const struct drm_display_mode innolux_zj070na_01p_mode = { .vsync_start = 600 + 16, .vsync_end = 600 + 16 + 4, .vtotal = 600 + 16 + 4 + 16, - .vrefresh = 60, }; static const struct panel_desc innolux_zj070na_01p = { @@ -2156,7 +2116,6 @@ static const struct drm_display_mode ivo_m133nwf4_r0_mode = { .vsync_start = 1080 + 3, .vsync_end = 1080 + 3 + 12, .vtotal = 1080 + 3 + 12 + 17, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -2260,7 +2219,6 @@ static const struct drm_display_mode lemaker_bl035_rgb_002_mode = { .vsync_start = 240 + 4, .vsync_end = 240 + 4 + 3, .vtotal = 240 + 4 + 3 + 15, - .vrefresh = 60, }; static const struct panel_desc lemaker_bl035_rgb_002 = { @@ -2284,7 +2242,6 @@ static const struct drm_display_mode lg_lb070wv8_mode = { .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 25, .vtotal = 480 + 10 + 25 + 10, - .vrefresh = 60, }; static const struct panel_desc lg_lb070wv8 = { @@ -2309,7 +2266,6 @@ static const struct drm_display_mode lg_lp079qx1_sp0v_mode = { .vsync_start = 2048 + 8, .vsync_end = 2048 + 8 + 4, .vtotal = 2048 + 8 + 4 + 8, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -2332,7 +2288,6 @@ static const struct drm_display_mode lg_lp097qx1_spa1_mode = { .vsync_start = 1536 + 3, .vsync_end = 1536 + 3 + 1, .vtotal = 1536 + 3 + 1 + 9, - .vrefresh = 60, }; static const struct panel_desc lg_lp097qx1_spa1 = { @@ -2354,7 +2309,6 @@ static const struct drm_display_mode lg_lp120up1_mode = { .vsync_start = 1280 + 4, .vsync_end = 1280 + 4 + 4, .vtotal = 1280 + 4 + 4 + 12, - .vrefresh = 60, }; static const struct panel_desc lg_lp120up1 = { @@ -2378,7 
+2332,6 @@ static const struct drm_display_mode lg_lp129qe_mode = { .vsync_start = 1700 + 3, .vsync_end = 1700 + 3 + 10, .vtotal = 1700 + 3 + 10 + 36, - .vrefresh = 60, }; static const struct panel_desc lg_lp129qe = { @@ -2459,7 +2412,6 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = { .vsync_start = 480 + 0, .vsync_end = 480 + 48 + 1, .vtotal = 480 + 48 + 1 + 0, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -2474,7 +2426,6 @@ static const struct drm_display_mode logicpd_type_28_mode = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 11, .vtotal = 272 + 2 + 11 + 3, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -2554,7 +2505,6 @@ static const struct drm_display_mode nec_nl4827hc19_05b_mode = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 4, .vtotal = 272 + 2 + 4 + 2, - .vrefresh = 74, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -2580,7 +2530,6 @@ static const struct drm_display_mode netron_dy_e231732_mode = { .vsync_start = 600 + 127, .vsync_end = 600 + 127 + 20, .vtotal = 600 + 127 + 20 + 3, - .vrefresh = 60, }; static const struct panel_desc netron_dy_e231732 = { @@ -2604,7 +2553,6 @@ static const struct drm_display_mode neweast_wjfh116008a_modes[] = { .vsync_start = 1080 + 3, .vsync_end = 1080 + 3 + 5, .vtotal = 1080 + 3 + 5 + 23, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }, { .clock = 110920, @@ -2616,7 +2564,6 @@ static const struct drm_display_mode neweast_wjfh116008a_modes[] = { .vsync_start = 1080 + 3, .vsync_end = 1080 + 3 + 5, .vtotal = 1080 + 3 + 5 + 23, - .vrefresh = 48, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, } }; @@ -2648,7 +2595,6 @@ static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 10, .vtotal = 272 + 2 + 10 + 2, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -2756,7 +2702,6 @@ static const struct drm_display_mode olimex_lcd_olinuxino_43ts_mode = { .vsync_start = 272 + 8, .vsync_end = 272 + 8 + 5, .vtotal = 272 + 8 + 5 + 3, - .vrefresh = 60, }; static const struct panel_desc olimex_lcd_olinuxino_43ts = { @@ -2784,7 +2729,6 @@ static const struct drm_display_mode ontat_yx700wv03_mode = { .vsync_start = 483, .vsync_end = 493, .vtotal = 500, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -2813,7 +2757,6 @@ static const struct drm_display_mode ortustech_com37h3m_mode = { .vsync_start = 640 + 4, .vsync_end = 640 + 4 + 2, .vtotal = 640 + 4 + 2 + 4, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -2840,7 +2783,6 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { .vsync_start = 800 + 3, .vsync_end = 800 + 3 + 3, .vtotal = 800 + 3 + 3 + 3, - .vrefresh = 60, }; static const struct panel_desc ortustech_com43h4m85ulc = { @@ -2866,7 +2808,6 @@ static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = { .vsync_start = 480 + 22, .vsync_end = 480 + 22 + 13, .vtotal = 480 + 22 + 13 + 10, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -2894,7 +2835,6 @@ static const struct drm_display_mode pda_91_00156_a0_mode = { .vsync_start = 480 + 1, .vsync_end = 480 + 1 + 23, .vtotal = 480 + 1 + 23 + 22, - .vrefresh = 60, }; static const struct panel_desc pda_91_00156_a0 = { @@ -2918,7 +2858,6 @@ static const struct drm_display_mode qd43003c0_40_mode = { .vsync_start = 272 + 4, .vsync_end = 272 + 4 + 10, .vtotal = 272 + 4 + 10 + 2, 
- .vrefresh = 60, }; static const struct panel_desc qd43003c0_40 = { @@ -2972,7 +2911,6 @@ static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = { .vsync_start = 800 + 2, .vsync_end = 800 + 2 + 5, .vtotal = 800 + 2 + 5 + 16, - .vrefresh = 60, }; static const struct panel_desc rocktech_rk101ii01d_ct = { @@ -3001,7 +2939,6 @@ static const struct drm_display_mode samsung_lsn122dl01_c01_mode = { .vsync_start = 1600 + 2, .vsync_end = 1600 + 2 + 5, .vtotal = 1600 + 2 + 5 + 57, - .vrefresh = 60, }; static const struct panel_desc samsung_lsn122dl01_c01 = { @@ -3023,7 +2960,6 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = { .vsync_start = 600 + 3, .vsync_end = 600 + 3 + 6, .vtotal = 600 + 3 + 6 + 61, - .vrefresh = 60, }; static const struct panel_desc samsung_ltn101nt05 = { @@ -3046,7 +2982,6 @@ static const struct drm_display_mode samsung_ltn140at29_301_mode = { .vsync_start = 768 + 2, .vsync_end = 768 + 2 + 5, .vtotal = 768 + 2 + 5 + 17, - .vrefresh = 60, }; static const struct panel_desc samsung_ltn140at29_301 = { @@ -3093,7 +3028,6 @@ static const struct drm_display_mode sharp_ld_d5116z01b_mode = { .vsync_start = 1280 + 3, .vsync_end = 1280 + 3 + 10, .vtotal = 1280 + 3 + 10 + 57, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -3119,7 +3053,6 @@ static const struct drm_display_mode sharp_lq070y3dg3b_mode = { .vsync_start = 480 + 8, .vsync_end = 480 + 8 + 2, .vtotal = 480 + 8 + 2 + 35, - .vrefresh = 60, .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE, }; @@ -3146,7 +3079,6 @@ static const struct drm_display_mode sharp_lq035q7db03_mode = { .vsync_start = 320 + 9, .vsync_end = 320 + 9 + 1, .vtotal = 320 + 9 + 1 + 7, - .vrefresh = 60, }; static const struct panel_desc sharp_lq035q7db03 = { @@ -3250,7 +3182,6 @@ static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = { .vsync_start = 480 + 1, .vsync_end = 480 + 1 + 23, .vtotal = 480 + 1 + 23 + 22, - .vrefresh = 60, }; static const struct panel_desc shelly_sca07010_bfn_lnn = { @@ -3273,7 +3204,6 @@ static const struct drm_display_mode starry_kr070pe2t_mode = { .vsync_start = 480 + 22, .vsync_end = 480 + 22 + 1, .vtotal = 480 + 22 + 1 + 22, - .vrefresh = 60, }; static const struct panel_desc starry_kr070pe2t = { @@ -3299,7 +3229,6 @@ static const struct drm_display_mode starry_kr122ea0sra_mode = { .vsync_start = 1200 + 15, .vsync_end = 1200 + 15 + 2, .vtotal = 1200 + 15 + 2 + 18, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -3327,7 +3256,6 @@ static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = { .vsync_start = 480 + 13, .vsync_end = 480 + 13 + 2, .vtotal = 480 + 13 + 2 + 29, - .vrefresh = 62, }; static const struct panel_desc tfc_s9700rtwv43tr_01b = { @@ -3403,7 +3331,6 @@ static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = { .vsync_start = 240 + 3, .vsync_end = 240 + 3 + 1, .vtotal = 240 + 3 + 1 + 17, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }, }; @@ -3431,7 +3358,6 @@ static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = { .vsync_start = 240 + 0, .vsync_end = 240 + 0 + 1, .vtotal = 240 + 0 + 1 + 0, - .vrefresh = 60, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, }; @@ -3460,7 +3386,6 @@ static const struct drm_display_mode toshiba_lt089ac29000_mode = { .vsync_start = 768 + 20, .vsync_end = 768 + 20 + 7, .vtotal = 768 + 20 + 7 + 3, - .vrefresh = 60, }; static const struct panel_desc toshiba_lt089ac29000 = { @@ -3485,7 +3410,6 @@ static const struct drm_display_mode 
tpk_f07a_0102_mode = { .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 2, .vtotal = 480 + 10 + 2 + 33, - .vrefresh = 60, }; static const struct panel_desc tpk_f07a_0102 = { @@ -3508,7 +3432,6 @@ static const struct drm_display_mode tpk_f10a_0102_mode = { .vsync_start = 600 + 20, .vsync_end = 600 + 20 + 5, .vtotal = 600 + 20 + 5 + 25, - .vrefresh = 60, }; static const struct panel_desc tpk_f10a_0102 = { @@ -3567,7 +3490,6 @@ static const struct drm_display_mode vl050_8048nt_c01_mode = { .vsync_start = 480 + 22, .vsync_end = 480 + 22 + 10, .vtotal = 480 + 22 + 10 + 23, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -3593,7 +3515,6 @@ static const struct drm_display_mode winstar_wf35ltiacd_mode = { .vsync_start = 240 + 4, .vsync_end = 240 + 4 + 3, .vtotal = 240 + 4 + 3 + 15, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -3619,7 +3540,6 @@ static const struct drm_display_mode arm_rtsm_mode[] = { .vsync_start = 768 + 3, .vsync_end = 768 + 3 + 6, .vtotal = 768 + 3 + 6 + 29, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }, }; @@ -4070,7 +3990,6 @@ static const struct drm_display_mode auo_b080uan01_mode = { .vsync_start = 1920 + 9, .vsync_end = 1920 + 9 + 2, .vtotal = 1920 + 9 + 2 + 8, - .vrefresh = 60, }; static const struct panel_desc_dsi auo_b080uan01 = { @@ -4098,7 +4017,6 @@ static const struct drm_display_mode boe_tv080wum_nl0_mode = { .vsync_start = 1920 + 21, .vsync_end = 1920 + 21 + 3, .vtotal = 1920 + 21 + 3 + 18, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; @@ -4128,7 +4046,6 @@ static const struct drm_display_mode lg_ld070wx3_sl01_mode = { .vsync_start = 1280 + 28, .vsync_end = 1280 + 28 + 1, .vtotal = 1280 + 28 + 1 + 14, - .vrefresh = 60, }; static const struct panel_desc_dsi lg_ld070wx3_sl01 = { @@ -4156,7 +4073,6 @@ static const struct drm_display_mode lg_lh500wx1_sd03_mode = { .vsync_start = 1280 + 8, .vsync_end = 1280 + 8 + 4, .vtotal = 1280 + 8 + 4 + 12, - .vrefresh = 60, }; static const struct panel_desc_dsi lg_lh500wx1_sd03 = { @@ -4184,7 +4100,6 @@ static const struct drm_display_mode panasonic_vvx10f004b00_mode = { .vsync_start = 1200 + 17, .vsync_end = 1200 + 17 + 2, .vtotal = 1200 + 17 + 2 + 16, - .vrefresh = 60, }; static const struct panel_desc_dsi panasonic_vvx10f004b00 = { @@ -4213,7 +4128,6 @@ static const struct drm_display_mode lg_acx467akm_7_mode = { .vsync_start = 1920 + 2, .vsync_end = 1920 + 2 + 2, .vtotal = 1920 + 2 + 2 + 2, - .vrefresh = 60, }; static const struct panel_desc_dsi lg_acx467akm_7 = { @@ -4241,7 +4155,6 @@ static const struct drm_display_mode osd101t2045_53ts_mode = { .vsync_start = 1200 + 16, .vsync_end = 1200 + 16 + 2, .vtotal = 1200 + 16 + 2 + 16, - .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c index 4b4f2558e3b4..692041ae4eb6 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c @@ -272,7 +272,7 @@ static int st7701_get_modes(struct drm_panel *panel, DRM_DEV_ERROR(&st7701->dsi->dev, "failed to add mode %ux%ux@%u\n", desc_mode->hdisplay, desc_mode->vdisplay, - desc_mode->vrefresh); + drm_mode_vrefresh(desc_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index cc02c54c1b2e..3513ae40efa8 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ 
b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -165,7 +165,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 320 + 8, .vsync_end = 320 + 8 + 4, .vtotal = 320 + 8 + 4 + 4, - .vrefresh = 60, }; static int st7789v_get_modes(struct drm_panel *panel, @@ -177,7 +176,7 @@ static int st7789v_get_modes(struct drm_panel *panel, if (!mode) { dev_err(panel->dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c index c91e55b2d7a3..97a1b4790d3c 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c @@ -57,7 +57,6 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = { .vsync_start = 864 + 14, .vsync_end = 864 + 14 + 1, .vtotal = 864 + 14 + 1 + 11, - .vrefresh = 60, .width_mm = 48, .height_mm = 84, .flags = DRM_MODE_FLAG_PVSYNC, @@ -81,7 +80,6 @@ static const struct drm_display_mode sony_acx424akp_cmd_mode = { * Some desired refresh rate, experiments at the maximum "pixel" * clock speed (HS clock 420 MHz) yields around 117Hz. */ - .vrefresh = 60, .width_mm = 48, .height_mm = 84, }; diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index 5c4b6f6e5c2d..fc6a7e451abe 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -514,7 +514,6 @@ static const struct drm_display_mode acx565akm_mode = { .vsync_start = 480 + 3, .vsync_end = 480 + 3 + 3, .vtotal = 480 + 3 + 3 + 4, - .vrefresh = 57, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 77, diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index aeca15dfeb3c..58d683cc5215 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -281,7 +281,6 @@ static const struct drm_display_mode td028ttec1_mode = { .vsync_start = 640 + 4, .vsync_end = 640 + 4 + 2, .vtotal = 640 + 4 + 2 + 2, - .vrefresh = 66, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 43, diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index 75f1f1f1b6de..9b2a356c4d9a 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -339,7 +339,6 @@ static const struct drm_display_mode td043mtea1_mode = { .vsync_start = 480 + 39, .vsync_end = 480 + 39 + 1, .vtotal = 480 + 39 + 1 + 34, - .vrefresh = 60, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 94, diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index 8472d018c16f..c7a2f0ae5ba5 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -112,7 +112,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 1, .vtotal = 480 + 10 + 1 + 35, - .vrefresh = 60, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, @@ -129,7 +128,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vsync_start = 480 + 18, .vsync_end = 480 + 18 + 1, .vtotal = 480 + 18 + 1 + 27, - 
.vrefresh = 60, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, @@ -146,7 +144,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 1, .vtotal = 272 + 2 + 1 + 12, - .vrefresh = 60, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, @@ -163,7 +160,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vsync_start = 640 + 4, .vsync_end = 640 + 4 + 1, .vtotal = 640 + 4 + 1 + 8, - .vrefresh = 60, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, @@ -180,7 +176,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vsync_start = 240 + 2, .vsync_end = 240 + 2 + 1, .vtotal = 240 + 2 + 1 + 20, - .vrefresh = 60, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c index f0ad6081570f..9b9c167b8dc8 100644 --- a/drivers/gpu/drm/panel/panel-truly-nt35597.c +++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c @@ -534,7 +534,6 @@ static const struct drm_display_mode qcom_sdm845_mtp_2k_mode = { .vsync_start = 2560 + 8, .vsync_end = 2560 + 8 + 1, .vtotal = 2560 + 8 + 1 + 7, - .vrefresh = 60, .flags = 0, }; diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c index 42f299ad3804..a12976b497ce 100644 --- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c +++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c @@ -168,7 +168,6 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = { .vsync_start = 2248 + 56, .vsync_end = 2248 + 56 + 4, .vtotal = 2248 + 56 + 4 + 4, - .vrefresh = 60, .flags = 0, }; diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c index 1645aceab597..8a3b2f906e63 100644 --- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c +++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c @@ -243,7 +243,6 @@ static const struct drm_display_mode default_mode = { .vsync_start = 1280 + 22, .vsync_end = 1280 + 22 + 4, .vtotal = 1280 + 22 + 4 + 11, - .vrefresh = 60, .clock = 64000, .width_mm = 68, .height_mm = 121, @@ -259,7 +258,7 @@ static int xpp055c272_get_modes(struct drm_panel *panel, if (!mode) { DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index a1ec891eaf3a..5c2b650b561d 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -586,7 +586,6 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector) &hda_supported_modes[i].mode); if (!mode) continue; - mode->vrefresh = drm_mode_vrefresh(mode); /* the first mode is the preferred mode */ if (i == 0) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 04d66592f605..3c97654b5a43 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2138,7 +2138,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode) mode->vtotal = mode->vsync_end + 50; mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6; - mode->vrefresh = drm_mode_vrefresh(mode); } @@ -2212,7 +2211,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, mode = drm_mode_duplicate(dev, bmode); if (!mode) return 0; - mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_probed_add(connector, mode); } diff --git 
a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 6a4c4b26caad..01ec0c6d1cb5 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -381,16 +381,6 @@ struct drm_display_mode { */ int private_flags; - /** - * @vrefresh: - * - * Vertical refresh rate, for debug output in human readable form. Not - * used in a functional way. - * - * This value is in Hz. - */ - int vrefresh; - /** * @picture_aspect_ratio: * @@ -422,7 +412,7 @@ struct drm_display_mode { * @m: display mode */ #define DRM_MODE_ARG(m) \ - (m)->name, (m)->vrefresh, (m)->clock, \ + (m)->name, drm_mode_vrefresh(m), (m)->clock, \ (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ (m)->type, (m)->flags -- cgit v1.2.3-59-g8ed1b From 2159e4629addf29e310c342801a2cb3fe82cc3b4 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 Apr 2020 20:19:29 +0300 Subject: drm: Shrink {width,height}_mm to u16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of supporting ~2000km wide displays let's limit ourselves to ~65m. That seems plenty big enough to me. Even with EDID_QUIRK_DETAILED_IN_CM EDIDs seem to be limited to 10*0xfff which fits into the 16 bits. Reviewed-by: Sam Ravnborg Reviewed-by: Emil Velikov Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200428171940.19552-6-ville.syrjala@linux.intel.com --- include/drm/drm_modes.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 01ec0c6d1cb5..b93b5dcda392 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -331,7 +331,7 @@ struct drm_display_mode { * Addressable size of the output in mm, projectors should set this to * 0. */ - int width_mm; + u16 width_mm; /** * @height_mm: * * Addressable size of the output in mm, projectors should set this to * 0. */ - int height_mm; + u16 height_mm; /** * @crtc_clock: -- cgit v1.2.3-59-g8ed1b From 3542cc54a1bb17fed85f1503b6978a95107c2ba8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 Apr 2020 20:19:30 +0300 Subject: drm: Shrink mode->type to u8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only have 7 bits defined for mode->type. Shrink the storage to u8. Reviewed-by: Sam Ravnborg Reviewed-by: Emil Velikov Reviewed-by: Daniel Vetter Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200428171940.19552-7-ville.syrjala@linux.intel.com --- include/drm/drm_modes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index b93b5dcda392..0e74e3761182 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -271,7 +271,7 @@ struct drm_display_mode { * which are stuck around for hysterical raisins only. No one has an * idea what they were meant for. Don't use. */ - unsigned int type; + u8 type; /** * @clock: -- cgit v1.2.3-59-g8ed1b From 6a494eab2cf981eaa6a5f814dd65b35b652e123a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 Apr 2020 20:19:31 +0300 Subject: drm: Make mode->flags u32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The mode flags are directly exposed in the uapi as u32. Use the same size type to store them internally.
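Taken together, this series leaves struct drm_display_mode holding only the raw u16 timings plus the kHz clock, and the refresh rate is always derived on demand. A simplified sketch of that derivation is below; the real drm_mode_vrefresh() in drivers/gpu/drm/drm_modes.c additionally corrects for interlace, doublescan and vscan, and the example numbers are made up for illustration only.

#include <linux/kernel.h>
#include <drm/drm_modes.h>

/* Simplified refresh-rate derivation; see drm_mode_vrefresh() for the
 * complete version with interlace/doublescan/vscan handling. */
static int example_mode_vrefresh(const struct drm_display_mode *mode)
{
	unsigned int num = mode->clock * 1000;	/* clock is stored in kHz */
	unsigned int den = (unsigned int)mode->htotal * mode->vtotal;

	if (!den)
		return 0;

	return DIV_ROUND_CLOSEST(num, den);
}

/* Example: a 1920x1080@60 CEA mode has clock = 148500 kHz, htotal = 2200
 * and vtotal = 1125, so 148500000 / (2200 * 1125) = 60 Hz. */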
Reviewed-by: Sam Ravnborg Reviewed-by: Emil Velikov Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200428171940.19552-8-ville.syrjala@linux.intel.com --- include/drm/drm_modes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 0e74e3761182..5266d890c4b0 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -323,7 +323,7 @@ struct drm_display_mode { * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and * right parts. */ - unsigned int flags; + u32 flags; /** * @width_mm: -- cgit v1.2.3-59-g8ed1b From d857e167963f2faf75727764497b51c942ffe2fc Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 Apr 2020 20:19:32 +0300 Subject: drm: Shrink drm_display_mode timings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Store the timings (apart from the clock) as u16. The uapi mode struct already uses u16 for everything so using something bigger internally doesn't really help us. Reviewed-by: Sam Ravnborg Reviewed-by: Emil Velikov Reviewed-by: Daniel Vetter Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200428171940.19552-9-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_modes.c | 7 ------- include/drm/drm_modes.h | 46 ++++++++++++++++++++++----------------------- 2 files changed, 23 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index e3d5f011f7bd..77d68120931a 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1901,13 +1901,6 @@ EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode); void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, const struct drm_display_mode *in) { - WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX || - in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX || - in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX || - in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX || - in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX, - "timing values too large for mode info\n"); - out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 5266d890c4b0..a72e755b238d 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -279,16 +279,16 @@ struct drm_display_mode { * Pixel clock in kHz. */ int clock; /* in kHz */ - int hdisplay; - int hsync_start; - int hsync_end; - int htotal; - int hskew; - int vdisplay; - int vsync_start; - int vsync_end; - int vtotal; - int vscan; + u16 hdisplay; + u16 hsync_start; + u16 hsync_end; + u16 htotal; + u16 hskew; + u16 vdisplay; + u16 vsync_start; + u16 vsync_end; + u16 vtotal; + u16 vscan; /** * @flags: * @@ -357,19 +357,19 @@ struct drm_display_mode { * difference is exactly a factor of 10. 
*/ int crtc_clock; - int crtc_hdisplay; - int crtc_hblank_start; - int crtc_hblank_end; - int crtc_hsync_start; - int crtc_hsync_end; - int crtc_htotal; - int crtc_hskew; - int crtc_vdisplay; - int crtc_vblank_start; - int crtc_vblank_end; - int crtc_vsync_start; - int crtc_vsync_end; - int crtc_vtotal; + u16 crtc_hdisplay; + u16 crtc_hblank_start; + u16 crtc_hblank_end; + u16 crtc_hsync_start; + u16 crtc_hsync_end; + u16 crtc_htotal; + u16 crtc_hskew; + u16 crtc_vdisplay; + u16 crtc_vblank_start; + u16 crtc_vblank_end; + u16 crtc_vsync_start; + u16 crtc_vsync_end; + u16 crtc_vtotal; /** * @private_flags: -- cgit v1.2.3-59-g8ed1b From 42acb06b01b12a2205757a46d3ff75e38840968f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 Apr 2020 20:19:34 +0300 Subject: drm: pahole struct drm_display_mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reorganize drm_display_mode to eliminate all the holes. We'll put all the actual timings to the start of the struct and all the extra junk to the end. Gets the size down to 136 bytes on 64bit and 120 bytes on 32bit. With a bit more work we should be able to get this below the two cacheline mark even on 64bit. v2: Rebase due to DRM_MODE_TYPE_USERDEF comment change Reviewed-by: Emil Velikov Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200428171940.19552-11-ville.syrjala@linux.intel.com --- include/drm/drm_modes.h | 141 ++++++++++++++++++++++++------------------------ 1 file changed, 71 insertions(+), 70 deletions(-) (limited to 'include') diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index a72e755b238d..fc47a183526e 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -222,57 +222,6 @@ enum drm_mode_status { * For printing you can use %DRM_MODE_FMT and DRM_MODE_ARG(). */ struct drm_display_mode { - /** - * @head: - * - * struct list_head for mode lists. - */ - struct list_head head; - - /** - * @name: - * - * Human-readable name of the mode, filled out with drm_mode_set_name(). - */ - char name[DRM_DISPLAY_MODE_LEN]; - - /** - * @status: - * - * Status of the mode, used to filter out modes not supported by the - * hardware. See enum &drm_mode_status. - */ - enum drm_mode_status status; - - /** - * @type: - * - * A bitmask of flags, mostly about the source of a mode. Possible flags - * are: - * - * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native - * resolution of an LCD panel. There should only be one preferred - * mode per connector at any given time. - * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of - * them really. Drivers must set this bit for all modes they create - * and expose to userspace. - * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel - * command line. - * - * Plus a big list of flags which shouldn't be used at all, but are - * still around since these flags are also used in the userspace ABI. - * We no longer accept modes with these types though: - * - * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. - * Use DRM_MODE_TYPE_DRIVER instead. - * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use - * DRM_MODE_TYPE_PREFERRED instead. - * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers - * which are stuck around for hysterical raisins only. No one has an - * idea what they were meant for. Don't use. 
- */ - u8 type; - /** * @clock: * @@ -325,22 +274,6 @@ struct drm_display_mode { */ u32 flags; - /** - * @width_mm: - * - * Addressable size of the output in mm, projectors should set this to - * 0. - */ - u16 width_mm; - - /** - * @height_mm: - * - * Addressable size of the output in mm, projectors should set this to - * 0. - */ - u16 height_mm; - /** * @crtc_clock: * @@ -371,6 +304,51 @@ struct drm_display_mode { u16 crtc_vsync_end; u16 crtc_vtotal; + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + u16 width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + u16 height_mm; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel + * command line. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI. + * We no longer accept modes with these types though: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. + * Use DRM_MODE_TYPE_DRIVER instead. + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. + * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. + */ + u8 type; + /** * @private_flags: * @@ -382,11 +360,11 @@ struct drm_display_mode { int private_flags; /** - * @picture_aspect_ratio: + * @head: * - * Field for setting the HDMI picture aspect ratio of a mode. + * struct list_head for mode lists. */ - enum hdmi_picture_aspect picture_aspect_ratio; + struct list_head head; /** * @export_head: @@ -400,6 +378,29 @@ struct drm_display_mode { * avoid overhead of protecting it by mode_config.mutex. */ struct list_head export_head; + + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ + enum drm_mode_status status; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. + */ + enum hdmi_picture_aspect picture_aspect_ratio; + }; /** -- cgit v1.2.3-59-g8ed1b From 130c88931f6cbdb4513d307b4a13fcfff08a8041 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 26 May 2020 20:53:21 -0400 Subject: drm/amdgpu: Improve the MTYPE comments Use words insteads of acronyms for better understanding. Signed-off-by: Yong Zhao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 4e873dcbe68f..3218576e109d 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -502,15 +502,15 @@ struct drm_amdgpu_gem_op { #define AMDGPU_VM_MTYPE_MASK (0xf << 5) /* Default MTYPE. 
Pre-AI must use this. Recommended for newer ASICs. */ #define AMDGPU_VM_MTYPE_DEFAULT (0 << 5) -/* Use NC MTYPE instead of default MTYPE */ +/* Use Non Coherent MTYPE instead of default MTYPE */ #define AMDGPU_VM_MTYPE_NC (1 << 5) -/* Use WC MTYPE instead of default MTYPE */ +/* Use Write Combine MTYPE instead of default MTYPE */ #define AMDGPU_VM_MTYPE_WC (2 << 5) -/* Use CC MTYPE instead of default MTYPE */ +/* Use Cache Coherent MTYPE instead of default MTYPE */ #define AMDGPU_VM_MTYPE_CC (3 << 5) -/* Use UC MTYPE instead of default MTYPE */ +/* Use UnCached MTYPE instead of default MTYPE */ #define AMDGPU_VM_MTYPE_UC (4 << 5) -/* Use RW MTYPE instead of default MTYPE */ +/* Use Read Write MTYPE instead of default MTYPE */ #define AMDGPU_VM_MTYPE_RW (5 << 5) struct drm_amdgpu_gem_va { -- cgit v1.2.3-59-g8ed1b From d43be2554b58621a21cb5f54b32db2263b3008b6 Mon Sep 17 00:00:00 2001 From: Bernard Zhao Date: Mon, 27 Apr 2020 01:05:23 -0700 Subject: drivers: video: hdmi: cleanup coding style in video a bit Eliminate the magic numbers, add vendor infoframe size macro like other hdmi modules. Signed-off-by: Bernard Zhao Cc: Uma Shankar Cc: Ville Syrjala Cc: Shashank Sharma Cc: Laurent Pinchart Cc: Daniel Vetter Cc: opensource.kernel@vivo.com [b.zolnierkie: add "hdmi" to the patch summary] [b.zolnierkie: fix "vender" -> vendor" typo in the patch description] Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200427080530.3234-1-bernard@vivo.com --- drivers/video/hdmi.c | 2 +- include/linux/hdmi.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 856a8c4e84a2..f693076f2e5f 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -495,7 +495,7 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) * value */ frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID; - frame->length = 4; + frame->length = HDMI_VENDOR_INFOFRAME_SIZE; return 0; } diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 9613d796cfb1..ff25aeb95ee4 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -57,6 +57,7 @@ enum hdmi_infoframe_type { #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 #define HDMI_DRM_INFOFRAME_SIZE 26 +#define HDMI_VENDOR_INFOFRAME_SIZE 4 #define HDMI_INFOFRAME_SIZE(type) \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) -- cgit v1.2.3-59-g8ed1b From ccaf72d3c252a647e7bdd944d1d4fd0aa9adfe9e Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Mon, 18 Mar 2019 21:15:25 +0800 Subject: drm/amdgpu: add sienna_cichlid asic type Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/drm/amd_asic_type.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b633171281f8..5d1e445eef21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -112,6 +112,7 @@ const char *amdgpu_asic_name[] = { "NAVI10", "NAVI14", "NAVI12", + "SIENNA_CICHLID", "LAST", }; diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index b1230e33d506..0c5bd1134460 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -54,6 +54,7 @@ enum amd_asic_type { CHIP_NAVI10, /* 25 */ CHIP_NAVI14, /* 26 */ CHIP_NAVI12, /* 27 */ + CHIP_SIENNA_CICHLID, /* 28 */ 
CHIP_LAST, }; -- cgit v1.2.3-59-g8ed1b From 26eb603b59a2f8babfd36307d302f3e8f387dc98 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 5 Jun 2020 09:32:05 +0200 Subject: drm/cma-helper: Rename symbols from drm_cma_gem_ to drm_gem_cma_ This fixes the naming of several symbols within CMA helpers. No functional changes are made. Signed-off-by: Thomas Zimmermann Reviewed-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20200605073247.4057-2-tzimmermann@suse.de --- drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 2 +- drivers/gpu/drm/drm_gem_cma_helper.c | 10 +++++----- include/drm/drm_gem_cma_helper.h | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 6b27242b9ee3..5e7ea0459d01 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -188,7 +188,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops); static struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .gem_create_object = drm_cma_gem_create_object_default_funcs, + .gem_create_object = drm_gem_cma_create_object_default_funcs, .dumb_create = drm_gem_cma_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index b3db3ca7bd7a..842e2fa33235 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -572,7 +572,7 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) } EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap); -static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = { +static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { .free = drm_gem_cma_free_object, .print_info = drm_gem_cma_print_info, .get_sg_table = drm_gem_cma_prime_get_sg_table, @@ -581,7 +581,7 @@ static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = { }; /** - * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a + * drm_gem_cma_create_object_default_funcs - Create a CMA GEM object with a * default function table * @dev: DRM device * @size: Size of the object to allocate @@ -593,7 +593,7 @@ static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = { * A pointer to a allocated GEM object or an error pointer on failure. 
*/ struct drm_gem_object * -drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size) +drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size) { struct drm_gem_cma_object *cma_obj; @@ -601,11 +601,11 @@ drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size) if (!cma_obj) return NULL; - cma_obj->base.funcs = &drm_cma_gem_default_funcs; + cma_obj->base.funcs = &drm_gem_cma_default_funcs; return &cma_obj->base; } -EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs); +EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs); /** * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 947ac95eb24a..64b7e9d42129 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -107,7 +107,7 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); struct drm_gem_object * -drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size); +drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); /** * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual @@ -118,7 +118,7 @@ drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size); * imported buffers. */ #define DRM_GEM_CMA_VMAP_DRIVER_OPS \ - .gem_create_object = drm_cma_gem_create_object_default_funcs, \ + .gem_create_object = drm_gem_cma_create_object_default_funcs, \ .dumb_create = drm_gem_cma_dumb_create, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ -- cgit v1.2.3-59-g8ed1b From 06d6620164b2989468cd4ea809645e6483623314 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 5 Jun 2020 09:32:06 +0200 Subject: drm/cma-helper: Rework DRM_GEM_CMA_VMAP_DRIVER_OPS macro Rename the macro to DRM_GEM_CMA_DRIVER_OPS_VMAP to align naming with SHMEM helpers and drm_gem_cma_prime_import_sg_table_vmap(). An variant of the macro is provided for drivers that override the default .dumb_create callback. Adapt drivers to the changes. v3: * rename macro to signal implicit vmap on imported buffers v2: * provide DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE Signed-off-by: Thomas Zimmermann Reviewed-by: Sam Ravnborg Reviewed-by: Laurent Pinchart Reviewed-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20200605073247.4057-3-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_cma_helper.c | 2 +- drivers/gpu/drm/sun4i/sun4i_drv.c | 3 +-- drivers/gpu/drm/tidss/tidss_drv.c | 2 +- drivers/gpu/drm/tiny/hx8357d.c | 2 +- drivers/gpu/drm/tiny/ili9225.c | 2 +- drivers/gpu/drm/tiny/ili9341.c | 2 +- drivers/gpu/drm/tiny/ili9486.c | 2 +- drivers/gpu/drm/tiny/mi0283qt.c | 2 +- drivers/gpu/drm/tiny/repaper.c | 2 +- drivers/gpu/drm/tiny/st7586.c | 2 +- drivers/gpu/drm/tiny/st7735r.c | 2 +- include/drm/drm_gem_cma_helper.h | 30 ++++++++++++++++++++++++++---- 12 files changed, 37 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 842e2fa33235..06a5b9ee1fe0 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -620,7 +620,7 @@ EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs); * address set. This address is released when the object is freed. * * This function can be used as the &drm_driver.gem_prime_import_sg_table - * callback. 
The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set + * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set * the necessary DRM driver operations. * * Returns: diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 328272ff77d8..29861fc81b35 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -52,8 +52,7 @@ static struct drm_driver sun4i_drv_driver = { .minor = 0, /* GEM Operations */ - DRM_GEM_CMA_VMAP_DRIVER_OPS, - .dumb_create = drm_sun4i_gem_dumb_create, + DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), }; static int sun4i_drv_bind(struct device *dev) diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 99edc66ebdef..fee2f6fa3506 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -112,7 +112,7 @@ static struct drm_driver tidss_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &tidss_fops, .release = tidss_release, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .name = "tidss", .desc = "TI Keystone DSS", .date = "20180215", diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index b4bc358a3269..0998309b0d95 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -196,7 +196,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); static struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hx8357d_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .debugfs_init = mipi_dbi_debugfs_init, .name = "hx8357d", .desc = "HX8357D", diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index d1a5ab6747d5..16400064320f 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -346,7 +346,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops); static struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9225_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .name = "ili9225", .desc = "Ilitek ILI9225", .date = "20171106", diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index bb819f45a5d3..d39c39df56ad 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -152,7 +152,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); static struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9341_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", .desc = "Ilitek ILI9341", diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 2702ea557d29..403af68fa440 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -165,7 +165,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops); static struct drm_driver ili9486_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9486_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9486", .desc = "Ilitek ILI9486", diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 08ac549ab0f7..2131b4268c00 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -156,7 +156,7 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops); static struct 
drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &mi0283qt_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", .desc = "Multi-Inno MI0283QT", diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 1c0e7169545b..08164e2a2d13 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -946,7 +946,7 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops); static struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &repaper_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .name = "repaper", .desc = "Pervasive Displays RePaper e-ink panels", .date = "20170405", diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 2a1fae422f7a..1311e5df8721 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -285,7 +285,7 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); static struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7586_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", .desc = "Sitronix ST7586", diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index 0af1b15efdf8..c0bc2a18edde 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -157,7 +157,7 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops); static struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7735r_fops, - DRM_GEM_CMA_VMAP_DRIVER_OPS, + DRM_GEM_CMA_DRIVER_OPS_VMAP, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", .desc = "Sitronix ST7735R", diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 64b7e9d42129..2cdf333ae585 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -110,21 +110,43 @@ struct drm_gem_object * drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); /** - * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual - * address on the buffer + * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations + * ensuring a virtual address + * on the buffer + * @dumb_create_func: callback function for .dumb_create * * This macro provides a shortcut for setting the default GEM operations in the * &drm_driver structure for drivers that need the virtual address also on * imported buffers. + * + * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that + * override the default implementation of &struct rm_driver.dumb_create. Use + * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. 
*/ -#define DRM_GEM_CMA_VMAP_DRIVER_OPS \ +#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ .gem_create_object = drm_gem_cma_create_object_default_funcs, \ - .dumb_create = drm_gem_cma_dumb_create, \ + .dumb_create = (dumb_create_func), \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \ .gem_prime_mmap = drm_gem_prime_mmap +/** + * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual + * address on the buffer + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure for drivers that need the virtual address also on + * imported buffers. + * + * Drivers that come with their own implementation of + * &struct drm_driver.dumb_create should use + * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use + * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. + */ +#define DRM_GEM_CMA_DRIVER_OPS_VMAP \ + DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) + struct drm_gem_object * drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, struct dma_buf_attachment *attach, -- cgit v1.2.3-59-g8ed1b From 654bf12bad116f434c17b7669895ffaab96e16c2 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 5 Jun 2020 09:32:07 +0200 Subject: drm/cma-helper: Add DRM_GEM_CMA_DRIVER_OPS to set default GEM CMA functions The macro to DRM_GEM_CMA_DRIVER_OPS sets GEM callbacks in struct drm_driver to their defaults for CMA. A variant of the macro is provided for drivers that override the default .dumb_create callback. Adapt drivers to the changes. v4: * remove parenthesis around dumb_create_func * fix grammar in commit message Signed-off-by: Thomas Zimmermann Reviewed-by: Sam Ravnborg Reviewed-by: Laurent Pinchart Reviewed-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20200605073247.4057-4-tzimmermann@suse.de --- include/drm/drm_gem_cma_helper.h | 48 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 2cdf333ae585..2bfa2502607a 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -109,6 +109,42 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); struct drm_gem_object * drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); +/** + * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations + * @dumb_create_func: callback function for .dumb_create + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure. + * + * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that + * override the default implementation of &struct rm_driver.dumb_create. Use + * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address + * on imported buffers should use + * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. 
+ */ +#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ + .gem_create_object = drm_gem_cma_create_object_default_funcs, \ + .dumb_create = (dumb_create_func), \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \ + .gem_prime_mmap = drm_gem_cma_prime_mmap + +/** + * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure. + * + * Drivers that come with their own implementation of + * &struct drm_driver.dumb_create should use + * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use + * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address + * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead. + */ +#define DRM_GEM_CMA_DRIVER_OPS \ + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) + /** * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations * ensuring a virtual address @@ -120,12 +156,14 @@ drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); * imported buffers. * * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that - * override the default implementation of &struct rm_driver.dumb_create. Use - * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. + * override the default implementation of &struct drm_driver.dumb_create. Use + * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a + * virtual address on imported buffers should use + * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ .gem_create_object = drm_gem_cma_create_object_default_funcs, \ - .dumb_create = (dumb_create_func), \ + .dumb_create = dumb_create_func, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \ @@ -142,7 +180,9 @@ drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); * Drivers that come with their own implementation of * &struct drm_driver.dumb_create should use * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use - * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. + * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a + * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS + * instead. */ #define DRM_GEM_CMA_DRIVER_OPS_VMAP \ DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) -- cgit v1.2.3-59-g8ed1b From ad0f449bebc79b01583c711684fefcdc9620320a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 5 Jun 2020 09:32:47 +0200 Subject: drm: Remove struct drm_driver.gem_print_info The .gem_print_info callback in struct drm_driver is obsolete and has no users left. Remove it. 
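To see how the CMA helper macros introduced a few commits above are meant to be consumed, here is a hedged sketch of a hypothetical driver; the driver name, fops and metadata are placeholders rather than code from any of these patches.

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

DEFINE_DRM_GEM_CMA_FOPS(foo_fops);

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops = &foo_fops,
	/* Default dumb_create, no kernel virtual address on imported buffers: */
	DRM_GEM_CMA_DRIVER_OPS,
	.name = "foo",
	.desc = "Example CMA-backed KMS driver",
	.date = "20200605",
	.major = 1,
	.minor = 0,
};

/* A driver with its own dumb_create implementation that also needs imported
 * buffers mapped into the kernel would instead write:
 *	DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(foo_dumb_create)
 * The point of the macros is that the prime/dumb boilerplate no longer has to
 * be repeated in every driver. */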
Signed-off-by: Thomas Zimmermann Suggested-by: Emil Velikov Reviewed-by: Emil Velikov Reviewed-by: Laurent Pinchart Link: https://patchwork.freedesktop.org/patch/msgid/20200605073247.4057-44-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem.c | 2 -- include/drm/drm_drv.h | 17 ----------------- 2 files changed, 19 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 94dd94230fd1..a57f5379fc08 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1199,8 +1199,6 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, if (obj->funcs && obj->funcs->print_info) obj->funcs->print_info(p, indent, obj); - else if (obj->dev->driver->gem_print_info) - obj->dev->driver->gem_print_info(p, indent, obj); } int drm_gem_pin(struct drm_gem_object *obj) diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index bb924cddc09c..8f110a28b6a2 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -353,23 +353,6 @@ struct drm_driver { */ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); - /** - * @gem_print_info: - * - * This callback is deprecated in favour of - * &drm_gem_object_funcs.print_info. - * - * If driver subclasses struct &drm_gem_object, it can implement this - * optional hook for printing additional driver specific info. - * - * drm_printf_indent() should be used in the callback passing it the - * indent argument. - * - * This callback is called from drm_gem_print_info(). - */ - void (*gem_print_info)(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj); - /** * @gem_create_object: constructor for gem objects * -- cgit v1.2.3-59-g8ed1b From d18ee06b4889bf7da412069ef60baeaaca3ac6f4 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 9 Jun 2020 11:08:19 +0200 Subject: drm/shmem-helper: Add .gem_create_object helper that sets map_cached flag The helper drm_gem_shmem_create_object_cached() allocates an GEM SHMEM object and sets the map_cached flag. Useful for drivers that want cached mappings. v3: * style fixes Signed-off-by: Thomas Zimmermann Reviewed-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20200609090820.20256-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_shmem_helper.c | 27 +++++++++++++++++++++++++++ include/drm/drm_gem_shmem_helper.h | 4 ++++ 2 files changed, 31 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index f750063968ef..0a7e3b664bc2 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -460,6 +460,33 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_shmem_purge); +/** + * drm_gem_shmem_create_object_cached - Create a shmem buffer object with + * cached mappings + * @dev: DRM device + * @size: Size of the object to allocate + * + * By default, shmem buffer objects use writecombine mappings. This + * function implements struct drm_driver.gem_create_object for shmem + * buffer objects with cached mappings. + * + * Returns: + * A struct drm_gem_shmem_object * on success or NULL negative on failure. 
+ */ +struct drm_gem_object * +drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size) +{ + struct drm_gem_shmem_object *shmem; + + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return NULL; + shmem->map_cached = true; + + return &shmem->base; +} +EXPORT_SYMBOL(drm_gem_shmem_create_object_cached); + /** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object * @file: DRM file structure to create the dumb buffer for diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 294b2931c4cc..5381f0c8cf6f 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -132,6 +132,10 @@ struct drm_gem_shmem_object * drm_gem_shmem_create_with_handle(struct drm_file *file_priv, struct drm_device *dev, size_t size, uint32_t *handle); + +struct drm_gem_object * +drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size); + int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -- cgit v1.2.3-59-g8ed1b From fedbfcc6f7977d9033ba8dec1c8044f3c1faf3d1 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Tue, 9 Jun 2020 13:46:01 +0100 Subject: drivers: gpu: drm: Fix trivial spelling The word 'descriptor' is misspelled throughout the tree. Fix it up accordingly: decriptors -> descriptors Signed-off-by: Kieran Bingham Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200609124610.3445662-9-kieran.bingham+renesas@ideasonboard.com --- drivers/gpu/drm/drm_dp_helper.c | 2 +- include/drm/drm_dp_helper.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 2510f7991dcf..ff0afd830d26 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1362,7 +1362,7 @@ EXPORT_SYMBOL(drm_dp_get_edid_quirks); /** * drm_dp_read_desc - read sink/branch descriptor from DPCD * @aux: DisplayPort AUX channel - * @desc: Device decriptor to fill from DPCD + * @desc: Device descriptor to fill from DPCD * @is_branch: true for branch devices, false for sink devices * * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 09e674c228b9..aed08e8cb22c 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1561,7 +1561,7 @@ enum drm_dp_quirk { /** * drm_dp_has_quirk() - does the DP device have a specific quirk - * @desc: Device decriptor filled by drm_dp_read_desc() + * @desc: Device descriptor filled by drm_dp_read_desc() * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks() * @quirk: Quirk to query for * -- cgit v1.2.3-59-g8ed1b From 471bdd0df0d5048d1e1bb0aa5da44f44f9856fb1 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 4 Jun 2020 21:45:00 +0300 Subject: drm/i915/dp_mst: Work around out-of-spec adapters filtering short pulses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some TypeC -> native DP adapters, at least the Club 3D CAC-1557 adapter, incorrectly filter out HPD short pulses with a duration less than ~540 usec, leading to MST probe failures. 
According to the DP Standard 2.0 section 5.1.4: - DP sinks should generate short pulses in the 500 usec -> 1 msec range - DP sources should detect short pulses in the 250 usec -> 2 msec range According to the DP Alt Mode on TypeC Standard section 3.9.2, adapters should detect and forward short pulses according to how sources should detect them as specified in the DP Standard (250 usec -> 2 msec). Based on the above filtering out short pulses with a duration less than 540 usec is incorrect. To make such adapters work add support for a driver polling on MST interrupt flags, and wire this up in the i915 driver. The sink can clear an interrupt it raised after 110 msec if the source doesn't respond, so use a 50 msec poll period to avoid missing an interrupt. Polling of the MST interrupt flags is explicitly allowed by the DP Standard. This fixes MST probe failures I saw using this adapter and a DELL U2515H monitor. v2: - Fix the wait event timeout for the no-poll case. v3 (Ville): - Fix the short pulse duration limits in the commit log prescribed by the DP Standard. - Add code comment explaining why/how polling is used. - Factor out a helper to schedule the port's hpd irq handler and move it to the rest of hotplug handlers. - Document the new MST callback. - s/update_hpd_irq_state/poll_hpd_irq/ Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Reviewed-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200604184500.23730-2-imre.deak@intel.com --- drivers/gpu/drm/drm_dp_mst_topology.c | 32 +++++++++++++++++++++++++--- drivers/gpu/drm/i915/display/intel_dp_mst.c | 10 +++++++++ drivers/gpu/drm/i915/display/intel_hotplug.c | 18 ++++++++++++++++ drivers/gpu/drm/i915/display/intel_hotplug.h | 2 ++ include/drm/drm_dp_mst_helper.h | 9 ++++++++ 5 files changed, 68 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 469d3569d5ca..ca936ef41312 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1178,11 +1178,37 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, struct drm_dp_sideband_msg_tx *txmsg) { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + unsigned long wait_timeout = msecs_to_jiffies(4000); + unsigned long wait_expires = jiffies + wait_timeout; int ret; - ret = wait_event_timeout(mgr->tx_waitq, - check_txmsg_state(mgr, txmsg), - (4 * HZ)); + for (;;) { + /* + * If the driver provides a way for this, change to + * poll-waiting for the MST reply interrupt if we didn't receive + * it for 50 msec. This would cater for cases where the HPD + * pulse signal got lost somewhere, even though the sink raised + * the corresponding MST interrupt correctly. One example is the + * Club 3D CAC-1557 TypeC -> DP adapter which for some reason + * filters out short pulses with a duration less than ~540 usec. + * + * The poll period is 50 msec to avoid missing an interrupt + * after the sink has cleared it (after a 110msec timeout + * since it raised the interrupt). + */ + ret = wait_event_timeout(mgr->tx_waitq, + check_txmsg_state(mgr, txmsg), + mgr->cbs->poll_hpd_irq ? 
+ msecs_to_jiffies(50) : + wait_timeout); + + if (ret || !mgr->cbs->poll_hpd_irq || + time_after(jiffies, wait_expires)) + break; + + mgr->cbs->poll_hpd_irq(mgr); + } + mutex_lock(&mgr->qlock); if (ret > 0) { if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 35debce71366..57ef82285fee 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -33,6 +33,7 @@ #include "intel_connector.h" #include "intel_ddi.h" #include "intel_display_types.h" +#include "intel_hotplug.h" #include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" @@ -747,8 +748,17 @@ err: return NULL; } +static void +intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + + intel_hpd_trigger_irq(dp_to_dig_port(intel_dp)); +} + static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .poll_hpd_irq = intel_dp_mst_poll_hpd_irq, }; static struct intel_dp_mst_encoder * diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index a091442efba4..21445e5afbc2 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -348,6 +348,24 @@ static void i915_digport_work_func(struct work_struct *work) } } +/** + * intel_hpd_trigger_irq - trigger an hpd irq event for a port + * @dig_port: digital port + * + * Trigger an HPD interrupt event for the given port, emulating a short pulse + * generated by the sink, and schedule the dig port work to handle it. + */ +void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + spin_lock_irq(&i915->irq_lock); + i915->hotplug.short_port_mask |= BIT(dig_port->base.port); + spin_unlock_irq(&i915->irq_lock); + + queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work); +} + /* * Handle hotplug events outside the interrupt handler proper. 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 1e6b4fda2900..febf1132a298 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -10,6 +10,7 @@ struct drm_i915_private; struct intel_connector; +struct intel_digital_port; struct intel_encoder; enum port; @@ -19,6 +20,7 @@ enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, bool irq_received); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); +void intel_hpd_trigger_irq(struct intel_digital_port *dig_port); void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 9e1ffcd7cb68..b230ff6f7081 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -475,6 +475,15 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + /* + * Checks for any pending MST interrupts, passing them to MST core for + * processing, the same way an HPD IRQ pulse handler would do this. + * If provided MST core calls this callback from a poll-waiting loop + * when waiting for MST down message replies. The driver is expected + * to guard against a race between this callback and the driver's HPD + * IRQ pulse handler. + */ + void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr); }; #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) -- cgit v1.2.3-59-g8ed1b From 72822c3bfa8ef7c997692acd580c73c263107592 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 10 Jun 2020 16:47:04 +0300 Subject: drm/dp_mst: Fix flushing the delayed port/mstb destroy work Atm, a pending delayed destroy work during module removal will be canceled, leaving behind MST ports, mstbs. Fix this by using a dedicated workqueue which will be drained of requeued items as well when destroying it. v2: - Check if wq is NULL before calling destroy_workqueue(). 
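The fix works because destroy_workqueue() drains its queue before tearing it down, running any work that was re-queued while draining, whereas cancel_work_sync() only deals with the single pending instance. A standalone sketch of the pattern follows; all names are invented for illustration and this is not code from the patch.

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static void example_destroy_fn(struct work_struct *work);
static DECLARE_WORK(example_work, example_destroy_fn);
static bool example_more_pending(void);	/* hypothetical helper */

static void example_destroy_fn(struct work_struct *work)
{
	/*
	 * Tearing down one object may schedule more teardown work, e.g.
	 * destroying a branch device queues its ports for destruction.
	 */
	if (example_more_pending())
		queue_work(example_wq, &example_work);	/* re-queues itself */
}

static int example_init(void)
{
	example_wq = alloc_ordered_workqueue("example_destroy_wq", 0);
	return example_wq ? 0 : -ENOMEM;
}

static void example_fini(void)
{
	if (example_wq) {
		/* Drains the queue first, including re-queued items. */
		destroy_workqueue(example_wq);
		example_wq = NULL;
	}
}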
Cc: Lyude Paul Cc: Stanislav Lisovskiy Reviewed-by: Stanislav Lisovskiy Reviewed-by: Lyude Paul Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20200610134704.25270-1-imre.deak@intel.com --- drivers/gpu/drm/drm_dp_mst_topology.c | 19 ++++++++++++++++--- include/drm/drm_dp_mst_helper.h | 8 ++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 8ff4caf410cb..104f97909bb9 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1630,7 +1630,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) mutex_lock(&mgr->delayed_destroy_lock); list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); mutex_unlock(&mgr->delayed_destroy_lock); - schedule_work(&mgr->delayed_destroy_work); + queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); } /** @@ -1747,7 +1747,7 @@ static void drm_dp_destroy_port(struct kref *kref) mutex_lock(&mgr->delayed_destroy_lock); list_add(&port->next, &mgr->destroy_port_list); mutex_unlock(&mgr->delayed_destroy_lock); - schedule_work(&mgr->delayed_destroy_work); + queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); } /** @@ -5203,6 +5203,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, INIT_LIST_HEAD(&mgr->destroy_port_list); INIT_LIST_HEAD(&mgr->destroy_branch_device_list); INIT_LIST_HEAD(&mgr->up_req_list); + + /* + * delayed_destroy_work will be queued on a dedicated WQ, so that any + * requeuing will be also flushed when deiniting the topology manager. + */ + mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0); + if (mgr->delayed_destroy_wq == NULL) + return -ENOMEM; + INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->tx_work, drm_dp_tx_work); INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); @@ -5247,7 +5256,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { drm_dp_mst_topology_mgr_set_mst(mgr, false); flush_work(&mgr->work); - cancel_work_sync(&mgr->delayed_destroy_work); + /* The following will also drain any requeued work on the WQ. */ + if (mgr->delayed_destroy_wq) { + destroy_workqueue(mgr->delayed_destroy_wq); + mgr->delayed_destroy_wq = NULL; + } mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); mgr->payloads = NULL; diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index b230ff6f7081..8b9eb4db3381 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -681,6 +681,14 @@ struct drm_dp_mst_topology_mgr { * @destroy_branch_device_list. */ struct mutex delayed_destroy_lock; + + /** + * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items. + * A dedicated WQ makes it possible to drain any requeued work items + * on it. + */ + struct workqueue_struct *delayed_destroy_wq; + /** * @delayed_destroy_work: Work item to destroy MST port and branch * devices, needed to avoid locking inversion. -- cgit v1.2.3-59-g8ed1b From 00398e1d518309328e8ba7dff00881538ac22c6a Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 19:50:41 +0000 Subject: Bluetooth: Add support for BT_PKT_STATUS CMSG data for SCO connections This change adds support for reporting the BT_PKT_STATUS to the socket CMSG data to allow the implementation of a packet loss correction on erroneous data received on the SCO socket. 
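A rough userspace sketch of how an application would opt in and read the per-packet status is below; it is not part of this patch, error handling and SCO connection setup are omitted, the buffer sizes are arbitrary, and the fallback defines simply mirror the values added to the kernel headers in case the installed libbluetooth headers predate them.

#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

#ifndef BT_PKT_STATUS
#define BT_PKT_STATUS		16
#endif
#ifndef BT_SCM_PKT_STATUS
#define BT_SCM_PKT_STATUS	0x03
#endif

static void example_read_sco_packet(int sco_fd)
{
	int one = 1;
	char data[255], control[64];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *c;

	/* Normally done once after connecting, not per packet. */
	setsockopt(sco_fd, SOL_BLUETOOTH, BT_PKT_STATUS, &one, sizeof(one));

	if (recvmsg(sco_fd, &msg, 0) < 0)
		return;

	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == SOL_BLUETOOTH &&
		    c->cmsg_type == BT_SCM_PKT_STATUS) {
			uint8_t pkt_status = *(uint8_t *)CMSG_DATA(c);
			/* 0x00: data received correctly; non-zero: the
			 * controller flagged the packet as possibly invalid
			 * or lost, so the app may run loss concealment. */
			(void)pkt_status;
		}
	}
}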
The patch was partially developed by Marcel Holtmann and validated by Hsin-yu Chao. Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/bluetooth.h | 10 ++++++++++ include/net/bluetooth/sco.h | 2 ++ net/bluetooth/af_bluetooth.c | 3 +++ net/bluetooth/hci_core.c | 1 + net/bluetooth/sco.c | 32 ++++++++++++++++++++++++++++++++ 5 files changed, 48 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 18190055374c..7ee8041af803 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -147,6 +147,10 @@ struct bt_voice { #define BT_MODE_LE_FLOWCTL 0x03 #define BT_MODE_EXT_FLOWCTL 0x04 +#define BT_PKT_STATUS 16 + +#define BT_SCM_PKT_STATUS 0x03 + __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) @@ -286,6 +290,7 @@ struct bt_sock { struct sock *parent; unsigned long flags; void (*skb_msg_name)(struct sk_buff *, void *, int *); + void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *); }; enum { @@ -335,6 +340,10 @@ struct l2cap_ctrl { struct l2cap_chan *chan; }; +struct sco_ctrl { + u8 pkt_status; +}; + struct hci_dev; typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); @@ -361,6 +370,7 @@ struct bt_skb_cb { u8 incoming:1; union { struct l2cap_ctrl l2cap; + struct sco_ctrl sco; struct hci_ctrl hci; }; }; diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index f40ddb4264fc..1aa2e14b6c94 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -46,4 +46,6 @@ struct sco_conninfo { __u8 dev_class[3]; }; +#define SCO_CMSG_PKT_STATUS 0x01 + #endif /* __SCO_H */ diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index b751a7c1b20f..4ef6a54403aa 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -286,6 +286,9 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); + + if (bt_sk(sk)->skb_put_cmsg) + bt_sk(sk)->skb_put_cmsg(skb, msg, sk); } skb_free_datagram(sk, skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 83ce665d3cbf..00458a8c26f8 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -4554,6 +4554,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) if (conn) { /* Send to upper protocol */ + bt_cb(skb)->sco.pkt_status = flags & 0x03; sco_recv_scodata(conn, skb); return; } else { diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index c8c3d38cdc7b..83a48860bb5d 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -66,6 +66,7 @@ struct sco_pinfo { bdaddr_t dst; __u32 flags; __u16 setting; + __u8 cmsg_mask; struct sco_conn *conn; }; @@ -449,6 +450,15 @@ static void sco_sock_close(struct sock *sk) sco_sock_kill(sk); } +static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg, + struct sock *sk) +{ + if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS) + put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, + sizeof(bt_cb(skb)->sco.pkt_status), + &bt_cb(skb)->sco.pkt_status); +} + static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); @@ -457,6 +467,8 @@ static void sco_sock_init(struct sock *sk, struct sock *parent) sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); + } else { + bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg; } 
} @@ -846,6 +858,18 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, sco_pi(sk)->setting = voice.setting; break; + case BT_PKT_STATUS: + if (get_user(opt, (u32 __user *)optval)) { + err = -EFAULT; + break; + } + + if (opt) + sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS; + else + sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; + break; + default: err = -ENOPROTOOPT; break; @@ -923,6 +947,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, int len, err = 0; struct bt_voice voice; u32 phys; + int pkt_status; BT_DBG("sk %p", sk); @@ -969,6 +994,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, err = -EFAULT; break; + case BT_PKT_STATUS: + pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS); + + if (put_user(pkt_status, (int __user *)optval)) + err = -EFAULT; + break; + default: err = -ENOPROTOOPT; break; -- cgit v1.2.3-59-g8ed1b From 32929e1f4ad9adf71f655028e4dd5d87adb97f52 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 14:26:10 +0000 Subject: Bluetooth: Use only 8 bits for the HCI CMSG state flags This change implements suggestions from the code review of the SCO CMSG state flag patch. Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_sock.h | 4 ++-- net/bluetooth/hci_sock.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 9352bb1bf34c..9949870f7d78 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -31,8 +31,8 @@ #define HCI_TIME_STAMP 3 /* CMSG flags */ -#define HCI_CMSG_DIR 0x0001 -#define HCI_CMSG_TSTAMP 0x0002 +#define HCI_CMSG_DIR 0x01 +#define HCI_CMSG_TSTAMP 0x02 struct sockaddr_hci { sa_family_t hci_family; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index caf38a8ea6a8..d5627967fc25 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -52,7 +52,7 @@ struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; - __u32 cmsg_mask; + __u8 cmsg_mask; unsigned short channel; unsigned long flags; __u32 cookie; @@ -1399,7 +1399,7 @@ done: static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { - __u32 mask = hci_pi(sk)->cmsg_mask; + __u8 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; -- cgit v1.2.3-59-g8ed1b From 7e90de4ac1099d3f4e26023853d4aefd0d2a1dea Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 02:01:55 +0000 Subject: Bluetooth: mgmt: read/set system parameter definitions This patch submits the corresponding kernel definitions to mgmt.h. This is submitted before the implementation to avoid any conflicts in values allocations. 
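For orientation, the new system/runtime configuration opcodes are expected to carry their parameters as a packed list of the mgmt_tlv entries defined above. A hedged sketch of encoding one entry follows; the parameter type number and value are placeholders, not allocated identifiers, and the actual parameter set is defined by the later implementation.

#include <asm/unaligned.h>
#include <net/bluetooth/mgmt.h>

/* Hypothetical: append one 16-bit parameter to a Set Default System
 * Configuration request buffer. Returns the number of bytes written. */
static size_t example_put_tlv_u16(u8 *buf, u16 param_type, u16 value)
{
	struct mgmt_tlv *tlv = (struct mgmt_tlv *)buf;

	tlv->type = cpu_to_le16(param_type);
	tlv->length = sizeof(__le16);
	put_unaligned_le16(value, tlv->value);

	return sizeof(*tlv) + tlv->length;
}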
Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Yu Liu Signed-off-by: Marcel Holtmann --- include/net/bluetooth/mgmt.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 16e0d87bd8fa..e515288f328f 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -52,6 +52,12 @@ struct mgmt_hdr { __le16 len; } __packed; +struct mgmt_tlv { + __le16 type; + __u8 length; + __u8 value[]; +} __packed; + struct mgmt_addr_info { bdaddr_t bdaddr; __u8 type; @@ -702,6 +708,18 @@ struct mgmt_rp_set_exp_feature { __le32 flags; } __packed; +#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b +#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0 + +#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c +#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0 + +#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d +#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0 + +#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e +#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; -- cgit v1.2.3-59-g8ed1b From 10873f99ced274cbfc119f55e7e57a0f047a0799 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 02:01:56 +0000 Subject: Bluetooth: centralize default value initialization. This patch centralized the initialization of default parameters. This is required to allow clients to more easily customize the default system parameters. Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 18 ++++++++++++++++++ net/bluetooth/hci_conn.c | 14 ++++---------- net/bluetooth/hci_core.c | 14 +++++++++++++- net/bluetooth/hci_request.c | 15 +++++---------- 4 files changed, 40 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cdd4f1db8670..0d5dbb6cb5a0 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -295,6 +295,14 @@ struct hci_dev { __u8 le_scan_type; __u16 le_scan_interval; __u16 le_scan_window; + __u16 le_scan_int_suspend; + __u16 le_scan_window_suspend; + __u16 le_scan_int_discovery; + __u16 le_scan_window_discovery; + __u16 le_scan_int_adv_monitor; + __u16 le_scan_window_adv_monitor; + __u16 le_scan_int_connect; + __u16 le_scan_window_connect; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_latency; @@ -323,6 +331,16 @@ struct hci_dev { __u16 devid_product; __u16 devid_version; + __u8 def_page_scan_type; + __u16 def_page_scan_int; + __u16 def_page_scan_window; + __u8 def_inq_scan_type; + __u16 def_inq_scan_int; + __u16 def_inq_scan_window; + __u16 def_br_lsto; + __u16 def_page_timeout; + __u16 def_multi_adv_rotation_duration; + __u16 pkt_type; __u16 esco_type; __u16 link_policy; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 307800fd18e6..9bdffc4e79b0 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -789,11 +789,8 @@ static void set_ext_conn_params(struct hci_conn *conn, memset(p, 0, sizeof(*p)); - /* Set window to be the same value as the interval to - * enable continuous scanning. 
- */ - p->scan_interval = cpu_to_le16(hdev->le_scan_interval); - p->scan_window = p->scan_interval; + p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); p->conn_latency = cpu_to_le16(conn->le_conn_latency); @@ -875,11 +872,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req, memset(&cp, 0, sizeof(cp)); - /* Set window to be the same value as the interval to enable - * continuous scanning. - */ - cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); - cp.scan_window = cp.scan_interval; + cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 00458a8c26f8..4f1052a7c488 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2982,7 +2982,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, adv_instance->remaining_time = timeout; if (duration == 0) - adv_instance->duration = HCI_DEFAULT_ADV_DURATION; + adv_instance->duration = hdev->def_multi_adv_rotation_duration; else adv_instance->duration = duration; @@ -3400,6 +3400,12 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = 0x0060; hdev->le_scan_window = 0x0030; + hdev->le_scan_int_suspend = 0x0400; + hdev->le_scan_window_suspend = 0x0012; + hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; + hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; + hdev->le_scan_int_connect = 0x0060; + hdev->le_scan_window_connect = 0x0060; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; @@ -3415,6 +3421,7 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; + hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; @@ -3423,6 +3430,11 @@ struct hci_dev *hci_alloc_dev(void) hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; + /* default 1.28 sec page scan */ + hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; + hdev->def_page_scan_int = 0x0800; + hdev->def_page_scan_window = 0x0012; + mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1acf5b8e0910..a7f572ad38ef 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -34,9 +34,6 @@ #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 -#define LE_SUSPEND_SCAN_WINDOW 0x0012 -#define LE_SUSPEND_SCAN_INTERVAL 0x0400 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); @@ -366,13 +363,11 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) /* 160 msec page scan interval */ acp.interval = cpu_to_le16(0x0100); } else { - type = PAGE_SCAN_TYPE_STANDARD; /* default */ - - /* default 1.28 sec page scan */ - acp.interval = cpu_to_le16(0x0800); + type = hdev->def_page_scan_type; + acp.interval = cpu_to_le16(hdev->def_page_scan_int); } - acp.window = cpu_to_le16(0x0012); + acp.window = 
cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || __cpu_to_le16(hdev->page_scan_window) != acp.window) @@ -927,8 +922,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req) filter_policy |= 0x02; if (hdev->suspended) { - window = LE_SUSPEND_SCAN_WINDOW; - interval = LE_SUSPEND_SCAN_INTERVAL; + window = hdev->le_scan_window_suspend; + interval = hdev->le_scan_int_suspend; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; -- cgit v1.2.3-59-g8ed1b From d1492bbd470280c687e6677a355f923435bdb3ac Mon Sep 17 00:00:00 2001 From: Jishnu Prakash Date: Thu, 28 May 2020 22:24:24 +0530 Subject: iio: adc: Add PMIC7 ADC bindings Add documentation for PMIC7 ADC peripheral. For the PMIC7-type PMICs, ADC peripheral is present in HW for the following PMICs: PMK8350, PM8350, PM8350b, PMR735a and PMR735b. Of these, only the ADC peripheral on PMK8350 is exposed directly to SW. If SW needs to communicate with ADCs on other PMICs, it specifies the PMIC to PMK8350 through the newly added SID register and communication between PMK8350 ADC and other PMIC ADCs is carried out through PBS(Programmable Boot Sequence) at the firmware level. In addition, add definitions for ADC channels and virtual channel definitions (combination of ADC channel number and PMIC SID number) per PMIC, to be used by ADC clients for PMIC7. Signed-off-by: Jishnu Prakash Reviewed-by: Amit Kucheria Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../bindings/iio/adc/qcom,spmi-vadc.yaml | 38 ++++++++-- include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h | 67 ++++++++++++++++ include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h | 88 ++++++++++++++++++++++ include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h | 46 +++++++++++ include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h | 28 +++++++ include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h | 28 +++++++ include/dt-bindings/iio/qcom,spmi-vadc.h | 78 ++++++++++++++++++- 7 files changed, 366 insertions(+), 7 deletions(-) create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml index de8d243f01c2..e6263b617941 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml @@ -13,7 +13,7 @@ maintainers: description: | SPMI PMIC voltage ADC (VADC) provides interface to clients to read voltage. The VADC is a 15-bit sigma-delta ADC. - SPMI PMIC5 voltage ADC (ADC) provides interface to clients to read + SPMI PMIC5/PMIC7 voltage ADC (ADC) provides interface to clients to read voltage. The VADC is a 16-bit sigma-delta ADC. properties: @@ -28,6 +28,7 @@ properties: - qcom,spmi-vadc - qcom,spmi-adc5 - qcom,spmi-adc-rev2 + - qcom,spmi-adc7 reg: description: VADC base address in the SPMI PMIC register map @@ -70,6 +71,8 @@ patternProperties: description: | ADC channel number. See include/dt-bindings/iio/qcom,spmi-vadc.h + For PMIC7 ADC, the channel numbers are specified separately per PMIC + in the PMIC-specific files in include/dt-bindings/iio/. 
label: $ref: /schemas/types.yaml#/definitions/string @@ -113,11 +116,11 @@ patternProperties: channel calibration. If property is not found, channel will be calibrated with 0.625V and 1.25V reference channels, also known as absolute calibration. - - For compatible property "qcom,spmi-adc5" and "qcom,spmi-adc-rev2", - if this property is specified VADC will use the VDD reference (1.875V) - and GND for channel calibration. If property is not found, channel - will be calibrated with 0V and 1.25V reference channels, also known - as absolute calibration. + - For compatible property "qcom,spmi-adc5", "qcom,spmi-adc7" and + "qcom,spmi-adc-rev2", if this property is specified VADC will use + the VDD reference (1.875V) and GND for channel calibration. If + property is not found, channel will be calibrated with 0V and 1.25V + reference channels, also known as absolute calibration. type: boolean qcom,hw-settle-time: @@ -208,6 +211,29 @@ allOf: enum: [ 1, 2, 4, 8, 16 ] default: 1 + - if: + properties: + compatible: + contains: + const: qcom,spmi-adc7 + + then: + patternProperties: + "^.*@[0-9a-f]+$": + properties: + qcom,decimation: + enum: [ 85, 340, 1360 ] + default: 1360 + + qcom,hw-settle-time: + enum: [ 15, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 4000, + 8000, 16000, 32000, 64000, 128000 ] + default: 15 + + qcom,avg-samples: + enum: [ 1, 2, 4, 8, 16 ] + default: 1 + examples: - | spmi_bus { diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h new file mode 100644 index 000000000000..9426f27a1946 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H + +#ifndef PM8350_SID +#define PM8350_SID 1 +#endif + +/* ADC channels for PM8350_ADC for PMIC7 */ +#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0) +#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01) +#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02) +#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03) + +#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04) +#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05) +#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06) +#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07) +#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08) +#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a) +#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b) +#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c) +#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d) + +/* 30k pull-up1 */ +#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24) +#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25) +#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26) +#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27) +#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28) +#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a) +#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b) +#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c) +#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d) + +/* 100k pull-up2 */ +#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44) +#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45) +#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46) +#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47) +#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48) +#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a) +#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b) +#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c) +#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d) + +/* 400k pull-up3 */ +#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64) +#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65) +#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66) +#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID << 8 | 0x67) +#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68) +#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a) +#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b) +#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c) +#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d) + +/* 1/3 Divider */ +#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d) + +#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h new file mode 100644 index 000000000000..dc2497c27e16 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H + +#ifndef PM8350B_SID +#define PM8350B_SID 3 +#endif + +/* ADC channels for PM8350B_ADC for PMIC7 */ +#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0) +#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01) +#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02) +#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03) + +#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04) +#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05) +#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06) +#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07) +#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08) +#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09) +#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a) +#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b) +#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c) +#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d) + +#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10) +#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11) +#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12) +#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13) +#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15) +#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17) + +/* 30k pull-up1 */ +#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24) +#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25) +#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26) +#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27) +#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28) +#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29) +#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a) +#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b) +#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c) +#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d) +#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33) + +/* 100k pull-up2 */ +#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44) +#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45) +#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46) +#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47) +#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48) +#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49) +#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a) +#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b) +#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c) +#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d) +#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53) + +/* 400k pull-up3 */ +#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64) +#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65) +#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66) +#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67) +#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68) +#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69) +#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a) +#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b) +#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c) +#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d) +#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73) + +/* 1/3 Divider */ +#define 
PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a) +#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b) +#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c) +#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d) + +#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e) +#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f) + +#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94) +#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h new file mode 100644 index 000000000000..6c296870e95b --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H + +#ifndef PMK8350_SID +#define PMK8350_SID 0 +#endif + +/* ADC channels for PMK8350_ADC for PMIC7 */ +#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0) +#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01) +#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02) +#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03) + +#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04) +#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05) +#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06) +#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07) +#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08) + +/* 30k pull-up1 */ +#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24) +#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25) +#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26) +#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27) +#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28) + +/* 100k pull-up2 */ +#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44) +#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45) +#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46) +#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47) +#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48) + +/* 400k pull-up3 */ +#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64) +#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65) +#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66) +#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67) +#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h new file mode 100644 index 000000000000..d6df1b19e5ff --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H + +#ifndef PMR735A_SID +#define PMR735A_SID 4 +#endif + +/* ADC channels for PMR735A_ADC for PMIC7 */ +#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0) +#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01) +#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02) +#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03) + +#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a) +#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b) +#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c) + +/* 100k pull-up2 */ +#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a) +#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b) +#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h new file mode 100644 index 000000000000..8da0e7dab315 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H + +#ifndef PMR735B_SID +#define PMR735B_SID 5 +#endif + +/* ADC channels for PMR735B_ADC for PMIC7 */ +#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0) +#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01) +#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02) +#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03) + +#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a) +#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b) +#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c) + +/* 100k pull-up2 */ +#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a) +#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b) +#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h index 61d556db1542..08adfe25964c 100644 --- a/include/dt-bindings/iio/qcom,spmi-vadc.h +++ b/include/dt-bindings/iio/qcom,spmi-vadc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2014,2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014,2018,2020 The Linux Foundation. All rights reserved. 
*/ #ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H @@ -221,4 +221,80 @@ #define ADC5_MAX_CHANNEL 0xc0 +/* ADC channels for ADC for PMIC7 */ + +#define ADC7_REF_GND 0x00 +#define ADC7_1P25VREF 0x01 +#define ADC7_VREF_VADC 0x02 +#define ADC7_DIE_TEMP 0x03 + +#define ADC7_AMUX_THM1 0x04 +#define ADC7_AMUX_THM2 0x05 +#define ADC7_AMUX_THM3 0x06 +#define ADC7_AMUX_THM4 0x07 +#define ADC7_AMUX_THM5 0x08 +#define ADC7_AMUX_THM6 0x09 +#define ADC7_GPIO1 0x0a +#define ADC7_GPIO2 0x0b +#define ADC7_GPIO3 0x0c +#define ADC7_GPIO4 0x0d + +#define ADC7_CHG_TEMP 0x10 +#define ADC7_USB_IN_V_16 0x11 +#define ADC7_VDC_16 0x12 +#define ADC7_CC1_ID 0x13 +#define ADC7_VREF_BAT_THERM 0x15 +#define ADC7_IIN_FB 0x17 + +/* 30k pull-up1 */ +#define ADC7_AMUX_THM1_30K_PU 0x24 +#define ADC7_AMUX_THM2_30K_PU 0x25 +#define ADC7_AMUX_THM3_30K_PU 0x26 +#define ADC7_AMUX_THM4_30K_PU 0x27 +#define ADC7_AMUX_THM5_30K_PU 0x28 +#define ADC7_AMUX_THM6_30K_PU 0x29 +#define ADC7_GPIO1_30K_PU 0x2a +#define ADC7_GPIO2_30K_PU 0x2b +#define ADC7_GPIO3_30K_PU 0x2c +#define ADC7_GPIO4_30K_PU 0x2d +#define ADC7_CC1_ID_30K_PU 0x33 + +/* 100k pull-up2 */ +#define ADC7_AMUX_THM1_100K_PU 0x44 +#define ADC7_AMUX_THM2_100K_PU 0x45 +#define ADC7_AMUX_THM3_100K_PU 0x46 +#define ADC7_AMUX_THM4_100K_PU 0x47 +#define ADC7_AMUX_THM5_100K_PU 0x48 +#define ADC7_AMUX_THM6_100K_PU 0x49 +#define ADC7_GPIO1_100K_PU 0x4a +#define ADC7_GPIO2_100K_PU 0x4b +#define ADC7_GPIO3_100K_PU 0x4c +#define ADC7_GPIO4_100K_PU 0x4d +#define ADC7_CC1_ID_100K_PU 0x53 + +/* 400k pull-up3 */ +#define ADC7_AMUX_THM1_400K_PU 0x64 +#define ADC7_AMUX_THM2_400K_PU 0x65 +#define ADC7_AMUX_THM3_400K_PU 0x66 +#define ADC7_AMUX_THM4_400K_PU 0x67 +#define ADC7_AMUX_THM5_400K_PU 0x68 +#define ADC7_AMUX_THM6_400K_PU 0x69 +#define ADC7_GPIO1_400K_PU 0x6a +#define ADC7_GPIO2_400K_PU 0x6b +#define ADC7_GPIO3_400K_PU 0x6c +#define ADC7_GPIO4_400K_PU 0x6d +#define ADC7_CC1_ID_400K_PU 0x73 + +/* 1/3 Divider */ +#define ADC7_GPIO1_DIV3 0x8a +#define ADC7_GPIO2_DIV3 0x8b +#define ADC7_GPIO3_DIV3 0x8c +#define ADC7_GPIO4_DIV3 0x8d + +#define ADC7_VPH_PWR 0x8e +#define ADC7_VBAT_SNS 0x8f + +#define ADC7_SBUx 0x94 +#define ADC7_VBAT_2S_MID 0x96 + #endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */ -- cgit v1.2.3-59-g8ed1b From e7e3b9d23f3bc6774cce585ef4fcb02462e04065 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Tue, 26 May 2020 21:35:17 -0700 Subject: iio: cros_ec: Reapply range at resume EC does not currently preserve range across sensor reinit. If sensor is powered down at suspend, it will default to the EC default range at resume, not the range set by the host. Save range if modified, and apply at resume. 
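Not part of the patch, but as a condensed sketch of the save-and-reapply pattern the diff below implements (everything named example_* is hypothetical; the real code walks from the platform device to the IIO private state and re-issues MOTIONSENSE_CMD_SENSOR_RANGE under the command lock):

#include <linux/device.h>
#include <linux/pm.h>

struct example_state {
	bool range_updated;	/* host changed the range since probe */
	int curr_range;		/* last range value sent to the EC */
};

/* Stand-in for filling param.sensor_range and sending the host command. */
static int example_send_range(struct example_state *st)
{
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct example_state *st = dev_get_drvdata(dev);

	/* The EC may have reverted to its default range while suspended. */
	if (!st->range_updated)
		return 0;

	return example_send_range(st);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);

Each cros_ec IIO driver then only needs to point its driver.pm at the shared ops, which is what the hunks below do with cros_ec_sensors_pm_ops.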
Signed-off-by: Gwendal Grignou Signed-off-by: Jonathan Cameron --- .../iio/common/cros_ec_sensors/cros_ec_sensors.c | 5 +++++ .../common/cros_ec_sensors/cros_ec_sensors_core.c | 21 +++++++++++++++++++++ drivers/iio/light/cros_ec_light_prox.c | 6 +++++- drivers/iio/pressure/cros_ec_baro.c | 8 ++++++-- include/linux/iio/common/cros_ec_sensors_core.h | 11 ++++++++++- 5 files changed, 47 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index a66941fdb385..130ab8ce0269 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -200,6 +200,10 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev, st->core.param.sensor_range.roundup = 1; ret = cros_ec_motion_send_host_cmd(&st->core, 0); + if (ret == 0) { + st->core.range_updated = true; + st->core.curr_range = val; + } break; default: ret = cros_ec_sensors_core_write( @@ -315,6 +319,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids); static struct platform_driver cros_ec_sensors_platform_driver = { .driver = { .name = "cros-ec-sensors", + .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_sensors_probe, .id_table = cros_ec_sensors_ids, diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index c831915ca7e5..cda459b61206 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -824,5 +824,26 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write); +static int __maybe_unused cros_ec_sensors_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + int ret = 0; + + if (st->range_updated) { + mutex_lock(&st->cmd_lock); + st->param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE; + st->param.sensor_range.data = st->curr_range; + st->param.sensor_range.roundup = 1; + ret = cros_ec_motion_send_host_cmd(st, 0); + mutex_unlock(&st->cmd_lock); + } + return ret; +} + +SIMPLE_DEV_PM_OPS(cros_ec_sensors_pm_ops, NULL, cros_ec_sensors_resume); +EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops); + MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index 2198b50909ed..fed79ba27fda 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -145,8 +145,11 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_CALIBSCALE: st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE; - st->core.param.sensor_range.data = (val << 16) | (val2 / 100); + st->core.curr_range = (val << 16) | (val2 / 100); + st->core.param.sensor_range.data = st->core.curr_range; ret = cros_ec_motion_send_host_cmd(&st->core, 0); + if (ret == 0) + st->core.range_updated = true; break; default: ret = cros_ec_sensors_core_write(&st->core, chan, val, val2, @@ -256,6 +259,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids); static struct platform_driver cros_ec_light_prox_platform_driver = { .driver = { .name = "cros-ec-light-prox", + .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_light_prox_probe, .id_table = cros_ec_light_prox_ids, diff 
--git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c index c079b8960082..f0938b6fbba0 100644 --- a/drivers/iio/pressure/cros_ec_baro.c +++ b/drivers/iio/pressure/cros_ec_baro.c @@ -96,8 +96,11 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev, /* Always roundup, so caller gets at least what it asks for. */ st->core.param.sensor_range.roundup = 1; - if (cros_ec_motion_send_host_cmd(&st->core, 0)) - ret = -EIO; + ret = cros_ec_motion_send_host_cmd(&st->core, 0); + if (ret == 0) { + st->core.range_updated = true; + st->core.curr_range = val; + } break; default: ret = cros_ec_sensors_core_write(&st->core, chan, val, val2, @@ -199,6 +202,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_baro_ids); static struct platform_driver cros_ec_baro_platform_driver = { .driver = { .name = "cros-ec-baro", + .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_baro_probe, .id_table = cros_ec_baro_ids, diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 7bc961defa87..caa8bb279a34 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -42,6 +42,10 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * @resp: motion sensor response structure * @type: type of motion sensor * @loc: location where the motion sensor is placed + * @range_updated: True if the range of the sensor has been + * updated. + * @curr_range: If updated, the current range value. + * It will be reapplied at every resume. * @calib: calibration parameters. Note that trigger * captured data will always provide the calibrated * data @@ -65,6 +69,9 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; + bool range_updated; + int curr_range; + struct calib_data { s16 offset; u16 scale; @@ -114,7 +121,9 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); -/* List of extended channel specification for all sensors */ +extern const struct dev_pm_ops cros_ec_sensors_pm_ops; + +/* List of extended channel specification for all sensors. */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; extern const struct attribute *cros_ec_sensor_fifo_attributes[]; -- cgit v1.2.3-59-g8ed1b From 18d563858d97dbefc9a16c8210ef2f97dc264202 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Thu, 21 May 2020 18:53:22 +0100 Subject: iio: make iio_device_get_drvdata take a const struct iio_dev *. As this just calls dev_get_drvdata underneath which is happy with a const struct device * we should change and avoid potentially casting away a const in order to then put it back again. 
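A tiny illustration (not from the patch) of what the const qualifier buys: a helper that only inspects the device can now reach its driver data without casting away const. struct my_state and my_chip_is_busy() are made up, and the state is assumed to have been attached earlier with iio_device_set_drvdata():

#include <linux/iio/iio.h>

struct my_state {
	bool busy;
};

static bool my_chip_is_busy(const struct iio_dev *indio_dev)
{
	/* No cast from const is needed with the updated prototype. */
	const struct my_state *st = iio_device_get_drvdata(indio_dev);

	return st->busy;
}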
Signed-off-by: Jonathan Cameron Reviewed-by: Jean-Baptiste Maneyrol Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index a1be82e74c93..e846a0a7001e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -669,7 +669,7 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) * * Returns the data previously set with iio_device_set_drvdata() */ -static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) +static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) { return dev_get_drvdata(&indio_dev->dev); } -- cgit v1.2.3-59-g8ed1b From 78289b4a58b58e9a8a76ef43ffbaf04a097e33c6 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Wed, 3 Jun 2020 14:40:18 +0300 Subject: iio: core: pass parent device as parameter during allocation The change passes the parent device to the iio_device_alloc() call. This also updates the devm_iio_device_alloc() call to consider the device object as the parent device by default. Having it passed like this, should ensure that any IIO device object already has a device object as parent, allowing for neater control, like passing the 'indio_dev' object for other stuff [like buffers/triggers/etc], and potentially creating iiom_xxx(indio_dev) functions. With this patch, only the 'drivers/platform/x86/toshiba_acpi.c' needs an update to pass the parent object as a parameter. In the next patch all devm_iio_device_alloc() calls will be handled. Acked-by: Andy Shevchenko Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/dummy/iio_simple_dummy.c | 14 ++++++++------ drivers/iio/industrialio-core.c | 11 ++++++----- drivers/platform/x86/toshiba_acpi.c | 3 +-- drivers/staging/iio/Documentation/device.txt | 4 +--- include/linux/iio/iio.h | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c index 6cb02299a215..b35ae7c039f7 100644 --- a/drivers/iio/dummy/iio_simple_dummy.c +++ b/drivers/iio/dummy/iio_simple_dummy.c @@ -566,6 +566,13 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) struct iio_dev *indio_dev; struct iio_dummy_state *st; struct iio_sw_device *swd; + struct device *parent = NULL; + + /* + * With hardware: Set the parent device. + * parent = &spi->dev; + * parent = &client->dev; + */ swd = kzalloc(sizeof(*swd), GFP_KERNEL); if (!swd) { @@ -580,7 +587,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) * It also has a region (accessed by iio_priv() * for chip specific state information. */ - indio_dev = iio_device_alloc(sizeof(*st)); + indio_dev = iio_device_alloc(parent, sizeof(*st)); if (!indio_dev) { ret = -ENOMEM; goto error_ret; @@ -590,11 +597,6 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) mutex_init(&st->lock); iio_dummy_init_device(indio_dev); - /* - * With hardware: Set the parent device. - * indio_dev->dev.parent = &spi->dev; - * indio_dev->dev.parent = &client->dev; - */ /* * Make the iio_dev struct available to remove function. 
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 1527f01a44f1..75661661aaba 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1493,7 +1493,7 @@ struct device_type iio_device_type = { * iio_device_alloc() - allocate an iio_dev from a driver * @sizeof_priv: Space to allocate for private structure. **/ -struct iio_dev *iio_device_alloc(int sizeof_priv) +struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) { struct iio_dev *dev; size_t alloc_size; @@ -1510,6 +1510,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv) if (!dev) return NULL; + dev->dev.parent = parent; dev->dev.groups = dev->groups; dev->dev.type = &iio_device_type; dev->dev.bus = &iio_bus_type; @@ -1551,7 +1552,7 @@ static void devm_iio_device_release(struct device *dev, void *res) /** * devm_iio_device_alloc - Resource-managed iio_device_alloc() - * @dev: Device to allocate iio_dev for + * @parent: Device to allocate iio_dev for, and parent for this IIO device * @sizeof_priv: Space to allocate for private structure. * * Managed iio_device_alloc. iio_dev allocated with this function is @@ -1560,7 +1561,7 @@ static void devm_iio_device_release(struct device *dev, void *res) * RETURNS: * Pointer to allocated iio_dev on success, NULL on failure. */ -struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv) +struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) { struct iio_dev **ptr, *iio_dev; @@ -1569,10 +1570,10 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv) if (!ptr) return NULL; - iio_dev = iio_device_alloc(sizeof_priv); + iio_dev = iio_device_alloc(parent, sizeof_priv); if (iio_dev) { *ptr = iio_dev; - devres_add(dev, ptr); + devres_add(parent, ptr); } else { devres_free(ptr); } diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 1ddab5a6dead..36fff00af9eb 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -3114,7 +3114,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) toshiba_accelerometer_available(dev); if (dev->accelerometer_supported) { - dev->indio_dev = iio_device_alloc(sizeof(*dev)); + dev->indio_dev = iio_device_alloc(&acpi_dev->dev, sizeof(*dev)); if (!dev->indio_dev) { pr_err("Unable to allocate iio device\n"); goto iio_error; @@ -3124,7 +3124,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) dev->indio_dev->info = &toshiba_iio_accel_info; dev->indio_dev->name = "Toshiba accelerometer"; - dev->indio_dev->dev.parent = &acpi_dev->dev; dev->indio_dev->modes = INDIO_DIRECT_MODE; dev->indio_dev->channels = toshiba_iio_accel_channels; dev->indio_dev->num_channels = diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt index ec42544a46aa..0d1275b1eb3f 100644 --- a/drivers/staging/iio/Documentation/device.txt +++ b/drivers/staging/iio/Documentation/device.txt @@ -8,7 +8,7 @@ The crucial structure for device drivers in iio is iio_dev. First allocate one using: -struct iio_dev *indio_dev = iio_device_alloc(sizeof(struct chip_state)); +struct iio_dev *indio_dev = iio_device_alloc(parent, sizeof(struct chip_state)); where chip_state is a structure of local state data for this instance of the chip. @@ -16,8 +16,6 @@ That data can be accessed using iio_priv(struct iio_dev *). Then fill in the following: -- indio_dev->dev.parent - Struct device associated with the underlying hardware. 
- indio_dev->name Name of the device being driven - made available as the name attribute in sysfs. diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index e846a0a7001e..655e34a08d94 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -676,7 +676,7 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) /* Can we make this smaller? */ #define IIO_ALIGN L1_CACHE_BYTES -struct iio_dev *iio_device_alloc(int sizeof_priv); +struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); static inline void *iio_priv(const struct iio_dev *indio_dev) { @@ -690,7 +690,7 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv) } void iio_device_free(struct iio_dev *indio_dev); -struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); +struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...); /** -- cgit v1.2.3-59-g8ed1b From f5d017938e7a6517b85f7fd215213a28e11291fb Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Wed, 3 Jun 2020 14:40:19 +0300 Subject: iio: core: add iio_device_set_parent() helper By default, the device allocation will also assign a parent device to the IIO device object. In cases where devm_iio_device_alloc() is used, sometimes the parent device must be different than the device used to manage the allocation. In that case, this helper should be used to change the parent, hence the requirement to call this between allocation & registration. This pattern/requirement is not very common in the IIO space, and it may be cleaned up later. But until then, assigning the parent manually between allocation & registration is slightly easier. Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 655e34a08d94..1c1d02107722 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -649,6 +649,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; } +/** + * iio_device_set_parent() - assign parent device to the IIO device object + * @indio_dev: IIO device structure + * @parent: reference to parent device object + * + * This utility must be called between IIO device allocation + * (via devm_iio_device_alloc()) & IIO device registration + * (via {devm_}iio_device_register()). + * By default, the device allocation will also assign a parent device to + * the IIO device object. In cases where devm_iio_device_alloc() is used, + * sometimes the parent device must be different than the device used to + * manage the allocation. + * In that case, this helper should be used to change the parent, hence the + * requirement to call this between allocation & registration. 
+ **/ +static inline void iio_device_set_parent(struct iio_dev *indio_dev, + struct device *parent) +{ + indio_dev->dev.parent = parent; +} /** * iio_device_set_drvdata() - Set device driver data -- cgit v1.2.3-59-g8ed1b From e012d15a238f24795081ef1e43ffe2859b6538ed Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:46:47 +0200 Subject: gpio: driver.h: fix kernel-doc markup There is one parameter with a wrong name at kernel-doc macro: ./include/linux/gpio/driver.h:499: warning: Function parameter or member 'gc' not described in 'gpiochip_add_data' ./include/linux/gpio/driver.h:499: warning: Excess function parameter 'chip' description in 'gpiochip_add_data' Fix it. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Bartosz Golaszewski --- include/linux/gpio/driver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index c4f272af7af5..c11261f3c724 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -481,7 +481,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, /** * gpiochip_add_data() - register a gpio_chip - * @chip: the chip to register, with chip->base initialized + * @gc: the chip to register, with chip->base initialized * @data: driver-private data associated with this chip * * Context: potentially before irqs will work -- cgit v1.2.3-59-g8ed1b From e17d43b93e544f5016c0251d2074c15568d5d963 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:08 +0300 Subject: perf: Add perf text poke event Record (single instruction) changes to the kernel text (i.e. self-modifying code) in order to support tracers like Intel PT and ARM CoreSight. A copy of the running kernel code is needed as a reference point (e.g. from /proc/kcore). The text poke event records the old bytes and the new bytes so that the event can be processed forwards or backwards. The basic problem is recording the modified instruction in an unambiguous manner given SMP instruction cache (in)coherence. That is, when modifying an instruction concurrently any solution with one or multiple timestamps is not sufficient: CPU0 CPU1 0 1 write insn A 2 execute insn A 3 sync-I$ 4 Due to I$, CPU1 might execute either the old or new A. No matter where we record tracepoints on CPU0, one simply cannot tell what CPU1 will have observed, except that at 0 it must be the old one and at 4 it must be the new one. To solve this, take inspiration from x86 text poking, which has to solve this exact problem due to variable length instruction encoding and I-fetch windows. 1) overwrite the instruction with a breakpoint and sync I$ This guarantees that that code flow will never hit the target instruction anymore, on any CPU (or rather, it will cause an exception). 2) issue the TEXT_POKE event 3) overwrite the breakpoint with the new instruction and sync I$ Now we know that any execution after the TEXT_POKE event will either observe the breakpoint (and hit the exception) or the new instruction. So by guarding the TEXT_POKE event with an exception on either side; we can now tell, without doubt, which instruction another CPU will have observed. 
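As an illustration (not part of the patch), this is roughly how a tool consuming the perf ring buffer might apply a PERF_RECORD_TEXT_POKE record to its private copy of kernel text, for example a snapshot taken from /proc/kcore. The struct mirrors the record layout documented in the uapi hunk below, with the leading perf_event_header and the trailing padding/sample_id omitted; apply_text_poke() and its arguments are purely illustrative:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct text_poke_payload {
	uint64_t addr;		/* address of the modified instruction(s) */
	uint16_t old_len;	/* number of old bytes */
	uint16_t new_len;	/* number of new bytes */
	uint8_t  bytes[];	/* old_len old bytes, then new_len new bytes */
};

/* 'dst' points at 'addr' inside the tool's copy of kernel text. */
static int apply_text_poke(const struct text_poke_payload *rec,
			   uint8_t *dst, size_t dst_len)
{
	const uint8_t *old_bytes = rec->bytes;
	const uint8_t *new_bytes = rec->bytes + rec->old_len;

	if (dst_len < rec->old_len || dst_len < rec->new_len)
		return -1;

	/* Processing forwards: the copy should still hold the old bytes. */
	if (memcmp(dst, old_bytes, rec->old_len) != 0)
		return -1;

	memcpy(dst, new_bytes, rec->new_len);
	return 0;
}

Processing a trace backwards, a decoder would write old_bytes back instead, which is why the record carries both.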
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200512121922.8997-2-adrian.hunter@intel.com --- include/linux/perf_event.h | 8 ++++ include/uapi/linux/perf_event.h | 21 +++++++++- kernel/events/core.c | 90 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 117 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b4bb32082342..46fe5cfb5163 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1232,6 +1232,9 @@ extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); +extern void perf_event_text_poke(const void *addr, + const void *old_bytes, size_t old_len, + const void *new_bytes, size_t new_len); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); @@ -1479,6 +1482,11 @@ static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_text_poke(const void *addr, + const void *old_bytes, + size_t old_len, + const void *new_bytes, + size_t new_len) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 7b2d6fc9e6ed..e5bee6c17b86 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -383,7 +383,8 @@ struct perf_event_attr { bpf_event : 1, /* include bpf events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ - __reserved_1 : 31; + text_poke : 1, /* include text poke events */ + __reserved_1 : 30; union { __u32 wakeup_events; /* wakeup every n events */ @@ -1024,6 +1025,24 @@ enum perf_event_type { */ PERF_RECORD_CGROUP = 19, + /* + * Records changes to kernel text i.e. self-modified code. 'old_len' is + * the number of old bytes, 'new_len' is the number of new bytes. Either + * 'old_len' or 'new_len' may be zero to indicate, for example, the + * addition or removal of a trampoline. 'bytes' contains the old bytes + * followed immediately by the new bytes. 
+ * + * struct { + * struct perf_event_header header; + * u64 addr; + * u16 old_len; + * u16 new_len; + * u8 bytes[]; + * struct sample_id sample_id; + * }; + */ + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX, /* non-ABI */ }; diff --git a/kernel/events/core.c b/kernel/events/core.c index 856d98c36f56..9b8f92500833 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -394,6 +394,7 @@ static atomic_t nr_switch_events __read_mostly; static atomic_t nr_ksymbol_events __read_mostly; static atomic_t nr_bpf_events __read_mostly; static atomic_t nr_cgroup_events __read_mostly; +static atomic_t nr_text_poke_events __read_mostly; static LIST_HEAD(pmus); static DEFINE_MUTEX(pmus_lock); @@ -4575,7 +4576,7 @@ static bool is_sb_event(struct perf_event *event) if (attr->mmap || attr->mmap_data || attr->mmap2 || attr->comm || attr->comm_exec || attr->task || attr->ksymbol || - attr->context_switch || + attr->context_switch || attr->text_poke || attr->bpf_event) return true; return false; @@ -4651,6 +4652,8 @@ static void unaccount_event(struct perf_event *event) atomic_dec(&nr_ksymbol_events); if (event->attr.bpf_event) atomic_dec(&nr_bpf_events); + if (event->attr.text_poke) + atomic_dec(&nr_text_poke_events); if (dec) { if (!atomic_add_unless(&perf_sched_count, -1, 1)) @@ -8628,6 +8631,89 @@ void perf_event_bpf_event(struct bpf_prog *prog, perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL); } +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + + struct { + struct perf_event_header header; + + u64 addr; + } event_id; +}; + +static int perf_event_text_poke_match(struct perf_event *event) +{ + return event->attr.text_poke; +} + +static void perf_event_text_poke_output(struct perf_event *event, void *data) +{ + struct perf_text_poke_event *text_poke_event = data; + struct perf_output_handle handle; + struct perf_sample_data sample; + u64 padding = 0; + int ret; + + if (!perf_event_text_poke_match(event)) + return; + + perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); + + ret = perf_output_begin(&handle, event, text_poke_event->event_id.header.size); + if (ret) + return; + + perf_output_put(&handle, text_poke_event->event_id); + perf_output_put(&handle, text_poke_event->old_len); + perf_output_put(&handle, text_poke_event->new_len); + + __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len); + __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len); + + if (text_poke_event->pad) + __output_copy(&handle, &padding, text_poke_event->pad); + + perf_event__output_id_sample(event, &handle, &sample); + + perf_output_end(&handle); +} + +void perf_event_text_poke(const void *addr, const void *old_bytes, + size_t old_len, const void *new_bytes, size_t new_len) +{ + struct perf_text_poke_event text_poke_event; + size_t tot, pad; + + if (!atomic_read(&nr_text_poke_events)) + return; + + tot = sizeof(text_poke_event.old_len) + old_len; + tot += sizeof(text_poke_event.new_len) + new_len; + pad = ALIGN(tot, sizeof(u64)) - tot; + + text_poke_event = (struct perf_text_poke_event){ + .old_bytes = old_bytes, + .new_bytes = new_bytes, + .pad = pad, + .old_len = old_len, + .new_len = new_len, + .event_id = { + .header = { + .type = PERF_RECORD_TEXT_POKE, + .misc = PERF_RECORD_MISC_KERNEL, + .size = sizeof(text_poke_event.event_id) + tot + pad, + }, + .addr = (unsigned long)addr, + }, + }; + + perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL); +} + 
void perf_event_itrace_started(struct perf_event *event) { event->attach_state |= PERF_ATTACH_ITRACE; @@ -10945,6 +11031,8 @@ static void account_event(struct perf_event *event) atomic_inc(&nr_ksymbol_events); if (event->attr.bpf_event) atomic_inc(&nr_bpf_events); + if (event->attr.text_poke) + atomic_inc(&nr_text_poke_events); if (inc) { /* -- cgit v1.2.3-59-g8ed1b From d002b8bc6dbc20e9043e279196cff8795dba05fe Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 28 May 2020 11:00:58 +0300 Subject: kprobes: Add symbols for kprobe insn pages Symbols are needed for tools to describe instruction addresses. Pages allocated for kprobe's purposes need symbols to be created for them. Add such symbols to be visible via /proc/kallsyms. Note: kprobe insn pages are not used if ftrace is configured. To see the effect of this patch, the kernel must be configured with: # CONFIG_FUNCTION_TRACER is not set CONFIG_KPROBES=y and for optimised kprobes: CONFIG_OPTPROBES=y Example on x86: # perf probe __schedule Added new event: probe:__schedule (on __schedule) # cat /proc/kallsyms | grep '\[__builtin__kprobes\]' ffffffffc00d4000 t kprobe_insn_page [__builtin__kprobes] ffffffffc00d6000 t kprobe_optinsn_page [__builtin__kprobes] Note: This patch adds "__builtin__kprobes" as a module name in /proc/kallsyms for symbols for pages allocated for kprobes' purposes, even though "__builtin__kprobes" is not a module. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Link: https://lkml.kernel.org/r/20200528080058.20230-1-adrian.hunter@intel.com --- include/linux/kprobes.h | 15 +++++++++++++++ kernel/kallsyms.c | 37 +++++++++++++++++++++++++++++++++---- kernel/kprobes.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 594265bfd390..13fc58a74c04 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -242,6 +242,7 @@ struct kprobe_insn_cache { struct mutex mutex; void *(*alloc)(void); /* allocate insn page */ void (*free)(void *); /* free insn page */ + const char *sym; /* symbol for insn pages */ struct list_head pages; /* list of kprobe_insn_page */ size_t insn_size; /* size of instruction slot */ int nr_garbage; @@ -272,6 +273,10 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ { \ return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \ } +#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page" +#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page" +int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, + unsigned long *value, char *type, char *sym); #else /* __ARCH_WANT_KPROBES_INSN_SLOT */ #define DEFINE_INSN_CACHE_OPS(__name) \ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ @@ -373,6 +378,11 @@ void dump_kprobe(struct kprobe *kp); void *alloc_insn_page(void); void free_insn_page(void *page); +int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, + char *type, char *sym); #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) @@ -435,6 +445,11 @@ static inline bool within_kprobe_blacklist(unsigned long addr) { return true; } +static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} #endif /* CONFIG_KPROBES */ static inline int disable_kretprobe(struct 
kretprobe *rp) { diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 16c8c605f4b0..c6cc293c0e67 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -437,6 +438,7 @@ struct kallsym_iter { loff_t pos_arch_end; loff_t pos_mod_end; loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols. */ char type; @@ -496,11 +498,33 @@ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) static int get_ksymbol_bpf(struct kallsym_iter *iter) { + int ret; + strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN); iter->exported = 0; - return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, - &iter->value, &iter->type, - iter->name) < 0 ? 0 : 1; + ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, + &iter->value, &iter->type, + iter->name); + if (ret < 0) { + iter->pos_bpf_end = iter->pos; + return 0; + } + + return 1; +} + +/* + * This uses "__builtin__kprobes" as a module name for symbols for pages + * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a + * module. + */ +static int get_ksymbol_kprobe(struct kallsym_iter *iter) +{ + strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN); + iter->exported = 0; + return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end, + &iter->value, &iter->type, + iter->name) < 0 ? 0 : 1; } /* Returns space to next name. */ @@ -527,6 +551,7 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) iter->pos_arch_end = 0; iter->pos_mod_end = 0; iter->pos_ftrace_mod_end = 0; + iter->pos_bpf_end = 0; } } @@ -551,7 +576,11 @@ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) get_ksymbol_ftrace_mod(iter)) return 1; - return get_ksymbol_bpf(iter); + if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) && + get_ksymbol_bpf(iter)) + return 1; + + return get_ksymbol_kprobe(iter); } /* Returns false if pos at or past end of file. 
*/ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 50cd84f53df0..058c0be3464b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -118,6 +118,7 @@ struct kprobe_insn_cache kprobe_insn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), .alloc = alloc_insn_page, .free = free_insn_page, + .sym = KPROBE_INSN_PAGE_SYM, .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), .insn_size = MAX_INSN_SIZE, .nr_garbage = 0, @@ -290,12 +291,34 @@ bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) return ret; } +int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, + unsigned long *value, char *type, char *sym) +{ + struct kprobe_insn_page *kip; + int ret = -ERANGE; + + rcu_read_lock(); + list_for_each_entry_rcu(kip, &c->pages, list) { + if ((*symnum)--) + continue; + strlcpy(sym, c->sym, KSYM_NAME_LEN); + *type = 't'; + *value = (unsigned long)kip->insns; + ret = 0; + break; + } + rcu_read_unlock(); + + return ret; +} + #ifdef CONFIG_OPTPROBES /* For optimized_kprobe buffer */ struct kprobe_insn_cache kprobe_optinsn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), .alloc = alloc_insn_page, .free = free_insn_page, + .sym = KPROBE_OPTINSN_PAGE_SYM, .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), /* .insn_size is initialized later */ .nr_garbage = 0, @@ -2197,6 +2220,28 @@ static void kprobe_remove_ksym_blacklist(unsigned long entry) kprobe_remove_area_blacklist(entry, entry + 1); } +int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} + +int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym) +{ +#ifdef __ARCH_WANT_KPROBES_INSN_SLOT + if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym)) + return 0; +#ifdef CONFIG_OPTPROBES + if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym)) + return 0; +#endif +#endif + if (!arch_kprobe_get_kallsym(&symnum, value, type, sym)) + return 0; + return -ERANGE; +} + int __init __weak arch_populate_kprobe_blacklist(void) { return 0; -- cgit v1.2.3-59-g8ed1b From 69e49088692899d25dedfa22f00dfb9761e86ed7 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:11 +0300 Subject: kprobes: Add perf ksymbol events for kprobe insn pages Symbols are needed for tools to describe instruction addresses. Pages allocated for kprobe's purposes need symbols to be created for them. Add such symbols to be visible via perf ksymbol events. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Link: https://lkml.kernel.org/r/20200512121922.8997-5-adrian.hunter@intel.com --- include/uapi/linux/perf_event.h | 5 +++++ kernel/kprobes.c | 12 ++++++++++++ 2 files changed, 17 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e5bee6c17b86..e1a4179144a1 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1049,6 +1049,11 @@ enum perf_event_type { enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + /* + * Out of line code such as kprobe-replaced instructions or optimized + * kprobes. 
+ */ + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */ }; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 058c0be3464b..2b58740ca0f3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -184,6 +185,10 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) kip->cache = c; list_add_rcu(&kip->list, &c->pages); slot = kip->insns; + + /* Record the perf ksymbol register event after adding the page */ + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, + PAGE_SIZE, false, c->sym); out: mutex_unlock(&c->mutex); return slot; @@ -202,6 +207,13 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx) * next time somebody inserts a probe. */ if (!list_is_singular(&kip->list)) { + /* + * Record perf ksymbol unregister event before removing + * the page. + */ + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, + (unsigned long)kip->insns, PAGE_SIZE, true, + kip->cache->sym); list_del_rcu(&kip->list); synchronize_rcu(); kip->cache->free(kip->insns); -- cgit v1.2.3-59-g8ed1b From fc0ea795f53c8d7040fa42471f74fe51d78d0834 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:13 +0300 Subject: ftrace: Add symbols for ftrace trampolines Symbols are needed for tools to describe instruction addresses. Pages allocated for ftrace's purposes need symbols to be created for them. Add such symbols to be visible via /proc/kallsyms. Example on x86 with CONFIG_DYNAMIC_FTRACE=y # echo function > /sys/kernel/debug/tracing/current_tracer # cat /proc/kallsyms | grep '\[__builtin__ftrace\]' ffffffffc0238000 t ftrace_trampoline [__builtin__ftrace] Note: This patch adds "__builtin__ftrace" as a module name in /proc/kallsyms for symbols for pages allocated for ftrace's purposes, even though "__builtin__ftrace" is not a module. 
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200512121922.8997-7-adrian.hunter@intel.com --- include/linux/ftrace.h | 12 +++++--- kernel/kallsyms.c | 5 ++++ kernel/trace/ftrace.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e339dac91ee6..ce2c06f72e86 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -58,9 +58,6 @@ struct ftrace_direct_func; const char * ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym); -int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, - char *module_name, int *exported); #else static inline const char * ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, @@ -68,6 +65,13 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, { return NULL; } +#endif + +#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported); +#else static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) @@ -76,7 +80,6 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val } #endif - #ifdef CONFIG_FUNCTION_TRACER extern int ftrace_enabled; @@ -207,6 +210,7 @@ struct ftrace_ops { struct ftrace_ops_hash old_hash; unsigned long trampoline; unsigned long trampoline_size; + struct list_head list; #endif }; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index c6cc293c0e67..834bfdc43235 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -482,6 +482,11 @@ static int get_ksymbol_mod(struct kallsym_iter *iter) return 1; } +/* + * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace + * purposes. In that case "__builtin__ftrace" is used as a module name, even + * though "__builtin__ftrace" is not a module. + */ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) { int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end, diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c163c3531faf..31675b209db2 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2764,6 +2764,38 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) { } +/* List of trace_ops that have allocated trampolines */ +static LIST_HEAD(ftrace_ops_trampoline_list); + +static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) +{ + lockdep_assert_held(&ftrace_lock); + list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); +} + +static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) +{ + lockdep_assert_held(&ftrace_lock); + list_del_rcu(&ops->list); +} + +/* + * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols + * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is + * not a module. 
+ */ +#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" +#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" + +static void ftrace_trampoline_free(struct ftrace_ops *ops) +{ + if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && + ops->trampoline) + ftrace_remove_trampoline_from_kallsyms(ops); + + arch_ftrace_trampoline_free(ops); +} + static void ftrace_startup_enable(int command) { if (saved_ftrace_func != ftrace_trace_function) { @@ -2934,7 +2966,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) synchronize_rcu_tasks(); free_ops: - arch_ftrace_trampoline_free(ops); + ftrace_trampoline_free(ops); } return 0; @@ -6178,6 +6210,27 @@ struct ftrace_mod_map { unsigned int num_funcs; }; +static int ftrace_get_trampoline_kallsym(unsigned int symnum, + unsigned long *value, char *type, + char *name, char *module_name, + int *exported) +{ + struct ftrace_ops *op; + + list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { + if (!op->trampoline || symnum--) + continue; + *value = op->trampoline; + *type = 't'; + strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); + strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); + *exported = 0; + return 0; + } + + return -ERANGE; +} + #ifdef CONFIG_MODULES #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) @@ -6514,6 +6567,7 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, { struct ftrace_mod_map *mod_map; struct ftrace_mod_func *mod_func; + int ret; preempt_disable(); list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { @@ -6540,8 +6594,10 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, WARN_ON(1); break; } + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, + module_name, exported); preempt_enable(); - return -ERANGE; + return ret; } #else @@ -6553,6 +6609,18 @@ allocate_ftrace_mod_map(struct module *mod, { return NULL; } +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, char *module_name, + int *exported) +{ + int ret; + + preempt_disable(); + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, + module_name, exported); + preempt_enable(); + return ret; +} #endif /* CONFIG_MODULES */ struct ftrace_init_func { @@ -6733,7 +6801,12 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) static void ftrace_update_trampoline(struct ftrace_ops *ops) { + unsigned long trampoline = ops->trampoline; + arch_ftrace_update_trampoline(ops); + if (ops->trampoline && ops->trampoline != trampoline && + (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + ftrace_add_trampoline_to_kallsyms(ops); } void ftrace_init_trace_array(struct trace_array *tr) -- cgit v1.2.3-59-g8ed1b From dd9ddf466ad7a5d2e247925d81ebb0b878bf3b76 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:14 +0300 Subject: ftrace: Add perf ksymbol events for ftrace trampolines Symbols are needed for tools to describe instruction addresses. Pages allocated for ftrace's purposes need symbols to be created for them. Add such symbols to be visible via perf ksymbol events. 
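Any perf_event user that sets attr.ksymbol receives these records; a minimal userspace sketch (ring-buffer mmap and record parsing omitted, helper name made up for illustration):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_dummy_event_with_ksymbol(void)
	{
		struct perf_event_attr attr = {
			.type = PERF_TYPE_SOFTWARE,
			.config = PERF_COUNT_SW_DUMMY,
			.size = sizeof(attr),
			.ksymbol = 1,	/* request PERF_RECORD_KSYMBOL records */
		};

		/* pid = -1, cpu = 0: system-wide monitoring on CPU 0 */
		return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	}

Trampoline allocation and freeing then show up as PERF_RECORD_KSYMBOL records with ksym_type PERF_RECORD_KSYMBOL_TYPE_OOL.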
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200512121922.8997-8-adrian.hunter@intel.com --- include/uapi/linux/perf_event.h | 2 +- kernel/trace/ftrace.c | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1a4179144a1..52ca2093831c 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1051,7 +1051,7 @@ enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_BPF = 1, /* * Out of line code such as kprobe-replaced instructions or optimized - * kprobes. + * kprobes or ftrace trampolines. */ PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 31675b209db2..2baaf7716537 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2790,8 +2790,13 @@ static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) static void ftrace_trampoline_free(struct ftrace_ops *ops) { if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && - ops->trampoline) + ops->trampoline) { + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, + ops->trampoline, ops->trampoline_size, + true, FTRACE_TRAMPOLINE_SYM); + /* Remove from kallsyms after the perf events */ ftrace_remove_trampoline_from_kallsyms(ops); + } arch_ftrace_trampoline_free(ops); } @@ -6805,8 +6810,13 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) arch_ftrace_update_trampoline(ops); if (ops->trampoline && ops->trampoline != trampoline && - (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { + /* Add to kallsyms before the perf events */ ftrace_add_trampoline_to_kallsyms(ops); + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, + ops->trampoline, ops->trampoline_size, false, + FTRACE_TRAMPOLINE_SYM); + } } void ftrace_init_trace_array(struct trace_array *tr) -- cgit v1.2.3-59-g8ed1b From 3dc167ba5729ddd2d8e3fa1841653792c295d3f1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 19 May 2020 19:25:06 +0200 Subject: sched/cputime: Improve cputime_adjust() People report that utime and stime from /proc//stat become very wrong when the numbers are big enough, especially if you watch these counters incrementally. Specifically, the current implementation of: stime*rtime/total, results in a saw-tooth function on top of the desired line, where the teeth grow in size the larger the values become. IOW, it has a relative error. The result is that, when watching incrementally as time progresses (for large values), we'll see periods of pure stime or utime increase, irrespective of the actual ratio we're striving for. Replace scale_stime() with a math64.h helper: mul_u64_u64_div_u64() that is far more accurate. This also allows architectures to override the implementation -- for instance they can opt for the old algorithm if this new one turns out to be too expensive for them. 
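For reference, the new helper computes the full 128-bit product before dividing, i.e. floor(a * b / c) with no intermediate truncation. A minimal userspace model (assuming a compiler with unsigned __int128 and a result that fits in 64 bits, as the x86 inline-asm version also assumes) is:

	#include <stdint.h>

	/* Reference model of mul_u64_u64_div_u64(): floor(a * b / c). */
	static uint64_t mul_u64_u64_div_u64_ref(uint64_t a, uint64_t b, uint64_t c)
	{
		return (uint64_t)(((unsigned __int128)a * b) / c);
	}

With it, cputime_adjust() reduces to a single stime = mul_u64_u64_div_u64(stime, rtime, stime + utime) and the saw-tooth goes away.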
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200519172506.GA317395@hirez.programming.kicks-ass.net --- arch/x86/include/asm/div64.h | 14 ++++++++++++-- include/linux/math64.h | 2 ++ kernel/sched/cputime.c | 46 +------------------------------------------- lib/math/div64.c | 41 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h index 9b8cb50768c2..b8f1dc0761e4 100644 --- a/arch/x86/include/asm/div64.h +++ b/arch/x86/include/asm/div64.h @@ -74,16 +74,26 @@ static inline u64 mul_u32_u32(u32 a, u32 b) #else # include -static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div) +/* + * Will generate an #DE when the result doesn't fit u64, could fix with an + * __ex_table[] entry when it becomes an issue. + */ +static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div) { u64 q; asm ("mulq %2; divq %3" : "=a" (q) - : "a" (a), "rm" ((u64)mul), "rm" ((u64)div) + : "a" (a), "rm" (mul), "rm" (div) : "rdx"); return q; } +#define mul_u64_u64_div_u64 mul_u64_u64_div_u64 + +static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div) +{ + return mul_u64_u64_div_u64(a, mul, div); +} #define mul_u64_u32_div mul_u64_u32_div #endif /* CONFIG_X86_32 */ diff --git a/include/linux/math64.h b/include/linux/math64.h index 11a267413e8e..d097119419e6 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -263,6 +263,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } #endif /* mul_u64_u32_div */ +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); + #define DIV64_U64_ROUND_UP(ll, d) \ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ff9435dee1df..5a55d2300452 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -519,50 +519,6 @@ void account_idle_ticks(unsigned long ticks) account_idle_time(cputime); } -/* - * Perform (stime * rtime) / total, but avoid multiplication overflow by - * losing precision when the numbers are big. - */ -static u64 scale_stime(u64 stime, u64 rtime, u64 total) -{ - u64 scaled; - - for (;;) { - /* Make sure "rtime" is the bigger of stime/rtime */ - if (stime > rtime) - swap(rtime, stime); - - /* Make sure 'total' fits in 32 bits */ - if (total >> 32) - goto drop_precision; - - /* Does rtime (and thus stime) fit in 32 bits? */ - if (!(rtime >> 32)) - break; - - /* Can we just balance rtime/stime rather than dropping bits? */ - if (stime >> 31) - goto drop_precision; - - /* We can grow stime and shrink rtime and try to make them both fit */ - stime <<= 1; - rtime >>= 1; - continue; - -drop_precision: - /* We drop from rtime, it has more bits than stime */ - rtime >>= 1; - total >>= 1; - } - - /* - * Make sure gcc understands that this is a 32x32->64 multiply, - * followed by a 64/32->64 divide. - */ - scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total); - return scaled; -} - /* * Adjust tick based cputime random precision against scheduler runtime * accounting. 
@@ -622,7 +578,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, goto update; } - stime = scale_stime(stime, rtime, stime + utime); + stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); update: /* diff --git a/lib/math/div64.c b/lib/math/div64.c index 368ca7fd0d82..3952a07130d8 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return __iter_div_u64_rem(dividend, divisor, remainder); } EXPORT_SYMBOL(iter_div_u64_rem); + +#ifndef mul_u64_u64_div_u64 +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. + * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif -- cgit v1.2.3-59-g8ed1b From 4581bea8b4ec4de353369775dfef921191e393b3 Mon Sep 17 00:00:00 2001 From: Vincent Donnefort Date: Wed, 27 May 2020 17:39:14 +0100 Subject: sched/debug: Add new tracepoints to track util_est The util_est signals are key elements for EAS task placement and frequency selection. Having tracepoints to track these signals enables load-tracking and schedutil testing and/or debugging by a toolkit. Signed-off-by: Vincent Donnefort Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/1590597554-370150-1-git-send-email-vincent.donnefort@arm.com --- include/trace/events/sched.h | 8 ++++++++ kernel/sched/core.c | 2 ++ kernel/sched/fair.c | 6 ++++++ 3 files changed, 16 insertions(+) (limited to 'include') diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index ed168b0e2c53..04f9a4c7b0d9 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -634,6 +634,14 @@ DECLARE_TRACE(sched_overutilized_tp, TP_PROTO(struct root_domain *rd, bool overutilized), TP_ARGS(rd, overutilized)); +DECLARE_TRACE(sched_util_est_cfs_tp, + TP_PROTO(struct cfs_rq *cfs_rq), + TP_ARGS(cfs_rq)); + +DECLARE_TRACE(sched_util_est_se_tp, + TP_PROTO(struct sched_entity *se), + TP_ARGS(se)); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9c89b0eaf796..0208b71bef80 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -36,6 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 69da576f7f48..a785a9b262dd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3922,6 +3922,8 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq, enqueued = cfs_rq->avg.util_est.enqueued; enqueued += _task_util_est(p); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); + + 
trace_sched_util_est_cfs_tp(cfs_rq); } /* @@ -3952,6 +3954,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p)); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); + trace_sched_util_est_cfs_tp(cfs_rq); + /* * Skip update of task's estimated utilization when the task has not * yet completed an activation, e.g. being migrated. @@ -4017,6 +4021,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; done: WRITE_ONCE(p->se.avg.util_est, ue); + + trace_sched_util_est_se_tp(&p->se); } static inline int task_fits_capacity(struct task_struct *p, long capacity) -- cgit v1.2.3-59-g8ed1b From 461daba06bdcb9c7a3f92b9bbd110e1f7d093ffc Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 28 May 2020 12:54:42 -0700 Subject: psi: eliminate kthread_worker from psi trigger scheduling mechanism Each psi group requires a dedicated kthread_delayed_work and kthread_worker. Since no other work can be performed using psi_group's kthread_worker, the same result can be obtained using a task_struct and a timer directly. This makes psi triggering simpler by removing lists and locks involved with kthread_worker usage and eliminates the need for poll_scheduled atomic use in the hot path. Signed-off-by: Suren Baghdasaryan Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200528195442.190116-1-surenb@google.com --- include/linux/psi_types.h | 7 +-- kernel/sched/psi.c | 113 ++++++++++++++++++++++++++-------------------- 2 files changed, 68 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 4b7258495a04..b95f3211566a 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -153,9 +153,10 @@ struct psi_group { unsigned long avg[NR_PSI_STATES - 1][3]; /* Monitor work control */ - atomic_t poll_scheduled; - struct kthread_worker __rcu *poll_kworker; - struct kthread_delayed_work poll_work; + struct task_struct __rcu *poll_task; + struct timer_list poll_timer; + wait_queue_head_t poll_wait; + atomic_t poll_wakeup; /* Protects data used by the monitor */ struct mutex trigger_lock; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 8f45cdb6463b..e53b711bd643 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -190,7 +190,6 @@ static void group_init(struct psi_group *group) INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); mutex_init(&group->avgs_lock); /* Init trigger-related members */ - atomic_set(&group->poll_scheduled, 0); mutex_init(&group->trigger_lock); INIT_LIST_HEAD(&group->triggers); memset(group->nr_triggers, 0, sizeof(group->nr_triggers)); @@ -199,7 +198,7 @@ static void group_init(struct psi_group *group) memset(group->polling_total, 0, sizeof(group->polling_total)); group->polling_next_update = ULLONG_MAX; group->polling_until = 0; - rcu_assign_pointer(group->poll_kworker, NULL); + rcu_assign_pointer(group->poll_task, NULL); } void __init psi_init(void) @@ -547,47 +546,38 @@ static u64 update_triggers(struct psi_group *group, u64 now) return now + group->poll_min_period; } -/* - * Schedule polling if it's not already scheduled. It's safe to call even from - * hotpath because even though kthread_queue_delayed_work takes worker->lock - * spinlock that spinlock is never contended due to poll_scheduled atomic - * preventing such competition. - */ +/* Schedule polling if it's not already scheduled. 
*/ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay) { - struct kthread_worker *kworker; + struct task_struct *task; - /* Do not reschedule if already scheduled */ - if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0) + /* + * Do not reschedule if already scheduled. + * Possible race with a timer scheduled after this check but before + * mod_timer below can be tolerated because group->polling_next_update + * will keep updates on schedule. + */ + if (timer_pending(&group->poll_timer)) return; rcu_read_lock(); - kworker = rcu_dereference(group->poll_kworker); + task = rcu_dereference(group->poll_task); /* * kworker might be NULL in case psi_trigger_destroy races with * psi_task_change (hotpath) which can't use locks */ - if (likely(kworker)) - kthread_queue_delayed_work(kworker, &group->poll_work, delay); - else - atomic_set(&group->poll_scheduled, 0); + if (likely(task)) + mod_timer(&group->poll_timer, jiffies + delay); rcu_read_unlock(); } -static void psi_poll_work(struct kthread_work *work) +static void psi_poll_work(struct psi_group *group) { - struct kthread_delayed_work *dwork; - struct psi_group *group; u32 changed_states; u64 now; - dwork = container_of(work, struct kthread_delayed_work, work); - group = container_of(dwork, struct psi_group, poll_work); - - atomic_set(&group->poll_scheduled, 0); - mutex_lock(&group->trigger_lock); now = sched_clock(); @@ -623,6 +613,35 @@ out: mutex_unlock(&group->trigger_lock); } +static int psi_poll_worker(void *data) +{ + struct psi_group *group = (struct psi_group *)data; + struct sched_param param = { + .sched_priority = 1, + }; + + sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m); + + while (true) { + wait_event_interruptible(group->poll_wait, + atomic_cmpxchg(&group->poll_wakeup, 1, 0) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + psi_poll_work(group); + } + return 0; +} + +static void poll_timer_fn(struct timer_list *t) +{ + struct psi_group *group = from_timer(group, t, poll_timer); + + atomic_set(&group->poll_wakeup, 1); + wake_up_interruptible(&group->poll_wait); +} + static void record_times(struct psi_group_cpu *groupc, int cpu, bool memstall_tick) { @@ -1099,22 +1118,20 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, mutex_lock(&group->trigger_lock); - if (!rcu_access_pointer(group->poll_kworker)) { - struct sched_param param = { - .sched_priority = 1, - }; - struct kthread_worker *kworker; + if (!rcu_access_pointer(group->poll_task)) { + struct task_struct *task; - kworker = kthread_create_worker(0, "psimon"); - if (IS_ERR(kworker)) { + task = kthread_create(psi_poll_worker, group, "psimon"); + if (IS_ERR(task)) { kfree(t); mutex_unlock(&group->trigger_lock); - return ERR_CAST(kworker); + return ERR_CAST(task); } - sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m); - kthread_init_delayed_work(&group->poll_work, - psi_poll_work); - rcu_assign_pointer(group->poll_kworker, kworker); + atomic_set(&group->poll_wakeup, 0); + init_waitqueue_head(&group->poll_wait); + wake_up_process(task); + timer_setup(&group->poll_timer, poll_timer_fn, 0); + rcu_assign_pointer(group->poll_task, task); } list_add(&t->node, &group->triggers); @@ -1132,7 +1149,7 @@ static void psi_trigger_destroy(struct kref *ref) { struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount); struct psi_group *group = t->group; - struct kthread_worker *kworker_to_destroy = NULL; + struct task_struct *task_to_destroy = NULL; if (static_branch_likely(&psi_disabled)) return; @@ 
-1158,13 +1175,13 @@ static void psi_trigger_destroy(struct kref *ref) period = min(period, div_u64(tmp->win.size, UPDATES_PER_WINDOW)); group->poll_min_period = period; - /* Destroy poll_kworker when the last trigger is destroyed */ + /* Destroy poll_task when the last trigger is destroyed */ if (group->poll_states == 0) { group->polling_until = 0; - kworker_to_destroy = rcu_dereference_protected( - group->poll_kworker, + task_to_destroy = rcu_dereference_protected( + group->poll_task, lockdep_is_held(&group->trigger_lock)); - rcu_assign_pointer(group->poll_kworker, NULL); + rcu_assign_pointer(group->poll_task, NULL); } } @@ -1172,25 +1189,23 @@ static void psi_trigger_destroy(struct kref *ref) /* * Wait for both *trigger_ptr from psi_trigger_replace and - * poll_kworker RCUs to complete their read-side critical sections - * before destroying the trigger and optionally the poll_kworker + * poll_task RCUs to complete their read-side critical sections + * before destroying the trigger and optionally the poll_task */ synchronize_rcu(); /* * Destroy the kworker after releasing trigger_lock to prevent a * deadlock while waiting for psi_poll_work to acquire trigger_lock */ - if (kworker_to_destroy) { + if (task_to_destroy) { /* * After the RCU grace period has expired, the worker - * can no longer be found through group->poll_kworker. + * can no longer be found through group->poll_task. * But it might have been already scheduled before * that - deschedule it cleanly before destroying it. */ - kthread_cancel_delayed_work_sync(&group->poll_work); - atomic_set(&group->poll_scheduled, 0); - - kthread_destroy_worker(kworker_to_destroy); + del_timer_sync(&group->poll_timer); + kthread_stop(task_to_destroy); } kfree(t); } -- cgit v1.2.3-59-g8ed1b From 9cc5b8656892a72438ee7deb5e80f5be47643b8b Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 27 May 2020 16:29:09 +0200 Subject: isolcpus: Affine unbound kernel threads to housekeeping cpus This is a kernel enhancement that configures the cpu affinity of kernel threads via kernel boot option nohz_full=. When this option is specified, the cpumask is immediately applied upon kthread launch. This does not affect kernel threads that specify cpu and node. This allows CPU isolation (that is not allowing certain threads to execute on certain CPUs) without using the isolcpus=domain parameter, making it possible to enable load balancing on such CPUs during runtime (see kernel-parameters.txt). Note-1: this is based off on Wind River's patch at https://github.com/starlingx-staging/stx-integ/blob/master/kernel/kernel-std/centos/patches/affine-compute-kernel-threads.patch Difference being that this patch is limited to modifying kernel thread cpumask. Behaviour of other threads can be controlled via cgroups or sched_setaffinity. Note-2: Wind River's patch was based off Christoph Lameter's patch at https://lwn.net/Articles/565932/ with the only difference being the kernel parameter changed from kthread to kthread_cpus. 
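For example, on a hypothetical 8-CPU machine booted with:

	nohz_full=1-7

CPU 0 is the only housekeeping CPU, so kthreadd and the unbound kernel threads it spawns are now affined to CPU 0 instead of cpu_possible_mask, while scheduler load balancing stays enabled on CPUs 1-7 (unlike with isolcpus=domain).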
Signed-off-by: Frederic Weisbecker Signed-off-by: Marcelo Tosatti Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200527142909.23372-3-frederic@kernel.org --- include/linux/sched/isolation.h | 1 + kernel/kthread.c | 6 ++++-- kernel/sched/isolation.c | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index 0fbcbacd1b29..cc9f393e2a70 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -14,6 +14,7 @@ enum hk_flags { HK_FLAG_DOMAIN = (1 << 5), HK_FLAG_WQ = (1 << 6), HK_FLAG_MANAGED_IRQ = (1 << 7), + HK_FLAG_KTHREAD = (1 << 8), }; #ifdef CONFIG_CPU_ISOLATION diff --git a/kernel/kthread.c b/kernel/kthread.c index b86d37cda109..032b610912b0 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -383,7 +384,8 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), * The kernel thread should not inherit these properties. */ sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m); - set_cpus_allowed_ptr(task, cpu_possible_mask); + set_cpus_allowed_ptr(task, + housekeeping_cpumask(HK_FLAG_KTHREAD)); } kfree(create); return task; @@ -608,7 +610,7 @@ int kthreadd(void *unused) /* Setup a clean context for our children to inherit. */ set_task_comm(tsk, "kthreadd"); ignore_signals(tsk); - set_cpus_allowed_ptr(tsk, cpu_possible_mask); + set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD)); set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 808244f3ddd9..5a6ea03f9882 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -140,7 +140,8 @@ static int __init housekeeping_nohz_full_setup(char *str) { unsigned int flags; - flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC; + flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU | + HK_FLAG_MISC | HK_FLAG_KTHREAD; return housekeeping_setup(str, flags); } -- cgit v1.2.3-59-g8ed1b From b4098bfc5efb1fd7ecf40165132a1283aeea3500 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 26 Jul 2019 16:54:10 +0200 Subject: sched/deadline: Impose global limits on sched_attr::sched_period Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20190726161357.397880775@infradead.org --- include/linux/sched/sysctl.h | 3 +++ kernel/sched/deadline.c | 23 +++++++++++++++++++++-- kernel/sysctl.c | 14 ++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 660ac49f2b53..24be30a40814 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -61,6 +61,9 @@ int sched_proc_update_handler(struct ctl_table *table, int write, extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; +extern unsigned int sysctl_sched_dl_period_max; +extern unsigned int sysctl_sched_dl_period_min; + #ifdef CONFIG_UCLAMP_TASK extern unsigned int sysctl_sched_uclamp_util_min; extern unsigned int sysctl_sched_uclamp_util_max; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 504d2f51b0d6..f31964ad9c2e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2634,6 +2634,14 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr) attr->sched_flags = dl_se->flags; } +/* + * Default limits for DL 
period; on the top end we guard against small util + tasks still getting rediculous long effective runtimes, on the bottom end we + guard against timer DoS. + */ +unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */ +unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */ + /* * This function validates the new parameters of a -deadline task. * We ask for the deadline not being zero, and greater or equal @@ -2646,6 +2654,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr) */ bool __checkparam_dl(const struct sched_attr *attr) { + u64 period, max, min; + /* special dl tasks don't actually use any parameter */ if (attr->sched_flags & SCHED_FLAG_SUGOV) return true; @@ -2669,12 +2679,21 @@ bool __checkparam_dl(const struct sched_attr *attr) attr->sched_period & (1ULL << 63)) return false; + period = attr->sched_period; + if (!period) + period = attr->sched_deadline; + /* runtime <= deadline <= period (if period != 0) */ - if ((attr->sched_period != 0 && - attr->sched_period < attr->sched_deadline) || + if (period < attr->sched_deadline || attr->sched_deadline < attr->sched_runtime) return false; + max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC; + min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC; + + if (period < min || period > max) + return false; + return true; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index db1ce7af2563..4aea67d3d552 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1779,6 +1779,20 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sched_rt_handler, }, + { + .procname = "sched_deadline_period_max_us", + .data = &sysctl_sched_dl_period_max, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sched_deadline_period_min_us", + .data = &sysctl_sched_dl_period_min, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "sched_rr_timeslice_ms", .data = &sysctl_sched_rr_timeslice, -- cgit v1.2.3-59-g8ed1b From 7318d4cc14c8c8a5dde2b0b72ea50fd2545f0b7a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 21 Apr 2020 12:09:13 +0200 Subject: sched: Provide sched_set_fifo() SCHED_FIFO (or any static priority scheduler) is a broken scheduler model; it is fundamentally incapable of resource management, the one thing an OS is actually supposed to do. It is impossible to compose static priority workloads. One cannot take two well designed and functional static priority workloads and mash them together and still expect them to work. Therefore it doesn't make sense to expose the priority field; the kernel is fundamentally incapable of setting a sensible value, it needs systems knowledge that it doesn't have. Take away sched_setscheduler() / sched_setattr() from modules and replace them with: - sched_set_fifo(p); create a FIFO task (at prio 50) - sched_set_fifo_low(p); create a task higher than NORMAL, which ends up being a FIFO task at prio 1. - sched_set_normal(p, nice); (re)set the task to normal This stops the proliferation of randomly chosen, and irrelevant, FIFO priorities that don't really mean anything anyway. The system administrator/integrator, whoever has insight into the actual system design and requirements (userspace) can set up appropriate priorities if and when needed.
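For illustration, a sketch of what a converted user looks like; the driver names are hypothetical, the pattern matches the in-tree callers converted in the next patch:

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int foo_start_worker(struct foo_device *fdev)
	{
		fdev->task = kthread_create(foo_worker_fn, fdev, "foo-worker");
		if (IS_ERR(fdev->task))
			return PTR_ERR(fdev->task);

		/* RT without inventing a priority number; the core picks prio 50 */
		sched_set_fifo(fdev->task);
		wake_up_process(fdev->task);
		return 0;
	}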
Cc: airlied@redhat.com Cc: alexander.deucher@amd.com Cc: awalls@md.metrocast.net Cc: axboe@kernel.dk Cc: broonie@kernel.org Cc: daniel.lezcano@linaro.org Cc: gregkh@linuxfoundation.org Cc: hannes@cmpxchg.org Cc: herbert@gondor.apana.org.au Cc: hverkuil@xs4all.nl Cc: john.stultz@linaro.org Cc: nico@fluxnic.net Cc: paulmck@kernel.org Cc: rafael.j.wysocki@intel.com Cc: rmk+kernel@arm.linux.org.uk Cc: sudeep.holla@arm.com Cc: tglx@linutronix.de Cc: ulf.hansson@linaro.org Cc: wim@linux-watchdog.org Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar Tested-by: Paul E. McKenney --- include/linux/sched.h | 3 +++ kernel/sched/core.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index b62e6aaf28f0..b792b8f0f4cf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1653,6 +1653,9 @@ extern int idle_cpu(int cpu); extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_set_fifo(struct task_struct *p); +extern int sched_set_fifo_low(struct task_struct *p); +extern int sched_set_normal(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0208b71bef80..40d3939b0520 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5124,6 +5124,8 @@ static int _sched_setscheduler(struct task_struct *p, int policy, * @policy: new policy. * @param: structure containing the new RT priority. * + * Use sched_set_fifo(), read its comment. + * * Return: 0 on success. An error code otherwise. * * NOTE that the task may be already dead. @@ -5166,6 +5168,51 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy, } EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); +/* + * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally + * incapable of resource management, which is the one thing an OS really should + * be doing. + * + * This is of course the reason it is limited to privileged users only. + * + * Worse still; it is fundamentally impossible to compose static priority + * workloads. You cannot take two correctly working static prio workloads + * and smash them together and still expect them to work. + * + * For this reason 'all' FIFO tasks the kernel creates are basically at: + * + * MAX_RT_PRIO / 2 + * + * The administrator _MUST_ configure the system, the kernel simply doesn't + * know enough information to make a sensible choice. + */ +int sched_set_fifo(struct task_struct *p) +{ + struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; + return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); +} +EXPORT_SYMBOL_GPL(sched_set_fifo); + +/* + * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 
+ */ +int sched_set_fifo_low(struct task_struct *p) +{ + struct sched_param sp = { .sched_priority = 1 }; + return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); +} +EXPORT_SYMBOL_GPL(sched_set_fifo_low); + +int sched_set_normal(struct task_struct *p, int nice) +{ + struct sched_attr attr = { + .sched_policy = SCHED_NORMAL, + .sched_nice = nice, + }; + return sched_setattr_nocheck(p, &attr); +} +EXPORT_SYMBOL_GPL(sched_set_normal); + static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { -- cgit v1.2.3-59-g8ed1b From 8b700983de82f79e05b2c1136d6513ea4c9b22c4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 22 Apr 2020 13:10:04 +0200 Subject: sched: Remove sched_set_*() return value Ingo suggested that since the new sched_set_*() functions are implemented using the 'nocheck' variants, they really shouldn't ever fail, so remove the return value. Cc: axboe@kernel.dk Cc: daniel.lezcano@linaro.org Cc: sudeep.holla@arm.com Cc: airlied@redhat.com Cc: broonie@kernel.org Cc: paulmck@kernel.org Suggested-by: Ingo Molnar Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar --- drivers/block/drbd/drbd_receiver.c | 4 +--- drivers/firmware/psci/psci_checker.c | 3 +-- drivers/gpu/drm/msm/msm_drv.c | 5 +---- drivers/platform/chrome/cros_ec_spi.c | 7 +++---- include/linux/sched.h | 6 +++--- kernel/rcu/rcutorture.c | 5 +---- kernel/sched/core.c | 12 ++++++------ 7 files changed, 16 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 140fd98274b1..280615efef74 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -6020,9 +6020,7 @@ int drbd_ack_receiver(struct drbd_thread *thi) int expect = header_size; bool ping_timeout_active = false; - rv = sched_set_fifo_low(current); - if (rv < 0) - drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv); + sched_set_fifo_low(current); while (get_t_state(thi) == RUNNING) { drbd_thread_current_set_cpu(thi); diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index a5279a430274..6fff482847e7 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -281,8 +281,7 @@ static int suspend_test_thread(void *arg) wait_for_completion(&suspend_threads_started); /* Set maximum priority to preempt all other threads on this CPU. 
*/ - if (sched_set_fifo(current)) - pr_warn("Failed to set suspend thread scheduler on CPU %d\n", cpu); + sched_set_fifo(current); dev = this_cpu_read(cpuidle_devices); drv = cpuidle_get_cpu_driver(dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 89a8b9c7e044..556cca38487c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -509,10 +509,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) goto err_msm_uninit; } - ret = sched_set_fifo(priv->event_thread[i].thread); - if (ret) - dev_warn(dev, "event_thread set priority failed:%d\n", - ret); + sched_set_fifo(priv->event_thread[i].thread); } ret = drm_vblank_init(ddev, priv->num_crtcs); diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index c20a43a97040..d09260382550 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -725,10 +725,9 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev, if (err) return err; - err = sched_set_fifo(ec_spi->high_pri_worker->task); - if (err) - dev_err(dev, "Can't set cros_ec high pri priority: %d\n", err); - return err; + sched_set_fifo(ec_spi->high_pri_worker->task); + + return 0; } static int cros_ec_spi_probe(struct spi_device *spi) diff --git a/include/linux/sched.h b/include/linux/sched.h index b792b8f0f4cf..ae7664492af2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1653,9 +1653,9 @@ extern int idle_cpu(int cpu); extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); -extern int sched_set_fifo(struct task_struct *p); -extern int sched_set_fifo_low(struct task_struct *p); -extern int sched_set_normal(struct task_struct *p, int nice); +extern void sched_set_fifo(struct task_struct *p); +extern void sched_set_fifo_low(struct task_struct *p); +extern void sched_set_normal(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index bbc3c8a9e7b9..b4c1146de414 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -893,10 +893,7 @@ static int rcu_torture_boost(void *arg) VERBOSE_TOROUT_STRING("rcu_torture_boost started"); /* Set real-time priority. */ - if (sched_set_fifo_low(current) < 0) { - VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); - n_rcu_torture_boost_rterror++; - } + sched_set_fifo_low(current); init_rcu_head_on_stack(&rbi.rcu); /* Each pass through the following loop does one boost-test cycle. */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f882d3d74dad..41d3778ea80e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5183,30 +5183,30 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy, * The administrator _MUST_ configure the system, the kernel simply doesn't * know enough information to make a sensible choice. 
*/ -int sched_set_fifo(struct task_struct *p) +void sched_set_fifo(struct task_struct *p) { struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; - return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); + WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); } EXPORT_SYMBOL_GPL(sched_set_fifo); /* * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. */ -int sched_set_fifo_low(struct task_struct *p) +void sched_set_fifo_low(struct task_struct *p) { struct sched_param sp = { .sched_priority = 1 }; - return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); + WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); } EXPORT_SYMBOL_GPL(sched_set_fifo_low); -int sched_set_normal(struct task_struct *p, int nice) +void sched_set_normal(struct task_struct *p, int nice) { struct sched_attr attr = { .sched_policy = SCHED_NORMAL, .sched_nice = nice, }; - return sched_setattr_nocheck(p, &attr); + WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); } EXPORT_SYMBOL_GPL(sched_set_normal); -- cgit v1.2.3-59-g8ed1b From 907f53200f982cd377e75abbc1b18ed624d7ae66 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Sat, 30 May 2020 13:46:39 +0100 Subject: drm: vmwgfx: remove drm_driver::master_set() return type The function always returns zero (success). Ideally we'll remove it altogether - although that requires a little more work. For now, we can drop the return type and simplify the drm core code surrounding it. v2: remove redundant assignment (Sam) Cc: David Airlie Cc: Daniel Vetter Cc: VMware Graphics Cc: Roland Scheidegger Signed-off-by: Emil Velikov Cc: Sam Ravnborg Reviewed-by: Sam Ravnborg Reviewed-by: Roland Scheidegger Link: https://patchwork.freedesktop.org/patch/msgid/20200530124640.4176323-1-emil.l.velikov@gmail.com --- drivers/gpu/drm/drm_auth.c | 32 +++++++------------------------- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8 +++----- include/drm/drm_drv.h | 4 ++-- 3 files changed, 12 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 74ce0c29c960..4c723e3a689c 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -122,27 +122,19 @@ struct drm_master *drm_master_create(struct drm_device *dev) return master; } -static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv, - bool new_master) +static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv, + bool new_master) { - int ret = 0; - dev->master = drm_master_get(fpriv->master); - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, fpriv, new_master); - if (unlikely(ret != 0)) { - drm_master_put(&dev->master); - } - } + if (dev->driver->master_set) + dev->driver->master_set(dev, fpriv, new_master); - fpriv->was_master = (ret == 0); - return ret; + fpriv->was_master = true; } static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) { struct drm_master *old_master; - int ret; lockdep_assert_held_once(&dev->master_mutex); @@ -157,22 +149,12 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) fpriv->is_master = 1; fpriv->authenticated = 1; - ret = drm_set_master(dev, fpriv, true); - if (ret) - goto out_err; + drm_set_master(dev, fpriv, true); if (old_master) drm_master_put(&old_master); return 0; - -out_err: - /* drop references and restore old master on failure */ - drm_master_put(&fpriv->master); - fpriv->master = old_master; - fpriv->is_master = 0; - - return ret; } /* @@ -265,7 +247,7 @@ int 
drm_setmaster_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - ret = drm_set_master(dev, file_priv, false); + drm_set_master(dev, file_priv, false); out_unlock: mutex_unlock(&dev->master_mutex); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c2247a893ed4..470428387878 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1129,9 +1129,9 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, } #endif -static int vmw_master_set(struct drm_device *dev, - struct drm_file *file_priv, - bool from_open) +static void vmw_master_set(struct drm_device *dev, + struct drm_file *file_priv, + bool from_open) { /* * Inform a new master that the layout may have changed while @@ -1139,8 +1139,6 @@ static int vmw_master_set(struct drm_device *dev, */ if (!from_open) drm_sysfs_hotplug_event(dev); - - return 0; } static void vmw_master_drop(struct drm_device *dev, diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 8f110a28b6a2..7116abc1a04e 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -311,8 +311,8 @@ struct drm_driver { * * Called whenever the minor master is set. Only used by vmwgfx. */ - int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, - bool from_open); + void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, + bool from_open); /** * @master_drop: * -- cgit v1.2.3-59-g8ed1b From 2a1f3368bff609504cdc984cdb7cef467bb0b2b0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Jun 2020 18:00:44 +0200 Subject: ALSA: memalloc: Make SG-buffer helper usable for continuous buffer, too We have a few helper functions for making access to the buffer address easier on an SG-buffer. Those are specific to buffers allocated with the SG-buffer type, which makes it hard to use both SG and non-SG buffers in the same code. This patch adds a few simple checks and lets the helpers deal with both SG and continuous buffers gracefully. It's a preliminary step for the upcoming patch that mimics the buffer type on the fly.
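A hedged sketch of the kind of code this enables (names hypothetical); the same loop now works whether dmab was allocated as an SG buffer or as a continuous one:

	#include <linux/string.h>
	#include <sound/memalloc.h>

	static void foo_copy_from_dma_buffer(struct snd_dma_buffer *dmab, char *dst,
					     unsigned int ofs, unsigned int bytes)
	{
		while (bytes) {
			/* largest physically continuous chunk at this offset */
			unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes);

			memcpy(dst, snd_sgbuf_get_ptr(dmab, ofs), chunk);
			dst += chunk;
			ofs += chunk;
			bytes -= chunk;
		}
	}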
Link: https://lore.kernel.org/r/20200615160045.2703-4-tiwai@suse.de Signed-off-by: Takashi Iwai --- include/sound/memalloc.h | 9 ++++++++- sound/core/sgbuf.c | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index 3b47832b1c1f..5daa937684a4 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -94,7 +94,11 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset) { struct snd_sg_buf *sgbuf = dmab->private_data; - dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; + dma_addr_t addr; + + if (!sgbuf) + return dmab->addr + offset; + addr = sgbuf->table[offset >> PAGE_SHIFT].addr; addr &= ~((dma_addr_t)PAGE_SIZE - 1); return addr + offset % PAGE_SIZE; } @@ -106,6 +110,9 @@ static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab, size_t offset) { struct snd_sg_buf *sgbuf = dmab->private_data; + + if (!sgbuf) + return dmab->area + offset; return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; } diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c index c42217e2dd19..29ddb76187e5 100644 --- a/sound/core/sgbuf.c +++ b/sound/core/sgbuf.c @@ -142,6 +142,9 @@ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, struct snd_sg_buf *sg = dmab->private_data; unsigned int start, end, pg; + if (!sg) + return size; + start = ofs >> PAGE_SHIFT; end = (ofs + size - 1) >> PAGE_SHIFT; /* check page continuity */ -- cgit v1.2.3-59-g8ed1b From c7d75b5938e38a48e5fdac44f88fc5882f1f7bed Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:06:22 +0900 Subject: ASoC: soc-component: move snd_soc_component_xxx_regmap() to soc-component soc-component is handling snd_soc_component_xxx(). Move snd_soc_component_xxx_regmap() to it. 
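For context, a sketch of the deferred-regmap pattern these helpers exist for (hypothetical codec driver; struct foo_priv and foo_regmap_config are made up):

	#include <linux/err.h>
	#include <linux/regmap.h>
	#include <sound/soc.h>

	static int foo_component_probe(struct snd_soc_component *component)
	{
		struct foo_priv *priv = snd_soc_component_get_drvdata(component);

		/* the regmap only becomes available here, after registration */
		priv->regmap = devm_regmap_init_i2c(priv->i2c, &foo_regmap_config);
		if (IS_ERR(priv->regmap))
			return PTR_ERR(priv->regmap);

		snd_soc_component_init_regmap(component, priv->regmap);
		return 0;
	}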
Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87sgfbw8zl.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 1 + sound/soc/soc-component.c | 50 +++++++++++++++++++++++++++++++++++++++++++ sound/soc/soc-core.c | 50 ------------------------------------------- 3 files changed, 51 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 5663891148e3..481132141dc2 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -359,6 +359,7 @@ int snd_soc_component_stream_event(struct snd_soc_component *component, int snd_soc_component_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level); +void snd_soc_component_setup_regmap(struct snd_soc_component *component); #ifdef CONFIG_REGMAP void snd_soc_component_init_regmap(struct snd_soc_component *component, struct regmap *regmap); diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 76f4b953563c..3c96a1adaa8b 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -302,6 +302,56 @@ int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component, return -ENOTSUPP; } +void snd_soc_component_setup_regmap(struct snd_soc_component *component) +{ + int val_bytes = regmap_get_val_bytes(component->regmap); + + /* Errors are legitimate for non-integer byte multiples */ + if (val_bytes > 0) + component->val_bytes = val_bytes; +} + +#ifdef CONFIG_REGMAP + +/** + * snd_soc_component_init_regmap() - Initialize regmap instance for the + * component + * @component: The component for which to initialize the regmap instance + * @regmap: The regmap instance that should be used by the component + * + * This function allows deferred assignment of the regmap instance that is + * associated with the component. Only use this if the regmap instance is not + * yet ready when the component is registered. The function must also be called + * before the first IO attempt of the component. + */ +void snd_soc_component_init_regmap(struct snd_soc_component *component, + struct regmap *regmap) +{ + component->regmap = regmap; + snd_soc_component_setup_regmap(component); +} +EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap); + +/** + * snd_soc_component_exit_regmap() - De-initialize regmap instance for the + * component + * @component: The component for which to de-initialize the regmap instance + * + * Calls regmap_exit() on the regmap instance associated to the component and + * removes the regmap instance from the component. + * + * This function should only be used if snd_soc_component_init_regmap() was used + * to initialize the regmap instance. 
+ */ +void snd_soc_component_exit_regmap(struct snd_soc_component *component) +{ + regmap_exit(component->regmap); + component->regmap = NULL; +} +EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); + +#endif + int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 0f30f5aabaa8..13a59736b2fc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2398,56 +2398,6 @@ static int snd_soc_component_initialize(struct snd_soc_component *component, return 0; } -static void snd_soc_component_setup_regmap(struct snd_soc_component *component) -{ - int val_bytes = regmap_get_val_bytes(component->regmap); - - /* Errors are legitimate for non-integer byte multiples */ - if (val_bytes > 0) - component->val_bytes = val_bytes; -} - -#ifdef CONFIG_REGMAP - -/** - * snd_soc_component_init_regmap() - Initialize regmap instance for the - * component - * @component: The component for which to initialize the regmap instance - * @regmap: The regmap instance that should be used by the component - * - * This function allows deferred assignment of the regmap instance that is - * associated with the component. Only use this if the regmap instance is not - * yet ready when the component is registered. The function must also be called - * before the first IO attempt of the component. - */ -void snd_soc_component_init_regmap(struct snd_soc_component *component, - struct regmap *regmap) -{ - component->regmap = regmap; - snd_soc_component_setup_regmap(component); -} -EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap); - -/** - * snd_soc_component_exit_regmap() - De-initialize regmap instance for the - * component - * @component: The component for which to de-initialize the regmap instance - * - * Calls regmap_exit() on the regmap instance associated to the component and - * removes the regmap instance from the component. - * - * This function should only be used if snd_soc_component_init_regmap() was used - * to initialize the regmap instance. 
- */ -void snd_soc_component_exit_regmap(struct snd_soc_component *component) -{ - regmap_exit(component->regmap); - component->regmap = NULL; -} -EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); - -#endif - #define ENDIANNESS_MAP(name) \ (SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE) static u64 endianness_format_map[] = { -- cgit v1.2.3-59-g8ed1b From 536aba1dd4939bf647f5d182d4f101ae548e6505 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:06:32 +0900 Subject: ASoC: soc-component: move snd_soc_component_initialize() to soc-component.c snd_soc_component_xxx() should be implemented at soc-component.c Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87r1uvw8zb.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 4 ++++ sound/soc/soc-component.c | 16 ++++++++++++++++ sound/soc/soc-core.c | 29 ++++++++--------------------- 3 files changed, 28 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 481132141dc2..cb0d34fa77c6 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -324,6 +324,10 @@ static inline int snd_soc_component_cache_sync( return regcache_sync(component->regmap); } +int snd_soc_component_initialize(struct snd_soc_component *component, + const struct snd_soc_component_driver *driver, + struct device *dev, const char *name); + /* component IO */ int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg, unsigned int *val); diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 3c96a1adaa8b..5bf2e71d3d83 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -8,6 +8,22 @@ #include #include +int snd_soc_component_initialize(struct snd_soc_component *component, + const struct snd_soc_component_driver *driver, + struct device *dev, const char *name) +{ + INIT_LIST_HEAD(&component->dai_list); + INIT_LIST_HEAD(&component->dobj_list); + INIT_LIST_HEAD(&component->card_list); + mutex_init(&component->io_mutex); + + component->name = name; + component->dev = dev; + component->driver = driver; + + return 0; +} + /** * snd_soc_component_set_sysclk - configure COMPONENT system or master clock. 
* @component: COMPONENT diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 13a59736b2fc..e596e5a765da 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2378,26 +2378,6 @@ err: return ret; } -static int snd_soc_component_initialize(struct snd_soc_component *component, - const struct snd_soc_component_driver *driver, struct device *dev) -{ - INIT_LIST_HEAD(&component->dai_list); - INIT_LIST_HEAD(&component->dobj_list); - INIT_LIST_HEAD(&component->card_list); - mutex_init(&component->io_mutex); - - component->name = fmt_single_name(dev, &component->id); - if (!component->name) { - dev_err(dev, "ASoC: Failed to allocate name\n"); - return -ENOMEM; - } - - component->dev = dev; - component->driver = driver; - - return 0; -} - #define ENDIANNESS_MAP(name) \ (SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE) static u64 endianness_format_map[] = { @@ -2460,12 +2440,19 @@ int snd_soc_add_component(struct device *dev, struct snd_soc_dai_driver *dai_drv, int num_dai) { + const char *name = fmt_single_name(dev, &component->id); int ret; int i; + if (!name) { + dev_err(dev, "ASoC: Failed to allocate name\n"); + return -ENOMEM; + } + mutex_lock(&client_mutex); - ret = snd_soc_component_initialize(component, component_driver, dev); + ret = snd_soc_component_initialize(component, component_driver, + dev, name); if (ret) goto err_free; -- cgit v1.2.3-59-g8ed1b From 4f39514f36980a4b20a754a5d51486a5999c8380 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:06:58 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_prepare() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_prepare() to snd_soc_pcm_component_prepare(). This patch do it. 
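The per-component callback reached through the new helper is unchanged; a minimal, hypothetical implementation looks like this (foo_* names are illustrative only):

static int foo_pcm_prepare(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream)
{
	struct foo_priv *foo = snd_soc_component_get_drvdata(component);

	/* program rate/format dependent registers before the stream starts */
	return foo_hw_setup(foo, substream->runtime->rate,
			    substream->runtime->format);
}

static const struct snd_soc_component_driver foo_component_driver = {
	.name		= "foo",
	.prepare	= foo_pcm_prepare,
};

snd_soc_pcm_component_prepare() simply walks every component of the runtime and stops at the first error, so the callback keeps its existing error semantics.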
Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87o8pzw8yl.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 +-- sound/soc/soc-component.c | 28 +++++++++++++++++----------- sound/soc/soc-pcm.c | 12 +++--------- 3 files changed, 21 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index cb0d34fa77c6..fc287e910240 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,8 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_prepare(struct snd_soc_component *component, - struct snd_pcm_substream *substream); int snd_soc_component_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); @@ -460,5 +458,6 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma); int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd); void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd); +int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 6d29c2de3b24..1bc155bc8e5e 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,17 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_prepare(struct snd_soc_component *component, - struct snd_pcm_substream *substream) -{ - int ret = 0; - - if (component->driver->prepare) - ret = component->driver->prepare(component, substream); - - return soc_component_ret(component, ret); -} - int snd_soc_component_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) @@ -569,3 +558,20 @@ void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd) if (component->driver->pcm_destruct) component->driver->pcm_destruct(component, rtd->pcm); } + +int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component->driver->prepare) { + ret = component->driver->prepare(component, substream); + if (ret < 0) + return soc_component_ret(component, ret); + } + } + + return 0; +} diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index c517064f5391..8ba0f14a2f2f 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -850,7 +850,6 @@ static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd) static int soc_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; struct snd_soc_dai *dai; int i, ret = 0; @@ -860,14 +859,9 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream) if (ret < 0) goto out; - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_prepare(component, substream); - if (ret < 0) { - dev_err(component->dev, - "ASoC: platform prepare error: %d\n", ret); - goto out; - } - } + ret = snd_soc_pcm_component_prepare(substream); + if (ret < 0) + goto out; ret = 
snd_soc_pcm_dai_prepare(substream); if (ret < 0) { -- cgit v1.2.3-59-g8ed1b From e1bafa828e3a0622ac24d238e00937f3059ed585 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:11 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_hw_params() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_hw_params() to snd_soc_pcm_component_hw_params(). This patch do it. Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87mu5jw8y8.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 6 +++--- sound/soc/soc-component.c | 36 +++++++++++++++++++++++------------- sound/soc/soc-pcm.c | 13 +++---------- 3 files changed, 29 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index fc287e910240..a2898bdd0a3c 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,9 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_hw_params(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params); int snd_soc_component_hw_free(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_trigger(struct snd_soc_component *component, @@ -459,5 +456,8 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd); void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd); int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream); +int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_component **last); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 1bc155bc8e5e..56341968fe6d 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,19 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_hw_params(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - int ret = 0; - - if (component->driver->hw_params) - ret = component->driver->hw_params(component, - substream, params); - - return soc_component_ret(component, ret); -} - int snd_soc_component_hw_free(struct snd_soc_component *component, struct snd_pcm_substream *substream) { @@ -575,3 +562,26 @@ int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream) return 0; } + +int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_component **last) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component->driver->hw_params) { + ret = component->driver->hw_params(component, + substream, params); + if (ret < 0) { + *last = component; + return soc_component_ret(component, ret); + } + } + } + + *last = NULL; + return 0; +} diff --git a/sound/soc/soc-pcm.c 
b/sound/soc/soc-pcm.c index 8ba0f14a2f2f..e5eef48af167 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1009,16 +1009,9 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, snd_soc_dapm_update_dai(substream, params, cpu_dai); } - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_hw_params(component, substream, params); - if (ret < 0) { - dev_err(component->dev, - "ASoC: %s hw params failed: %d\n", - component->name, ret); - goto component_err; - } - } - component = NULL; + ret = snd_soc_pcm_component_hw_params(substream, params, &component); + if (ret < 0) + goto component_err; out: mutex_unlock(&rtd->card->pcm_mutex); -- cgit v1.2.3-59-g8ed1b From 047511198639649bdaacb1a34d9691429ccc5698 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:24 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_hw_free() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_hw_free() to snd_soc_pcm_component_hw_free(). This patch do it. Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87lfl3w8xv.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 4 ++-- sound/soc/soc-component.c | 30 +++++++++++++++++++----------- sound/soc/soc-pcm.c | 23 ++--------------------- 3 files changed, 23 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index a2898bdd0a3c..d2f62d529559 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,8 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_hw_free(struct snd_soc_component *component, - struct snd_pcm_substream *substream); int snd_soc_component_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd); @@ -459,5 +457,7 @@ int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream); int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_component **last); +void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_component *last); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 56341968fe6d..380f6459b5cb 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,17 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_hw_free(struct snd_soc_component *component, - struct snd_pcm_substream *substream) -{ - int ret = 0; - - if (component->driver->hw_free) - ret = component->driver->hw_free(component, substream); - - return soc_component_ret(component, ret); -} - int snd_soc_component_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) @@ -585,3 +574,22 @@ int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, *last = NULL; return 0; } + +void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_component *last) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct 
snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component == last) + break; + + if (component->driver->hw_free) { + ret = component->driver->hw_free(component, substream); + if (ret < 0) + soc_component_ret(component, ret); + } + } +} diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index e5eef48af167..cbce15c5721e 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -898,25 +898,6 @@ static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params, interval->max = channels; } -static int soc_pcm_components_hw_free(struct snd_pcm_substream *substream, - struct snd_soc_component *last) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; - int i, r, ret = 0; - - for_each_rtd_components(rtd, i, component) { - if (component == last) - break; - - r = snd_soc_component_hw_free(component, substream); - if (r < 0) - ret = r; /* use last ret */ - } - - return ret; -} - /* * Called by ALSA when the hardware params are set by application. This * function can also be called multiple times and can allocate buffers @@ -1018,7 +999,7 @@ out: return ret; component_err: - soc_pcm_components_hw_free(substream, component); + snd_soc_pcm_component_hw_free(substream, component); i = rtd->num_cpus; @@ -1077,7 +1058,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) snd_soc_link_hw_free(substream); /* free any component resources */ - soc_pcm_components_hw_free(substream, NULL); + snd_soc_pcm_component_hw_free(substream, NULL); /* now free hw params for the DAIs */ for_each_rtd_dais(rtd, i, dai) { -- cgit v1.2.3-59-g8ed1b From 32fd120475c1b8a83d28bfedc2b95ec981fbb809 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:40 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_trigger() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_trigger() to snd_soc_pcm_component_trigger(). This patch do it. 
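Each component still only sees its own trigger callback; a minimal, hypothetical example (illustrative names, not an existing driver) is:

static int foo_pcm_trigger(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream, int cmd)
{
	struct foo_priv *foo = snd_soc_component_get_drvdata(component);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		return foo_dma_start(foo, substream);
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		return foo_dma_stop(foo, substream);
	default:
		return -EINVAL;
	}
}

Note that the new helper returns on the first component failure, and the existing ordering (components before DAIs on start, after DAIs on stop) is preserved in soc-pcm.c below.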
Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87k10nw8xf.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 5 ++--- sound/soc/soc-component.c | 30 ++++++++++++++++++------------ sound/soc/soc-pcm.c | 24 ++++++++---------------- 3 files changed, 28 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index d2f62d529559..bb26d55a9289 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,9 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_trigger(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - int cmd); void snd_soc_component_suspend(struct snd_soc_component *component); void snd_soc_component_resume(struct snd_soc_component *component); int snd_soc_component_is_suspended(struct snd_soc_component *component); @@ -459,5 +456,7 @@ int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, struct snd_soc_component **last); void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, struct snd_soc_component *last); +int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream, + int cmd); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 380f6459b5cb..150b02be0219 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,18 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_trigger(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - int cmd) -{ - int ret = 0; - - if (component->driver->trigger) - ret = component->driver->trigger(component, substream, cmd); - - return soc_component_ret(component, ret); -} - void snd_soc_component_suspend(struct snd_soc_component *component) { if (component->driver->suspend) @@ -593,3 +581,21 @@ void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, } } } + +int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream, + int cmd) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component->driver->trigger) { + ret = component->driver->trigger(component, substream, cmd); + if (ret < 0) + return soc_component_ret(component, ret); + } + } + + return 0; +} diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index cbce15c5721e..be5c83f1ab0c 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1074,38 +1074,30 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; - int i, ret; + int ret; ret = snd_soc_link_trigger(substream, cmd); if (ret < 0) return ret; - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_trigger(component, substream, cmd); - if (ret < 0) - return ret; - } + ret = snd_soc_pcm_component_trigger(substream, cmd); + if (ret < 0) + return ret; return snd_soc_pcm_dai_trigger(substream, cmd); } static int soc_pcm_trigger_stop(struct snd_pcm_substream 
*substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; - int i, ret; + int ret; ret = snd_soc_pcm_dai_trigger(substream, cmd); if (ret < 0) return ret; - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_trigger(component, substream, cmd); - if (ret < 0) - return ret; - } + ret = snd_soc_pcm_component_trigger(substream, cmd); + if (ret < 0) + return ret; ret = snd_soc_link_trigger(substream, cmd); if (ret < 0) -- cgit v1.2.3-59-g8ed1b From 257c4dac8b7877c865e734533b5f62769c64afb6 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:54 +0900 Subject: ASoC: soc-component: add snd_soc_component_init() we wantn't to directly access to component related parameter as much as possible to keep encapsulation. This patch adds snd_soc_component_init() for it. Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87img7w8x2.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 +++ sound/soc/soc-component.c | 16 ++++++++++++++++ sound/soc/soc-core.c | 23 ++++++++++++----------- 3 files changed, 31 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index bb26d55a9289..aea0eb0c3fcc 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -327,6 +327,9 @@ static inline int snd_soc_component_cache_sync( int snd_soc_component_initialize(struct snd_soc_component *component, const struct snd_soc_component_driver *driver, struct device *dev, const char *name); +void snd_soc_component_set_aux(struct snd_soc_component *component, + struct snd_soc_aux_dev *aux); +int snd_soc_component_init(struct snd_soc_component *component); /* component IO */ int snd_soc_component_read(struct snd_soc_component *component, diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 150b02be0219..7624ff5b67d3 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -46,6 +46,22 @@ int snd_soc_component_initialize(struct snd_soc_component *component, return 0; } +void snd_soc_component_set_aux(struct snd_soc_component *component, + struct snd_soc_aux_dev *aux) +{ + component->init = (aux) ? aux->init : NULL; +} + +int snd_soc_component_init(struct snd_soc_component *component) +{ + int ret = 0; + + if (component->init) + ret = component->init(component); + + return soc_component_ret(component, ret); +} + /** * snd_soc_component_set_sysclk - configure COMPONENT system or master clock. 
* @component: COMPONENT diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index e596e5a765da..c38bb423e695 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1208,15 +1208,14 @@ static int soc_probe_component(struct snd_soc_card *card, component->name); probed = 1; - /* machine specific init */ - if (component->init) { - ret = component->init(component); - if (ret < 0) { - dev_err(component->dev, - "Failed to do machine specific init %d\n", ret); - goto err_probe; - } - } + /* + * machine specific init + * see + * snd_soc_component_set_aux() + */ + ret = snd_soc_component_init(component); + if (ret < 0) + goto err_probe; ret = snd_soc_add_component_controls(component, component->driver->controls, @@ -1330,7 +1329,8 @@ static void soc_unbind_aux_dev(struct snd_soc_card *card) struct snd_soc_component *component, *_component; for_each_card_auxs_safe(card, component, _component) { - component->init = NULL; + /* for snd_soc_component_init() */ + snd_soc_component_set_aux(component, NULL); list_del(&component->card_aux_list); } } @@ -1347,7 +1347,8 @@ static int soc_bind_aux_dev(struct snd_soc_card *card) if (!component) return -EPROBE_DEFER; - component->init = aux->init; + /* for snd_soc_component_init() */ + snd_soc_component_set_aux(component, aux); /* see for_each_card_auxs */ list_add(&component->card_aux_list, &card->aux_comp_list); } -- cgit v1.2.3-59-g8ed1b From 45108214dbfdba4a07061d2a4db6dc12475049f2 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:08:24 +0900 Subject: ASoC: soc-component: tidyup Copyright This patch add missing company copyright Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87eeqvw8w8.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index aea0eb0c3fcc..4a4bb723ca9f 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -2,7 +2,8 @@ * * soc-component.h * - * Copyright (c) 2019 Kuninori Morimoto + * Copyright (C) 2019 Renesas Electronics Corp. + * Kuninori Morimoto */ #ifndef __SOC_COMPONENT_H #define __SOC_COMPONENT_H -- cgit v1.2.3-59-g8ed1b From c9015a1723373f2c8f8ac994f59470f4fb852623 Mon Sep 17 00:00:00 2001 From: Shengjiu Wang Date: Wed, 3 Jun 2020 18:26:53 +0800 Subject: ASoC: wm8960: Support headphone jack detection function Add two platform variables for headphone jack detection. "hp_cfg" is for configuration of heaphone jack detection. "gpio_cfg" is for configuration of gpio, the gpio is used for plug & unplug interrupt on SoC. Signed-off-by: Shengjiu Wang Link: https://lore.kernel.org/r/1591180013-12416-2-git-send-email-shengjiu.wang@nxp.com Signed-off-by: Mark Brown --- include/sound/wm8960.h | 17 +++++++++++++++++ sound/soc/codecs/wm8960.c | 20 ++++++++++++++++++++ 2 files changed, 37 insertions(+) (limited to 'include') diff --git a/include/sound/wm8960.h b/include/sound/wm8960.h index d22e84805025..275fd5b201ce 100644 --- a/include/sound/wm8960.h +++ b/include/sound/wm8960.h @@ -16,6 +16,23 @@ struct wm8960_data { bool capless; /* Headphone outputs configured in capless mode */ bool shared_lrclk; /* DAC and ADC LRCLKs are wired together */ + + /* + * Setup for headphone detection + * + * hp_cfg[0]: HPSEL[1:0] of R48 (Additional Control 4) + * hp_cfg[1]: {HPSWEN:HPSWPOL} of R24 (Additional Control 2). 
+ * hp_cfg[2]: {TOCLKSEL:TOEN} of R23 (Additional Control 1). + */ + u32 hp_cfg[3]; + + /* + * Setup for gpio configuration + * + * gpio_cfg[0]: ALRCGPIO of R9 (Audio interface) + * gpio_cfg[1]: {GPIOPOL:GPIOSEL[2:0]} of R48 (Additional Control 4). + */ + u32 gpio_cfg[2]; }; #endif diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 6cf0f6612bda..2f7f0493144a 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -1389,6 +1389,12 @@ static void wm8960_set_pdata_from_of(struct i2c_client *i2c, if (of_property_read_bool(np, "wlf,shared-lrclk")) pdata->shared_lrclk = true; + + of_property_read_u32_array(np, "wlf,gpio-cfg", pdata->gpio_cfg, + ARRAY_SIZE(pdata->gpio_cfg)); + + of_property_read_u32_array(np, "wlf,hp-cfg", pdata->hp_cfg, + ARRAY_SIZE(pdata->hp_cfg)); } static int wm8960_i2c_probe(struct i2c_client *i2c, @@ -1446,6 +1452,20 @@ static int wm8960_i2c_probe(struct i2c_client *i2c, regmap_update_bits(wm8960->regmap, WM8960_LOUT2, 0x100, 0x100); regmap_update_bits(wm8960->regmap, WM8960_ROUT2, 0x100, 0x100); + /* ADCLRC pin configured as GPIO. */ + regmap_update_bits(wm8960->regmap, WM8960_IFACE2, 1 << 6, + wm8960->pdata.gpio_cfg[0] << 6); + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL4, 0xF << 4, + wm8960->pdata.gpio_cfg[1] << 4); + + /* Enable headphone jack detect */ + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL4, 3 << 2, + wm8960->pdata.hp_cfg[0] << 2); + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL2, 3 << 5, + wm8960->pdata.hp_cfg[1] << 5); + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL1, 3, + wm8960->pdata.hp_cfg[2]); + i2c_set_clientdata(i2c, wm8960); ret = devm_snd_soc_register_component(&i2c->dev, -- cgit v1.2.3-59-g8ed1b From 4b9e7edb5afc4e3c27d6623f5008bf53ae96cf1a Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 15 Jun 2020 09:23:13 +0200 Subject: regmap: convert all regmap_update_bits() and co. macros to static inlines There's no reason to have these as macros. Let's convert them all to static inlines for better readability and stronger typing. 
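Callers do not change; for example the usual read-modify-write helper still reads as follows (register and bit names are made up for illustration):

static int foo_hw_enable(struct regmap *map, bool enable)
{
	/* FOO_CTRL / FOO_CTRL_EN are illustrative register and bit names */
	return regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN,
				  enable ? FOO_CTRL_EN : 0);
}

The difference is purely that argument type mismatches are now caught by the compiler at the call site instead of surfacing as confusing errors from inside a macro expansion.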
Suggested-by: Mark Brown Signed-off-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20200615072313.11106-1-brgl@bgdev.pl Signed-off-by: Mark Brown --- include/linux/regmap.h | 222 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 192 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/linux/regmap.h b/include/linux/regmap.h index cb666b9c6b6a..f4917efed5c3 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -80,36 +80,6 @@ struct reg_sequence { } #define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0) -#define regmap_update_bits(map, reg, mask, val) \ - regmap_update_bits_base(map, reg, mask, val, NULL, false, false) -#define regmap_update_bits_async(map, reg, mask, val)\ - regmap_update_bits_base(map, reg, mask, val, NULL, true, false) -#define regmap_update_bits_check(map, reg, mask, val, change)\ - regmap_update_bits_base(map, reg, mask, val, change, false, false) -#define regmap_update_bits_check_async(map, reg, mask, val, change)\ - regmap_update_bits_base(map, reg, mask, val, change, true, false) - -#define regmap_write_bits(map, reg, mask, val) \ - regmap_update_bits_base(map, reg, mask, val, NULL, false, true) - -#define regmap_field_write(field, val) \ - regmap_field_update_bits_base(field, ~0, val, NULL, false, false) -#define regmap_field_force_write(field, val) \ - regmap_field_update_bits_base(field, ~0, val, NULL, false, true) -#define regmap_field_update_bits(field, mask, val)\ - regmap_field_update_bits_base(field, mask, val, NULL, false, false) -#define regmap_field_force_update_bits(field, mask, val) \ - regmap_field_update_bits_base(field, mask, val, NULL, false, true) - -#define regmap_fields_write(field, id, val) \ - regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false) -#define regmap_fields_force_write(field, id, val) \ - regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true) -#define regmap_fields_update_bits(field, id, mask, val)\ - regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false) -#define regmap_fields_force_update_bits(field, id, mask, val) \ - regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) - /** * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs * @@ -1054,6 +1024,42 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, int regmap_update_bits_base(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool async, bool force); + +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, false, false); +} + +static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, true, false); +} + +static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return regmap_update_bits_base(map, reg, mask, val, + change, false, false); +} + +static inline int +regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return regmap_update_bits_base(map, reg, mask, val, + change, true, false); +} + +static inline int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, 
false, true); +} + int regmap_get_val_bytes(struct regmap *map); int regmap_get_max_register(struct regmap *map); int regmap_get_reg_stride(struct regmap *map); @@ -1152,6 +1158,65 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, unsigned int mask, unsigned int val, bool *change, bool async, bool force); + +static inline int regmap_field_write(struct regmap_field *field, + unsigned int val) +{ + return regmap_field_update_bits_base(field, ~0, val, + NULL, false, false); +} + +static inline int regmap_field_force_write(struct regmap_field *field, + unsigned int val) +{ + return regmap_field_update_bits_base(field, ~0, val, NULL, false, true); +} + +static inline int regmap_field_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + return regmap_field_update_bits_base(field, mask, val, + NULL, false, false); +} + +static inline int +regmap_field_force_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + return regmap_field_update_bits_base(field, mask, val, + NULL, false, true); +} + +static inline int regmap_fields_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, ~0, val, + NULL, false, false); +} + +static inline int regmap_fields_force_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, ~0, val, + NULL, false, true); +} + +static inline int +regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, mask, val, + NULL, false, false); +} + +static inline int +regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, mask, val, + NULL, false, true); +} + /** * struct regmap_irq_type - IRQ type definitions. 
* @@ -1458,6 +1523,103 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field, return -EINVAL; } +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_write(struct regmap_field *field, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_force_write(struct regmap_field *field, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_field_force_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_fields_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_fields_force_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_get_val_bytes(struct regmap *map) { WARN_ONCE(1, "regmap API is disabled"); -- cgit v1.2.3-59-g8ed1b From dff08caf35ecef4f7647f8b1e40877a254852a2b Mon Sep 17 00:00:00 2001 From: Pi-Hsun Shih Date: Fri, 12 Jun 2020 12:05:19 +0800 Subject: platform/chrome: cros_ec: Add command for regulator control. Add host commands for voltage regulator control through ChromeOS EC. 
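As a rough sketch (not the eventual regulator driver), a kernel consumer would marshal the new structures through the usual host-command path, along these lines:

/* Illustrative helper; error handling trimmed for brevity. */
static int ec_regulator_set_voltage(struct cros_ec_device *ec, u32 index,
				    u32 min_mv, u32 max_mv)
{
	struct ec_params_regulator_set_voltage params = {
		.index  = index,
		.min_mv = min_mv,
		.max_mv = max_mv,
	};
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(params), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_REGULATOR_SET_VOLTAGE;
	msg->outsize = sizeof(params);
	memcpy(msg->data, &params, sizeof(params));

	ret = cros_ec_cmd_xfer_status(ec, msg);
	kfree(msg);

	return ret < 0 ? ret : 0;
}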
Signed-off-by: Pi-Hsun Shih Reviewed-by: Enric Balletbo i Serra Link: https://lore.kernel.org/r/20200612040526.192878-3-pihsun@chromium.org Signed-off-by: Mark Brown --- drivers/platform/chrome/cros_ec_trace.c | 5 ++ include/linux/platform_data/cros_ec_commands.h | 82 ++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) (limited to 'include') diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c index 523a39bd0ff6..425e9441b7ca 100644 --- a/drivers/platform/chrome/cros_ec_trace.c +++ b/drivers/platform/chrome/cros_ec_trace.c @@ -161,6 +161,11 @@ TRACE_SYMBOL(EC_CMD_ADC_READ), \ TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \ TRACE_SYMBOL(EC_CMD_AP_RESET), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \ TRACE_SYMBOL(EC_CMD_CR51_BASE), \ TRACE_SYMBOL(EC_CMD_CR51_LAST), \ TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \ diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 69210881ebac..a417b51b5764 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5430,6 +5430,88 @@ struct ec_response_rollback_info { /* Issue AP reset */ #define EC_CMD_AP_RESET 0x0125 +/*****************************************************************************/ +/* Voltage regulator controls */ + +/* + * Get basic info of voltage regulator for given index. + * + * Returns the regulator name and supported voltage list in mV. + */ +#define EC_CMD_REGULATOR_GET_INFO 0x012B + +/* Maximum length of regulator name */ +#define EC_REGULATOR_NAME_MAX_LEN 16 + +/* Maximum length of the supported voltage list. */ +#define EC_REGULATOR_VOLTAGE_MAX_COUNT 16 + +struct ec_params_regulator_get_info { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_get_info { + char name[EC_REGULATOR_NAME_MAX_LEN]; + uint16_t num_voltages; + uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT]; +} __ec_align1; + +/* + * Configure the regulator as enabled / disabled. + */ +#define EC_CMD_REGULATOR_ENABLE 0x012C + +struct ec_params_regulator_enable { + uint32_t index; + uint8_t enable; +} __ec_align4; + +/* + * Query if the regulator is enabled. + * + * Returns 1 if the regulator is enabled, 0 if not. + */ +#define EC_CMD_REGULATOR_IS_ENABLED 0x012D + +struct ec_params_regulator_is_enabled { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_is_enabled { + uint8_t enabled; +} __ec_align1; + +/* + * Set voltage for the voltage regulator within the range specified. + * + * The driver should select the voltage in range closest to min_mv. + * + * Also note that this might be called before the regulator is enabled, and the + * setting should be in effect after the regulator is enabled. + */ +#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012E + +struct ec_params_regulator_set_voltage { + uint32_t index; + uint32_t min_mv; + uint32_t max_mv; +} __ec_align4; + +/* + * Get the currently configured voltage for the voltage regulator. + * + * Note that this might be called before the regulator is enabled. 
+ */ +#define EC_CMD_REGULATOR_GET_VOLTAGE 0x012F + +struct ec_params_regulator_get_voltage { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_get_voltage { + uint32_t voltage_mv; +} __ec_align4; + /*****************************************************************************/ /* The command range 0x200-0x2FF is reserved for Rotor. */ -- cgit v1.2.3-59-g8ed1b From 8e04187c1bc7953f6dfad3400c58b1b0b0ad767b Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Thu, 11 Jun 2020 11:25:07 +0800 Subject: spi: altera: add SPI core parameters support via platform data. This patch introduced SPI core parameters in platform data, it allows passing these SPI core parameters via platform data. Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Signed-off-by: Matthew Gerlach Signed-off-by: Russ Weight Reviewed-by: Tom Rix Link: https://lore.kernel.org/r/1591845911-10197-3-git-send-email-yilun.xu@intel.com Signed-off-by: Mark Brown --- drivers/spi/spi-altera.c | 25 ++++++++++++++++++++++--- include/linux/spi/altera.h | 24 ++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 3 deletions(-) create mode 100644 include/linux/spi/altera.h (limited to 'include') diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index d5fa0c5706b0..e6e6708c7c47 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,8 @@ #define ALTERA_SPI_CONTROL_IE_MSK 0x100 #define ALTERA_SPI_CONTROL_SSO_MSK 0x400 +#define ALTERA_SPI_MAX_CS 32 + struct altera_spi { void __iomem *base; int irq; @@ -182,6 +185,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev) static int altera_spi_probe(struct platform_device *pdev) { + struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev); struct altera_spi *hw; struct spi_master *master; int err = -ENODEV; @@ -192,9 +196,24 @@ static int altera_spi_probe(struct platform_device *pdev) /* setup the master state. */ master->bus_num = pdev->id; - master->num_chipselect = 16; - master->mode_bits = SPI_CS_HIGH; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + + if (pdata) { + if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) { + dev_err(&pdev->dev, + "Invalid number of chipselect: %hu\n", + pdata->num_chipselect); + return -EINVAL; + } + + master->num_chipselect = pdata->num_chipselect; + master->mode_bits = pdata->mode_bits; + master->bits_per_word_mask = pdata->bits_per_word_mask; + } else { + master->num_chipselect = 16; + master->mode_bits = SPI_CS_HIGH; + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + } + master->dev.of_node = pdev->dev.of_node; master->transfer_one = altera_spi_txrx; master->set_cs = altera_spi_set_cs; diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h new file mode 100644 index 000000000000..344a3fce56a4 --- /dev/null +++ b/include/linux/spi/altera.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header File for Altera SPI Driver. + */ +#ifndef __LINUX_SPI_ALTERA_H +#define __LINUX_SPI_ALTERA_H + +#include +#include +#include + +/** + * struct altera_spi_platform_data - Platform data of the Altera SPI driver + * @mode_bits: Mode bits of SPI master. + * @num_chipselect: Number of chipselects. + * @bits_per_word_mask: bitmask of supported bits_per_word for transfers. 
+ */ +struct altera_spi_platform_data { + u16 mode_bits; + u16 num_chipselect; + u32 bits_per_word_mask; +}; + +#endif /* __LINUX_SPI_ALTERA_H */ -- cgit v1.2.3-59-g8ed1b From 1fccd182a4694a848f2d6f3b1820d6fc71d9c99d Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Thu, 11 Jun 2020 11:25:08 +0800 Subject: spi: altera: add platform data for slave information. This patch introduces platform data for slave information, it allows spi-altera to add new spi devices once master registration is done. Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Signed-off-by: Matthew Gerlach Signed-off-by: Russ Weight Reviewed-by: Tom Rix Link: https://lore.kernel.org/r/1591845911-10197-4-git-send-email-yilun.xu@intel.com Signed-off-by: Mark Brown --- drivers/spi/spi-altera.c | 11 +++++++++++ include/linux/spi/altera.h | 5 +++++ 2 files changed, 16 insertions(+) (limited to 'include') diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index e6e6708c7c47..aa9d1a257433 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c @@ -189,6 +189,7 @@ static int altera_spi_probe(struct platform_device *pdev) struct altera_spi *hw; struct spi_master *master; int err = -ENODEV; + u16 i; master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); if (!master) @@ -244,6 +245,16 @@ static int altera_spi_probe(struct platform_device *pdev) err = devm_spi_register_master(&pdev->dev, master); if (err) goto exit; + + if (pdata) { + for (i = 0; i < pdata->num_devices; i++) { + if (!spi_new_device(master, pdata->devices + i)) + dev_warn(&pdev->dev, + "unable to create SPI device: %s\n", + pdata->devices[i].modalias); + } + } + dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); return 0; diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h index 344a3fce56a4..2d42641499a6 100644 --- a/include/linux/spi/altera.h +++ b/include/linux/spi/altera.h @@ -14,11 +14,16 @@ * @mode_bits: Mode bits of SPI master. * @num_chipselect: Number of chipselects. * @bits_per_word_mask: bitmask of supported bits_per_word for transfers. + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The devices to add. */ struct altera_spi_platform_data { u16 mode_bits; u16 num_chipselect; u32 bits_per_word_mask; + u16 num_devices; + struct spi_board_info *devices; }; #endif /* __LINUX_SPI_ALTERA_H */ -- cgit v1.2.3-59-g8ed1b From aa5c697988b4c7e1077de1e2eb2298114d531736 Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Mon, 15 Jun 2020 15:22:35 +0800 Subject: scsi: ufs: Add trace event for UIC commands Use the ftrace infrastructure to conditionally trace UFS UIC command events. New trace event "ufshcd_uic_command" is created, which samples the following UFS UIC command data: - Device name - Optional identification string - UIC command opcode - UIC command argument1 - UIC command argument2 - UIC command argement3 Usage: echo 1 > /sys/kernel/debug/tracing/events/ufs/enable cat /sys/kernel/debug/tracing/trace_pipe Link: https://lore.kernel.org/r/20200615072235.23042-3-stanley.chu@mediatek.com Acked-by: Avri Altman Signed-off-by: Stanley Chu Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 26 ++++++++++++++++++++++++++ include/trace/events/ufs.h | 31 +++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) (limited to 'include') diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 6cedf0758b3f..19221de37a4d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -311,6 +311,26 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, &descp->input_param1); } +static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, + struct uic_command *ucmd, + const char *str) +{ + u32 cmd; + + if (!trace_ufshcd_uic_command_enabled()) + return; + + if (!strcmp(str, "send")) + cmd = ucmd->command; + else + cmd = ufshcd_readl(hba, REG_UIC_COMMAND); + + trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd, + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); +} + static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -2030,6 +2050,8 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + ufshcd_add_uic_command_trace(hba, uic_cmd, "send"); + /* Write UIC Cmd */ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, REG_UIC_COMMAND); @@ -4835,6 +4857,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) complete(hba->uic_async_done); retval = IRQ_HANDLED; } + + if (retval == IRQ_HANDLED) + ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, + "complete"); return retval; } diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 5f300739240d..84841b3a7ffd 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -249,6 +249,37 @@ TRACE_EVENT(ufshcd_command, ) ); +TRACE_EVENT(ufshcd_uic_command, + TP_PROTO(const char *dev_name, const char *str, u32 cmd, + u32 arg1, u32 arg2, u32 arg3), + + TP_ARGS(dev_name, str, cmd, arg1, arg2, arg3), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(str, str) + __field(u32, cmd) + __field(u32, arg1) + __field(u32, arg2) + __field(u32, arg3) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __assign_str(str, str); + __entry->cmd = cmd; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + ), + + TP_printk( + "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x", + __get_str(str), __get_str(dev_name), __entry->cmd, + __entry->arg1, __entry->arg2, __entry->arg3 + ) +); + TRACE_EVENT(ufshcd_upiu, TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf), -- cgit v1.2.3-59-g8ed1b From 9357b04624013294e4204b1a837d0a611b9048c3 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 27 May 2020 17:47:31 +0200 Subject: reset: Move reset-simple header out of drivers/reset The reset-simple code can be useful for drivers outside of drivers/reset that have a few reset controls as part of their features. Let's move it to include/linux/reset. 
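A driver outside drivers/reset can then reuse the ops with nothing more than the registration boilerplate below (hypothetical glue driver, nr_resets value illustrative):

#include <linux/reset/reset-simple.h>

static int foo_register_resets(struct platform_device *pdev,
			       void __iomem *base)
{
	struct reset_simple_data *rst;

	rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
	if (!rst)
		return -ENOMEM;

	spin_lock_init(&rst->lock);
	rst->membase = base;
	rst->rcdev.owner = THIS_MODULE;
	rst->rcdev.ops = &reset_simple_ops;
	rst->rcdev.of_node = pdev->dev.of_node;
	rst->rcdev.nr_resets = 32;

	return devm_reset_controller_register(&pdev->dev, &rst->rcdev);
}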
Reviewed-by: Philipp Zabel Signed-off-by: Maxime Ripard Signed-off-by: Philipp Zabel --- drivers/reset/reset-simple.c | 3 +-- drivers/reset/reset-simple.h | 41 ------------------------------------- drivers/reset/reset-socfpga.c | 3 +-- drivers/reset/reset-sunxi.c | 3 +-- drivers/reset/reset-uniphier-glue.c | 3 +-- include/linux/reset/reset-simple.h | 41 +++++++++++++++++++++++++++++++++++++ 6 files changed, 45 insertions(+), 49 deletions(-) delete mode 100644 drivers/reset/reset-simple.h create mode 100644 include/linux/reset/reset-simple.h (limited to 'include') diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index 067e7e7b34f1..c854aa351640 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -18,10 +18,9 @@ #include #include #include +#include #include -#include "reset-simple.h" - static inline struct reset_simple_data * to_reset_simple_data(struct reset_controller_dev *rcdev) { diff --git a/drivers/reset/reset-simple.h b/drivers/reset/reset-simple.h deleted file mode 100644 index 08ccb25a55e6..000000000000 --- a/drivers/reset/reset-simple.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Simple Reset Controller ops - * - * Based on Allwinner SoCs Reset Controller driver - * - * Copyright 2013 Maxime Ripard - * - * Maxime Ripard - */ - -#ifndef __RESET_SIMPLE_H__ -#define __RESET_SIMPLE_H__ - -#include -#include -#include - -/** - * struct reset_simple_data - driver data for simple reset controllers - * @lock: spinlock to protect registers during read-modify-write cycles - * @membase: memory mapped I/O register range - * @rcdev: reset controller device base structure - * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits - * are set to assert the reset. Note that this says nothing about - * the voltage level of the actual reset line. - * @status_active_low: if true, bits read back as cleared while the reset is - * asserted. Otherwise, bits read back as set while the - * reset is asserted. 
- */ -struct reset_simple_data { - spinlock_t lock; - void __iomem *membase; - struct reset_controller_dev rcdev; - bool active_low; - bool status_active_low; -}; - -extern const struct reset_control_ops reset_simple_ops; - -#endif /* __RESET_SIMPLE_H__ */ diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index 96953992c2bb..bdd984296196 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -11,13 +11,12 @@ #include #include #include +#include #include #include #include #include -#include "reset-simple.h" - #define SOCFPGA_NR_BANKS 8 static int a10_reset_init(struct device_node *np) diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index e7f169e57bcf..e752594b6971 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -14,13 +14,12 @@ #include #include #include +#include #include #include #include #include -#include "reset-simple.h" - static int sunxi_reset_init(struct device_node *np) { struct reset_simple_data *data; diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c index 2b188b3bb69a..027990b79f61 100644 --- a/drivers/reset/reset-uniphier-glue.c +++ b/drivers/reset/reset-uniphier-glue.c @@ -9,8 +9,7 @@ #include #include #include - -#include "reset-simple.h" +#include #define MAX_CLKS 2 #define MAX_RSTS 2 diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h new file mode 100644 index 000000000000..08ccb25a55e6 --- /dev/null +++ b/include/linux/reset/reset-simple.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Simple Reset Controller ops + * + * Based on Allwinner SoCs Reset Controller driver + * + * Copyright 2013 Maxime Ripard + * + * Maxime Ripard + */ + +#ifndef __RESET_SIMPLE_H__ +#define __RESET_SIMPLE_H__ + +#include +#include +#include + +/** + * struct reset_simple_data - driver data for simple reset controllers + * @lock: spinlock to protect registers during read-modify-write cycles + * @membase: memory mapped I/O register range + * @rcdev: reset controller device base structure + * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits + * are set to assert the reset. Note that this says nothing about + * the voltage level of the actual reset line. + * @status_active_low: if true, bits read back as cleared while the reset is + * asserted. Otherwise, bits read back as set while the + * reset is asserted. + */ +struct reset_simple_data { + spinlock_t lock; + void __iomem *membase; + struct reset_controller_dev rcdev; + bool active_low; + bool status_active_low; +}; + +extern const struct reset_control_ops reset_simple_ops; + +#endif /* __RESET_SIMPLE_H__ */ -- cgit v1.2.3-59-g8ed1b From a9701376ed0fb61a5be4bb438daf26bd9cfa24b5 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 27 May 2020 17:47:32 +0200 Subject: reset: simple: Add reset callback The reset-simple code lacks a reset callback that is still pretty easy to implement. The only real thing to consider is the delay needed for a device to be reset, so let's expose that as part of the reset-simple driver data. 
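A provider opts in simply by declaring the delay, and consumers keep using the standard reset API (values illustrative):

	/* provider side: device needs at least 10us in reset */
	rst->rcdev.ops = &reset_simple_ops;
	rst->reset_us  = 10;

	/* consumer side: a plain assert/deassert pulse with the declared
	 * delay in between; providers that leave reset_us at 0 still get
	 * -ENOTSUPP here */
	err = reset_control_reset(priv->rstc);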
Reviewed-by: Philipp Zabel Signed-off-by: Maxime Ripard Signed-off-by: Philipp Zabel --- drivers/reset/reset-simple.c | 20 ++++++++++++++++++++ include/linux/reset/reset-simple.h | 7 +++++++ 2 files changed, 27 insertions(+) (limited to 'include') diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index c854aa351640..e066614818a3 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -11,6 +11,7 @@ * Maxime Ripard */ +#include #include #include #include @@ -63,6 +64,24 @@ static int reset_simple_deassert(struct reset_controller_dev *rcdev, return reset_simple_update(rcdev, id, false); } +static int reset_simple_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct reset_simple_data *data = to_reset_simple_data(rcdev); + int ret; + + if (!data->reset_us) + return -ENOTSUPP; + + ret = reset_simple_assert(rcdev, id); + if (ret) + return ret; + + usleep_range(data->reset_us, data->reset_us * 2); + + return reset_simple_deassert(rcdev, id); +} + static int reset_simple_status(struct reset_controller_dev *rcdev, unsigned long id) { @@ -80,6 +99,7 @@ static int reset_simple_status(struct reset_controller_dev *rcdev, const struct reset_control_ops reset_simple_ops = { .assert = reset_simple_assert, .deassert = reset_simple_deassert, + .reset = reset_simple_reset, .status = reset_simple_status, }; EXPORT_SYMBOL_GPL(reset_simple_ops); diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h index 08ccb25a55e6..c3e44f45b0f7 100644 --- a/include/linux/reset/reset-simple.h +++ b/include/linux/reset/reset-simple.h @@ -27,6 +27,12 @@ * @status_active_low: if true, bits read back as cleared while the reset is * asserted. Otherwise, bits read back as set while the * reset is asserted. + * @reset_us: Minimum delay in microseconds needed that needs to be + * waited for between an assert and a deassert to reset the + * device. If multiple consumers with different delay + * requirements are connected to this controller, it must + * be the largest minimum delay. 0 means that such a delay is + * unknown and the reset operation is unsupported. */ struct reset_simple_data { spinlock_t lock; @@ -34,6 +40,7 @@ struct reset_simple_data { struct reset_controller_dev rcdev; bool active_low; bool status_active_low; + unsigned int reset_us; }; extern const struct reset_control_ops reset_simple_ops; -- cgit v1.2.3-59-g8ed1b From 8a307d3601bcca99723b1a45e785adc3c9d3a476 Mon Sep 17 00:00:00 2001 From: Artur Świgoń Date: Thu, 21 May 2020 14:28:39 +0200 Subject: interconnect: Export of_icc_get_from_provider() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch makes the above function public (for use in exynos-bus devfreq driver). Signed-off-by: Artur Świgoń Reviewed-by: Krzysztof Kozlowski Reviewed-by: Chanwoo Choi Signed-off-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20200521122841.8867-2-s.nawrocki@samsung.com Signed-off-by: Georgi Djakov --- drivers/interconnect/core.c | 3 ++- include/linux/interconnect-provider.h | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index e5f998744501..9e2d55d94fb4 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell); * Returns a valid pointer to struct icc_node on success or ERR_PTR() * on failure. 
*/ -static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) +struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) { struct icc_node *node = ERR_PTR(-EPROBE_DEFER); struct icc_provider *provider; @@ -353,6 +353,7 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) return node; } +EXPORT_SYMBOL_GPL(of_icc_get_from_provider); static void devm_icc_release(struct device *dev, void *res) { diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 0c494534b4d3..c92be2a90fa0 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -103,6 +103,7 @@ void icc_node_del(struct icc_node *node); int icc_nodes_remove(struct icc_provider *provider); int icc_provider_add(struct icc_provider *provider); int icc_provider_del(struct icc_provider *provider); +struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec); #else @@ -154,6 +155,11 @@ static inline int icc_provider_del(struct icc_provider *provider) return -ENOTSUPP; } +static inline struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) +{ + return ERR_PTR(-ENOTSUPP); +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_PROVIDER_H */ -- cgit v1.2.3-59-g8ed1b From 65461e26b1fe73bde4326367ee23cc1a24e6c33e Mon Sep 17 00:00:00 2001 From: Artur Świgoń Date: Thu, 21 May 2020 14:28:41 +0200 Subject: interconnect: Allow inter-provider pairs to be configured MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for a new boolean 'inter_set' field in struct icc_provider. Setting it to 'true' enables calling '->set' for inter-provider node pairs. All existing users of the interconnect framework allocate this structure with kzalloc, and are therefore unaffected by this change. This makes it easier for hierarchies like exynos-bus, where every bus is probed separately and registers a separate interconnect provider, to model constraints between buses. Signed-off-by: Artur Świgoń Signed-off-by: Sylwester Nawrocki Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20200521122841.8867-4-s.nawrocki@samsung.com Signed-off-by: Georgi Djakov --- drivers/interconnect/core.c | 11 +++++------ include/linux/interconnect-provider.h | 2 ++ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 8b4d50d59e16..609e206bf598 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -263,23 +263,22 @@ static int aggregate_requests(struct icc_node *node) static int apply_constraints(struct icc_path *path) { struct icc_node *next, *prev = NULL; + struct icc_provider *p; int ret = -EINVAL; int i; for (i = 0; i < path->num_nodes; i++) { next = path->reqs[i].node; + p = next->provider; - /* - * Both endpoints should be valid master-slave pairs of the - * same interconnect provider that will be configured. 
- */ - if (!prev || next->provider != prev->provider) { + /* both endpoints should be valid master-slave pairs */ + if (!prev || (p != prev->provider && !p->inter_set)) { prev = next; continue; } /* set the constraints */ - ret = next->provider->set(prev, next); + ret = p->set(prev, next); if (ret) goto out; diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index c92be2a90fa0..38701925ab91 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -41,6 +41,7 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users + * @inter_set: whether inter-provider pairs will be configured with @set * @data: pointer to private data */ struct icc_provider { @@ -53,6 +54,7 @@ struct icc_provider { struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; + bool inter_set; void *data; }; -- cgit v1.2.3-59-g8ed1b From 12a400b016ab955be8e4c569346fa18aaceed9d7 Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 16 Jun 2020 16:43:23 +0300 Subject: interconnect: Mark all dummy functions as static inline There are a few dummy stub functions that are not marked as static inline yet. Currently this header file is not included in any other file outside of drivers/interconnect/, but that might not be the case in the future. If this file gets included and the framework is disabled, we will be see warnings. Let's fix this in advance. Link: https://lore.kernel.org/r/20200228145945.13579-1-georgi.djakov@linaro.org Signed-off-by: Georgi Djakov --- include/linux/interconnect-provider.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 38701925ab91..4735518de515 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -120,7 +120,7 @@ static inline struct icc_node *icc_node_create(int id) return ERR_PTR(-ENOTSUPP); } -void icc_node_destroy(int id) +static inline void icc_node_destroy(int id) { } @@ -129,16 +129,16 @@ static inline int icc_link_create(struct icc_node *node, const int dst_id) return -ENOTSUPP; } -int icc_link_destroy(struct icc_node *src, struct icc_node *dst) +static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst) { return -ENOTSUPP; } -void icc_node_add(struct icc_node *node, struct icc_provider *provider) +static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider) { } -void icc_node_del(struct icc_node *node) +static inline void icc_node_del(struct icc_node *node) { } -- cgit v1.2.3-59-g8ed1b From e370f886fefc23b9ca3011760d9376f1947eb321 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 16 Jun 2020 15:39:25 +0200 Subject: EDAC: Remove edac_get_dimm_by_index() It is unused now. Signed-off-by: Borislav Petkov --- include/linux/edac.h | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/edac.h b/include/linux/edac.h index 6eb7d55d7c3d..15e8f3d8a895 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -594,27 +594,6 @@ struct mem_ctl_info { ? 
(mci)->dimms[(dimm)->idx + 1] \ : NULL) -/** - * edac_get_dimm_by_index - Get DIMM info at @index from a memory - * controller - * - * @mci: MC descriptor struct mem_ctl_info - * @index: index in the memory controller's DIMM array - * - * Returns a struct dimm_info * or NULL on failure. - */ -static inline struct dimm_info * -edac_get_dimm_by_index(struct mem_ctl_info *mci, int index) -{ - if (index < 0 || index >= mci->tot_dimms) - return NULL; - - if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) - return NULL; - - return mci->dimms[index]; -} - /** * edac_get_dimm - Get DIMM info from a memory controller given by * [layer0,layer1,layer2] position @@ -650,6 +629,12 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci, if (mci->n_layers > 2) index = index * mci->layers[2].size + layer2; - return edac_get_dimm_by_index(mci, index); + if (index < 0 || index >= mci->tot_dimms) + return NULL; + + if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) + return NULL; + + return mci->dimms[index]; } #endif /* _LINUX_EDAC_H_ */ -- cgit v1.2.3-59-g8ed1b From 278a5fbaed89dacd04e9d052f4594ffd0e0585de Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 24 May 2019 11:30:34 +0200 Subject: open: add close_range() This adds the close_range() syscall. It allows to efficiently close a range of file descriptors up to all file descriptors of a calling task. I was contacted by FreeBSD as they wanted to have the same close_range() syscall as we proposed here. We've coordinated this and in the meantime, Kyle was fast enough to merge close_range() into FreeBSD already in April: https://reviews.freebsd.org/D21627 https://svnweb.freebsd.org/base?view=revision&revision=359836 and the current plan is to backport close_range() to FreeBSD 12.2 (cf. [2]) once its merged in Linux too. Python is in the process of switching to close_range() on FreeBSD and they are waiting on us to merge this to switch on Linux as well: https://bugs.python.org/issue38061 The syscall came up in a recent discussion around the new mount API and making new file descriptor types cloexec by default. During this discussion, Al suggested the close_range() syscall (cf. [1]). Note, a syscall in this manner has been requested by various people over time. First, it helps to close all file descriptors of an exec()ing task. This can be done safely via (quoting Al's example from [1] verbatim): /* that exec is sensitive */ unshare(CLONE_FILES); /* we don't want anything past stderr here */ close_range(3, ~0U); execve(....); The code snippet above is one way of working around the problem that file descriptors are not cloexec by default. This is aggravated by the fact that we can't just switch them over without massively regressing userspace. For a whole class of programs having an in-kernel method of closing all file descriptors is very helpful (e.g. demons, service managers, programming language standard libraries, container managers etc.). (Please note, unshare(CLONE_FILES) should only be needed if the calling task is multi-threaded and shares the file descriptor table with another thread in which case two threads could race with one thread allocating file descriptors and the other one closing them via close_range(). For the general case close_range() before the execve() is sufficient.) Second, it allows userspace to avoid implementing closing all file descriptors by parsing through /proc//fd/* and calling close() on each file descriptor. 
From looking at various large(ish) userspace code bases this or similar patterns are very common in: - service managers (cf. [4]) - libcs (cf. [6]) - container runtimes (cf. [5]) - programming language runtimes/standard libraries - Python (cf. [2]) - Rust (cf. [7], [8]) As Dmitry pointed out there's even a long-standing glibc bug about missing kernel support for this task (cf. [3]). In addition, the syscall will also work for tasks that do not have procfs mounted and on kernels that do not have procfs support compiled in. In such situations the only way to make sure that all file descriptors are closed is to call close() on each file descriptor up to UINT_MAX or RLIMIT_NOFILE, OPEN_MAX trickery (cf. comment [8] on Rust). The performance is striking. For good measure, comparing the following simple close_all_fds() userspace implementation that is essentially just glibc's version in [6]: static int close_all_fds(void) { int dir_fd; DIR *dir; struct dirent *direntp; dir = opendir("/proc/self/fd"); if (!dir) return -1; dir_fd = dirfd(dir); while ((direntp = readdir(dir))) { int fd; if (strcmp(direntp->d_name, ".") == 0) continue; if (strcmp(direntp->d_name, "..") == 0) continue; fd = atoi(direntp->d_name); if (fd == dir_fd || fd == 0 || fd == 1 || fd == 2) continue; close(fd); } closedir(dir); return 0; } to close_range() yields: 1. closing 4 open files: - close_all_fds(): ~280 us - close_range(): ~24 us 2. closing 1000 open files: - close_all_fds(): ~5000 us - close_range(): ~800 us close_range() is designed to allow for some flexibility. Specifically, it does not simply always close all open file descriptors of a task. Instead, callers can specify an upper bound. This is e.g. useful for scenarios where specific file descriptors are created with well-known numbers that are supposed to be excluded from getting closed. For extra paranoia close_range() comes with a flags argument. This can e.g. be used to implement extension. Once can imagine userspace wanting to stop at the first error instead of ignoring errors under certain circumstances. There might be other valid ideas in the future. In any case, a flag argument doesn't hurt and keeps us on the safe side. From an implementation side this is kept rather dumb. It saw some input from David and Jann but all nonsense is obviously my own! - Errors to close file descriptors are currently ignored. (Could be changed by setting a flag in the future if needed.) - __close_range() is a rather simplistic wrapper around __close_fd(). My reasoning behind this is based on the nature of how __close_fd() needs to release an fd. But maybe I misunderstood specifics: We take the files_lock and rcu-dereference the fdtable of the calling task, we find the entry in the fdtable, get the file and need to release files_lock before calling filp_close(). In the meantime the fdtable might have been altered so we can't just retake the spinlock and keep the old rcu-reference of the fdtable around. Instead we need to grab a fresh reference to the fdtable. If my reasoning is correct then there's really no point in fancyfying __close_range(): We just need to rcu-dereference the fdtable of the calling task once to cap the max_fd value correctly and then go on calling __close_fd() in a loop. 
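Since no libc wrapper will exist when this lands, the simplest way to exercise the new syscall from userspace is a raw syscall(2) call. The sketch below is illustrative only; the fallback number 436 matches the asm-generic table wired up in the follow-up patch, and the flags argument is left at 0.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_close_range
#define __NR_close_range 436	/* asm-generic number from the next patch */
#endif

static long sys_close_range(unsigned int fd, unsigned int max_fd,
			    unsigned int flags)
{
	return syscall(__NR_close_range, fd, max_fd, flags);
}

int main(int argc, char *argv[])
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <program> [args...]\n", argv[0]);
		return 1;
	}

	/* Nothing past stderr survives into the exec'ed program. */
	if (sys_close_range(3, ~0U, 0) < 0) {
		perror("close_range");
		return 1;
	}

	execvp(argv[1], &argv[1]);
	perror("execvp");
	return 1;
}

For a multi-threaded caller that shares its file descriptor table, the unshare(CLONE_FILES) step quoted above still applies before the close_range() call.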
/* References */ [1]: https://lore.kernel.org/lkml/20190516165021.GD17978@ZenIV.linux.org.uk/ [2]: https://github.com/python/cpython/blob/9e4f2f3a6b8ee995c365e86d976937c141d867f8/Modules/_posixsubprocess.c#L220 [3]: https://sourceware.org/bugzilla/show_bug.cgi?id=10353#c7 [4]: https://github.com/systemd/systemd/blob/5238e9575906297608ff802a27e2ff9effa3b338/src/basic/fd-util.c#L217 [5]: https://github.com/lxc/lxc/blob/ddf4b77e11a4d08f09b7b9cd13e593f8c047edc5/src/lxc/start.c#L236 [6]: https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/grantpt.c;h=2030e07fa6e652aac32c775b8c6e005844c3c4eb;hb=HEAD#l17 Note that this is an internal implementation that is not exported. Currently, libc seems to not provide an exported version of this because of missing kernel support to do this. Note, in a recent patch series Florian made grantpt() a nop thereby removing the code referenced here. [7]: https://github.com/rust-lang/rust/issues/12148 [8]: https://github.com/rust-lang/rust/blob/5f47c0613ed4eb46fca3633c1297364c09e5e451/src/libstd/sys/unix/process2.rs#L303-L308 Rust's solution is slightly different but is equally unperformant. Rust calls getdtablesize() which is a glibc library function that simply returns the current RLIMIT_NOFILE or OPEN_MAX values. Rust then goes on to call close() on each fd. That's obviously overkill for most tasks. Rarely, tasks - especially non-demons - hit RLIMIT_NOFILE or OPEN_MAX. Let's be nice and assume an unprivileged user with RLIMIT_NOFILE set to 1024. Even in this case, there's a very high chance that in the common case Rust is calling the close() syscall 1021 times pointlessly if the task just has 0, 1, and 2 open. Suggested-by: Al Viro Signed-off-by: Christian Brauner Cc: Arnd Bergmann Cc: Kyle Evans Cc: Jann Horn Cc: David Howells Cc: Dmitry V. Levin Cc: Oleg Nesterov Cc: Linus Torvalds Cc: Florian Weimer Cc: linux-api@vger.kernel.org --- fs/file.c | 64 ++++++++++++++++++++++++++++++++++++++++++------ fs/open.c | 20 +++++++++++++++ include/linux/fdtable.h | 2 ++ include/linux/syscalls.h | 2 ++ 4 files changed, 80 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/file.c b/fs/file.c index abb8b7081d7a..1b8ff05e8311 100644 --- a/fs/file.c +++ b/fs/file.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -620,12 +621,9 @@ void fd_install(unsigned int fd, struct file *file) EXPORT_SYMBOL(fd_install); -/* - * The same warnings as for __alloc_fd()/__fd_install() apply here... - */ -int __close_fd(struct files_struct *files, unsigned fd) +static struct file *pick_file(struct files_struct *files, unsigned fd) { - struct file *file; + struct file *file = NULL; struct fdtable *fdt; spin_lock(&files->file_lock); @@ -637,15 +635,65 @@ int __close_fd(struct files_struct *files, unsigned fd) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); - spin_unlock(&files->file_lock); - return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); - return -EBADF; + return file; +} + +/* + * The same warnings as for __alloc_fd()/__fd_install() apply here... + */ +int __close_fd(struct files_struct *files, unsigned fd) +{ + struct file *file; + + file = pick_file(files, fd); + if (!file) + return -EBADF; + + return filp_close(file, files); } EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ +/** + * __close_range() - Close all file descriptors in a given range. 
+ * + * @fd: starting file descriptor to close + * @max_fd: last file descriptor to close + * + * This closes a range of file descriptors. All file descriptors + * from @fd up to and including @max_fd are closed. + */ +int __close_range(struct files_struct *files, unsigned fd, unsigned max_fd) +{ + unsigned int cur_max; + + if (fd > max_fd) + return -EINVAL; + + rcu_read_lock(); + cur_max = files_fdtable(files)->max_fds; + rcu_read_unlock(); + + /* cap to last valid index into fdtable */ + cur_max--; + + max_fd = min(max_fd, cur_max); + while (fd <= max_fd) { + struct file *file; + + file = pick_file(files, fd++); + if (!file) + continue; + + filp_close(file, files); + cond_resched(); + } + + return 0; +} + /* * variant of __close_fd that gets a ref on the file for later fput. * The caller must ensure that filp_close() called on the file, and then diff --git a/fs/open.c b/fs/open.c index 6cd48a61cda3..073ea3c45347 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1310,6 +1310,26 @@ SYSCALL_DEFINE1(close, unsigned int, fd) return retval; } +/** + * close_range() - Close all file descriptors in a given range. + * + * @fd: starting file descriptor to close + * @max_fd: last file descriptor to close + * @flags: reserved for future extensions + * + * This closes a range of file descriptors. All file descriptors + * from @fd up to and including @max_fd are closed. + * Currently, errors to close a given file descriptor are ignored. + */ +SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, + unsigned int, flags) +{ + if (flags) + return -EINVAL; + + return __close_range(current->files, fd, max_fd); +} + /* * This routine simulates a hangup on the tty, to arrange that users * are given clean terminals at login time. diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index f07c55ea0c22..fcd07181a365 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -121,6 +121,8 @@ extern void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); extern int __close_fd(struct files_struct *files, unsigned int fd); +extern int __close_range(struct files_struct *files, unsigned int fd, + unsigned int max_fd); extern int __close_fd_get_file(unsigned int fd, struct file **res); extern struct kmem_cache *files_cachep; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7c354c2955f5..b22382db89cf 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -444,6 +444,8 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, asmlinkage long sys_openat2(int dfd, const char __user *filename, struct open_how *how, size_t size); asmlinkage long sys_close(unsigned int fd); +asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd, + unsigned int flags); asmlinkage long sys_vhangup(void); /* fs/pipe.c */ -- cgit v1.2.3-59-g8ed1b From 9b4feb630e8e9801603f3cab3a36369e3c1cf88d Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 24 May 2019 11:31:44 +0200 Subject: arch: wire-up close_range() This wires up the close_range() syscall into all arches at once. Suggested-by: Arnd Bergmann Signed-off-by: Christian Brauner Reviewed-by: Oleg Nesterov Acked-by: Arnd Bergmann Acked-by: Michael Ellerman (powerpc) Cc: Jann Horn Cc: David Howells Cc: Dmitry V. 
Levin Cc: Linus Torvalds Cc: Al Viro Cc: Florian Weimer Cc: linux-api@vger.kernel.org Cc: linux-alpha@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-ia64@vger.kernel.org Cc: linux-m68k@lists.linux-m68k.org Cc: linux-mips@vger.kernel.org Cc: linux-parisc@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: sparclinux@vger.kernel.org Cc: linux-xtensa@linux-xtensa.org Cc: linux-arch@vger.kernel.org Cc: x86@kernel.org --- arch/alpha/kernel/syscalls/syscall.tbl | 1 + arch/arm/tools/syscall.tbl | 1 + arch/arm64/include/asm/unistd32.h | 2 ++ arch/ia64/kernel/syscalls/syscall.tbl | 1 + arch/m68k/kernel/syscalls/syscall.tbl | 1 + arch/microblaze/kernel/syscalls/syscall.tbl | 1 + arch/mips/kernel/syscalls/syscall_n32.tbl | 1 + arch/mips/kernel/syscalls/syscall_n64.tbl | 1 + arch/mips/kernel/syscalls/syscall_o32.tbl | 1 + arch/parisc/kernel/syscalls/syscall.tbl | 1 + arch/powerpc/kernel/syscalls/syscall.tbl | 1 + arch/s390/kernel/syscalls/syscall.tbl | 1 + arch/sh/kernel/syscalls/syscall.tbl | 1 + arch/sparc/kernel/syscalls/syscall.tbl | 1 + arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/entry/syscalls/syscall_64.tbl | 1 + arch/xtensa/kernel/syscalls/syscall.tbl | 1 + include/uapi/asm-generic/unistd.h | 2 ++ 18 files changed, 20 insertions(+) (limited to 'include') diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index 5ddd128d4b7a..a28fb211881d 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -475,6 +475,7 @@ 543 common fspick sys_fspick 544 common pidfd_open sys_pidfd_open # 545 reserved for clone3 +546 common close_range sys_close_range 547 common openat2 sys_openat2 548 common pidfd_getfd sys_pidfd_getfd 549 common faccessat2 sys_faccessat2 diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index d5cae5ffede0..7e8ee4adf269 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -449,6 +449,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 6d95d0c8bf2f..c760b9e159f5 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -879,6 +879,8 @@ __SYSCALL(__NR_fspick, sys_fspick) __SYSCALL(__NR_pidfd_open, sys_pidfd_open) #define __NR_clone3 435 __SYSCALL(__NR_clone3, sys_clone3) +#define __NR_close_range 436 +__SYSCALL(__NR_close_range, sys_close_range) #define __NR_openat2 437 __SYSCALL(__NR_openat2, sys_openat2) #define __NR_pidfd_getfd 438 diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index 49e325b604b3..ced9c83e47c9 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -356,6 +356,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index f71b1bbcc198..1a4822de7292 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -435,6 +435,7 @@ 433 common fspick sys_fspick 
434 common pidfd_open sys_pidfd_open 435 common clone3 __sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index edacc4561f2b..a3f4be8e7238 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -441,6 +441,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index f777141f5256..501bc09643bd 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -374,6 +374,7 @@ 433 n32 fspick sys_fspick 434 n32 pidfd_open sys_pidfd_open 435 n32 clone3 __sys_clone3 +436 n32 close_range sys_close_range 437 n32 openat2 sys_openat2 438 n32 pidfd_getfd sys_pidfd_getfd 439 n32 faccessat2 sys_faccessat2 diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index da8c76394e17..391acbf425a0 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -350,6 +350,7 @@ 433 n64 fspick sys_fspick 434 n64 pidfd_open sys_pidfd_open 435 n64 clone3 __sys_clone3 +436 n64 close_range sys_close_range 437 n64 openat2 sys_openat2 438 n64 pidfd_getfd sys_pidfd_getfd 439 n64 faccessat2 sys_faccessat2 diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 13280625d312..d28f12f641d3 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -423,6 +423,7 @@ 433 o32 fspick sys_fspick 434 o32 pidfd_open sys_pidfd_open 435 o32 clone3 __sys_clone3 +436 o32 close_range sys_close_range 437 o32 openat2 sys_openat2 438 o32 pidfd_getfd sys_pidfd_getfd 439 o32 faccessat2 sys_faccessat2 diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 5a758fa6ec52..5d76b8f15197 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -433,6 +433,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3_wrapper +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index f833a3190822..dd87a782d80e 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -525,6 +525,7 @@ 435 32 clone3 ppc_clone3 sys_clone3 435 64 clone3 sys_clone3 435 spu clone3 sys_ni_syscall +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index bfdcb7633957..effb5195608c 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -438,6 +438,7 @@ 433 common fspick sys_fspick sys_fspick 434 common pidfd_open sys_pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 sys_clone3 +436 common close_range sys_close_range 
sys_close_range 437 common openat2 sys_openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 sys_faccessat2 diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index acc35daa1b79..96848db9659e 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -438,6 +438,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 8004a276cb74..d6447d08c9a1 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -481,6 +481,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index d8f8a1a69ed1..3f0c6546b47c 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -440,6 +440,7 @@ 433 i386 fspick sys_fspick 434 i386 pidfd_open sys_pidfd_open 435 i386 clone3 sys_clone3 +436 i386 close_range sys_close_range 437 i386 openat2 sys_openat2 438 i386 pidfd_getfd sys_pidfd_getfd 439 i386 faccessat2 sys_faccessat2 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 78847b32e137..f8637c2c863d 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -357,6 +357,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 69d0d73876b3..d216ccba42f7 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -406,6 +406,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f4a01305d9a6..31001e3323bc 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -850,6 +850,8 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open) #define __NR_clone3 435 __SYSCALL(__NR_clone3, sys_clone3) #endif +#define __NR_close_range 436 +__SYSCALL(__NR_close_range, sys_close_range) #define __NR_openat2 437 __SYSCALL(__NR_openat2, sys_openat2) -- cgit v1.2.3-59-g8ed1b From 60997c3d45d9a67daf01c56d805ae4fec37e0bd8 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Wed, 3 Jun 2020 21:48:55 +0200 Subject: close_range: add CLOSE_RANGE_UNSHARE One of the use-cases of close_range() is to drop file descriptors just before execve(). This would usually be expressed in the sequence: unshare(CLONE_FILES); close_range(3, ~0U); as pointed out by Linus it might be desirable to have this be a part of close_range() itself under a new flag CLOSE_RANGE_UNSHARE. 
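Expressed as hypothetical userspace code, the flag collapses the two-step sequence above into a single call. In the sketch below the syscall wrapper and the exec'ed program are placeholders; the CLOSE_RANGE_UNSHARE value matches the uapi header added by this patch and the syscall number matches the asm-generic table from the previous patch.

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_close_range
#define __NR_close_range 436		/* asm-generic number from this series */
#endif
#ifndef CLOSE_RANGE_UNSHARE
#define CLOSE_RANGE_UNSHARE (1U << 1)	/* value from the new uapi header */
#endif

static long sys_close_range(unsigned int fd, unsigned int max_fd,
			    unsigned int flags)
{
	return syscall(__NR_close_range, fd, max_fd, flags);
}

int main(int argc, char *argv[])
{
	if (argc < 2)
		return 1;

	/*
	 * Old sequence: unshare(CLONE_FILES); close_range(3, ~0U, 0);
	 * New: unshare the fd table and close all fds above stderr in one call.
	 */
	sys_close_range(3, ~0U, CLOSE_RANGE_UNSHARE);

	execvp(argv[1], &argv[1]);
	return 1;
}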
This expands {dup,unshare)_fd() to take a max_fds argument that indicates the maximum number of file descriptors to copy from the old struct files. When the user requests that all file descriptors are supposed to be closed via close_range(min, max) then we can cap via unshare_fd(min) and hence don't need to do any of the heavy fput() work for everything above min. The patch makes it so that if CLOSE_RANGE_UNSHARE is requested and we do in fact currently share our file descriptor table we create a new private copy. We then close all fds in the requested range and finally after we're done we install the new fd table. Suggested-by: Linus Torvalds Signed-off-by: Christian Brauner --- fs/file.c | 65 +++++++++++++++++++++++++++++++++++----- fs/open.c | 5 +--- include/linux/fdtable.h | 8 +++-- include/uapi/linux/close_range.h | 9 ++++++ kernel/fork.c | 11 +++---- 5 files changed, 79 insertions(+), 19 deletions(-) create mode 100644 include/uapi/linux/close_range.h (limited to 'include') diff --git a/fs/file.c b/fs/file.c index 1b8ff05e8311..340bc9569f9d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -19,6 +19,7 @@ #include #include #include +#include unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; @@ -265,12 +266,22 @@ static unsigned int count_open_files(struct fdtable *fdt) return i; } +static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) +{ + unsigned int count; + + count = count_open_files(fdt); + if (max_fds < NR_OPEN_DEFAULT) + max_fds = NR_OPEN_DEFAULT; + return min(count, max_fds); +} + /* * Allocate a new files structure and copy contents from the * passed in files structure. * errorp will be valid only when the returned files_struct is NULL. */ -struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) +struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp) { struct files_struct *newf; struct file **old_fds, **new_fds; @@ -297,7 +308,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); - open_files = count_open_files(old_fdt); + open_files = sane_fdtable_size(old_fdt, max_fds); /* * Check whether we need to allocate a larger fd array and fd set. @@ -328,7 +339,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); - open_files = count_open_files(old_fdt); + open_files = sane_fdtable_size(old_fdt, max_fds); } copy_fd_bitmaps(new_fdt, old_fdt, open_files); @@ -665,32 +676,72 @@ EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ * This closes a range of file descriptors. All file descriptors * from @fd up to and including @max_fd are closed. 
*/ -int __close_range(struct files_struct *files, unsigned fd, unsigned max_fd) +int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) { unsigned int cur_max; + struct task_struct *me = current; + struct files_struct *cur_fds = me->files, *fds = NULL; + + if (flags & ~CLOSE_RANGE_UNSHARE) + return -EINVAL; if (fd > max_fd) return -EINVAL; rcu_read_lock(); - cur_max = files_fdtable(files)->max_fds; + cur_max = files_fdtable(cur_fds)->max_fds; rcu_read_unlock(); /* cap to last valid index into fdtable */ cur_max--; + if (flags & CLOSE_RANGE_UNSHARE) { + int ret; + unsigned int max_unshare_fds = NR_OPEN_MAX; + + /* + * If the requested range is greater than the current maximum, + * we're closing everything so only copy all file descriptors + * beneath the lowest file descriptor. + */ + if (max_fd >= cur_max) + max_unshare_fds = fd; + + ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds); + if (ret) + return ret; + + /* + * We used to share our file descriptor table, and have now + * created a private one, make sure we're using it below. + */ + if (fds) + swap(cur_fds, fds); + } + max_fd = min(max_fd, cur_max); while (fd <= max_fd) { struct file *file; - file = pick_file(files, fd++); + file = pick_file(cur_fds, fd++); if (!file) continue; - filp_close(file, files); + filp_close(file, cur_fds); cond_resched(); } + if (fds) { + /* + * We're done closing the files we were supposed to. Time to install + * the new file descriptor table and drop the old one. + */ + task_lock(me); + me->files = cur_fds; + task_unlock(me); + put_files_struct(fds); + } + return 0; } diff --git a/fs/open.c b/fs/open.c index 073ea3c45347..5e62f18adc5b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1324,10 +1324,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd) SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, unsigned int, flags) { - if (flags) - return -EINVAL; - - return __close_range(current->files, fd, max_fd); + return __close_range(fd, max_fd, flags); } /* diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index fcd07181a365..a32bf47c593e 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -22,6 +22,7 @@ * as this is the granularity returned by copy_fdset(). 
*/ #define NR_OPEN_DEFAULT BITS_PER_LONG +#define NR_OPEN_MAX ~0U struct fdtable { unsigned int max_fds; @@ -109,7 +110,7 @@ struct files_struct *get_files_struct(struct task_struct *); void put_files_struct(struct files_struct *fs); void reset_files_struct(struct files_struct *); int unshare_files(struct files_struct **); -struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; +struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy; void do_close_on_exec(struct files_struct *); int iterate_fd(struct files_struct *, unsigned, int (*)(const void *, struct file *, unsigned), @@ -121,9 +122,10 @@ extern void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); extern int __close_fd(struct files_struct *files, unsigned int fd); -extern int __close_range(struct files_struct *files, unsigned int fd, - unsigned int max_fd); +extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); extern int __close_fd_get_file(unsigned int fd, struct file **res); +extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, + struct files_struct **new_fdp); extern struct kmem_cache *files_cachep; diff --git a/include/uapi/linux/close_range.h b/include/uapi/linux/close_range.h new file mode 100644 index 000000000000..6928a9fdee3c --- /dev/null +++ b/include/uapi/linux/close_range.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_CLOSE_RANGE_H +#define _UAPI_LINUX_CLOSE_RANGE_H + +/* Unshare the file descriptor table before closing file descriptors. */ +#define CLOSE_RANGE_UNSHARE (1U << 1) + +#endif /* _UAPI_LINUX_CLOSE_RANGE_H */ + diff --git a/kernel/fork.c b/kernel/fork.c index 142b23645d82..8948121c8454 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1474,7 +1474,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk) goto out; } - newf = dup_fd(oldf, &error); + newf = dup_fd(oldf, NR_OPEN_MAX, &error); if (!newf) goto out; @@ -2907,14 +2907,15 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) /* * Unshare file descriptor table if it is being shared */ -static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) +int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, + struct files_struct **new_fdp) { struct files_struct *fd = current->files; int error = 0; if ((unshare_flags & CLONE_FILES) && (fd && atomic_read(&fd->count) > 1)) { - *new_fdp = dup_fd(fd, &error); + *new_fdp = dup_fd(fd, max_fds, &error); if (!*new_fdp) return error; } @@ -2974,7 +2975,7 @@ int ksys_unshare(unsigned long unshare_flags) err = unshare_fs(unshare_flags, &new_fs); if (err) goto bad_unshare_out; - err = unshare_fd(unshare_flags, &new_fd); + err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd); if (err) goto bad_unshare_cleanup_fs; err = unshare_userns(unshare_flags, &new_cred); @@ -3063,7 +3064,7 @@ int unshare_files(struct files_struct **displaced) struct files_struct *copy = NULL; int error; - error = unshare_fd(CLONE_FILES, ©); + error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, ©); if (error || !copy) { *displaced = NULL; return error; -- cgit v1.2.3-59-g8ed1b From 47ec7f09bc107720905c96bc37771e4ed1ff0aed Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 13 May 2020 11:47:49 -0700 Subject: dmaengine: cookie bypass for out of order completion The cookie tracking in dmaengine expects all submissions completed in order. 
Some DMA devices like Intel DSA can complete submissions out of order, especially if configured with a work queue sharing multiple DMA engines. Add a status DMA_OUT_OF_ORDER that tx_status can be returned for those DMA devices. The user should use callbacks to track the completion rather than the DMA cookie. This would address the issue of dmatest complaining that descriptors are "busy" when the cookie count goes backwards due to out of order completion. Add DMA_COMPLETION_NO_ORDER DMA capability to allow the driver to flag the device's ability to complete operations out of order. Reported-by: Swathi Kovvuri Signed-off-by: Dave Jiang Tested-by: Swathi Kovvuri Link: https://lore.kernel.org/r/158939557151.20335.12404113976045569870.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- Documentation/driver-api/dmaengine/provider.rst | 19 +++++++++++++++++++ drivers/dma/dmatest.c | 11 ++++++++++- drivers/dma/idxd/dma.c | 3 ++- include/linux/dmaengine.h | 2 ++ 4 files changed, 33 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index 56e5833e8a07..ce68315482b1 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -239,6 +239,22 @@ Currently, the types available are: want to transfer a portion of uncompressed data directly to the display to print it +- DMA_COMPLETION_NO_ORDER + + - The device does not support in order completion. + + - The driver should return DMA_OUT_OF_ORDER for device_tx_status if + the device is setting this capability. + + - All cookie tracking and checking API should be treated as invalid if + the device exports this capability. + + - At this point, this is incompatible with polling option for dmatest. + + - If this cap is set, the user is recommended to provide an unique + identifier for each descriptor sent to the DMA device in order to + properly track the completion. + These various types will also affect how the source and destination addresses change over time. @@ -399,6 +415,9 @@ supported. - In the case of a cyclic transfer, it should only take into account the current period. + - Should return DMA_OUT_OF_ORDER if the device does not support in order + completion and is completing the operation out of order. + - This function can be called in an interrupt context. - device_config diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b175229a4b01..18f10154ba19 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -829,7 +829,10 @@ static int dmatest_func(void *data) result("test timed out", total_tests, src->off, dst->off, len, 0); goto error_unmap_continue; - } else if (status != DMA_COMPLETE) { + } else if (status != DMA_COMPLETE && + !(dma_has_cap(DMA_COMPLETION_NO_ORDER, + dev->cap_mask) && + status == DMA_OUT_OF_ORDER)) { result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src->off, @@ -1007,6 +1010,12 @@ static int dmatest_add_channel(struct dmatest_info *info, dtc->chan = chan; INIT_LIST_HEAD(&dtc->threads); + if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) && + info->params.polled) { + info->params.polled = false; + pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n"); + } + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { if (dmatest == 0) { cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index c64c1429d160..0c892cbd72e0 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -133,7 +133,7 @@ static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - return dma_cookie_status(dma_chan, cookie, txstate); + return DMA_OUT_OF_ORDER; } /* @@ -174,6 +174,7 @@ int idxd_register_dma_device(struct idxd_device *idxd) INIT_LIST_HEAD(&dma->channels); dma->dev = &idxd->pdev->dev; + dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e1c03339918f..9f9a13a2c01f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -39,6 +39,7 @@ enum dma_status { DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, + DMA_OUT_OF_ORDER, }; /** @@ -61,6 +62,7 @@ enum dma_transaction_type { DMA_SLAVE, DMA_CYCLIC, DMA_INTERLEAVE, + DMA_COMPLETION_NO_ORDER, /* last transaction type for creation of the capabilities mask */ DMA_TX_TYPE_END, }; -- cgit v1.2.3-59-g8ed1b From 2accfa69050c2a0d6fc6106f609208b3e9622b26 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 17 Jun 2020 07:14:10 -0700 Subject: cpu/speculation: Add prototype for cpu_show_srbds() 0-day is not happy that there is no prototype for cpu_show_srbds(): drivers/base/cpu.c:565:16: error: no previous prototype for 'cpu_show_srbds' Fixes: 7e5b3c267d25 ("x86/speculation: Add Special Register Buffer Data Sampling (SRBDS) mitigation") Reported-by: kernel test robot Signed-off-by: Guenter Roeck Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200617141410.93338-1-linux@roeck-us.net --- include/linux/cpu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 52692587f7fe..8aa84c052fdf 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -64,6 +64,7 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, char *buf); extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, -- cgit v1.2.3-59-g8ed1b From c935cd62d3fe985d7f0ebea185d2759e8992e96f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 17 Jun 2020 17:17:19 +1000 Subject: lockdep: Split header file into lockdep and lockdep_types There is a header file inclusion loop between asm-generic/bug.h and linux/kernel.h. This causes potential compile failurs depending on the which file is included first. One way of breaking this loop is to stop spinlock_types.h from including lockdep.h. This patch splits lockdep.h into two files for this purpose. 
Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sergey Senozhatsky Reviewed-by: Andy Shevchenko Acked-by: Petr Mladek Acked-by: Steven Rostedt (VMware) Link: https://lkml.kernel.org/r/E1jlSJz-0003hE-8g@fornost.hmeau.com --- include/linux/lockdep.h | 178 +------------------------------------ include/linux/lockdep_types.h | 196 +++++++++++++++++++++++++++++++++++++++++ include/linux/spinlock.h | 1 + include/linux/spinlock_types.h | 2 +- 4 files changed, 200 insertions(+), 177 deletions(-) create mode 100644 include/linux/lockdep_types.h (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 8fce5c98a4b0..3b73cf84f77d 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -10,181 +10,20 @@ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H +#include + struct task_struct; -struct lockdep_map; /* for sysctl */ extern int prove_locking; extern int lock_stat; -#define MAX_LOCKDEP_SUBCLASSES 8UL - -#include - -enum lockdep_wait_type { - LD_WAIT_INV = 0, /* not checked, catch all */ - - LD_WAIT_FREE, /* wait free, rcu etc.. */ - LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ - -#ifdef CONFIG_PROVE_RAW_LOCK_NESTING - LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ -#else - LD_WAIT_CONFIG = LD_WAIT_SPIN, -#endif - LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */ - - LD_WAIT_MAX, /* must be last */ -}; - #ifdef CONFIG_LOCKDEP #include -#include #include #include -/* - * We'd rather not expose kernel/lockdep_states.h this wide, but we do need - * the total number of states... :-( - */ -#define XXX_LOCK_USAGE_STATES (1+2*4) - -/* - * NR_LOCKDEP_CACHING_CLASSES ... Number of classes - * cached in the instance of lockdep_map - * - * Currently main class (subclass == 0) and signle depth subclass - * are cached in lockdep_map. This optimization is mainly targeting - * on rq->lock. double_rq_lock() acquires this highly competitive with - * single depth. - */ -#define NR_LOCKDEP_CACHING_CLASSES 2 - -/* - * A lockdep key is associated with each lock object. For static locks we use - * the lock address itself as the key. Dynamically allocated lock objects can - * have a statically or dynamically allocated key. Dynamically allocated lock - * keys must be registered before being used and must be unregistered before - * the key memory is freed. - */ -struct lockdep_subclass_key { - char __one_byte; -} __attribute__ ((__packed__)); - -/* hash_entry is used to keep track of dynamically allocated keys. */ -struct lock_class_key { - union { - struct hlist_node hash_entry; - struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; - }; -}; - -extern struct lock_class_key __lockdep_no_validate__; - -struct lock_trace; - -#define LOCKSTAT_POINTS 4 - -/* - * The lock-class itself. The order of the structure members matters. - * reinit_class() zeroes the key member and all subsequent members. - */ -struct lock_class { - /* - * class-hash: - */ - struct hlist_node hash_entry; - - /* - * Entry in all_lock_classes when in use. Entry in free_lock_classes - * when not in use. Instances that are being freed are on one of the - * zapped_classes lists. - */ - struct list_head lock_entry; - - /* - * These fields represent a directed graph of lock dependencies, - * to every node we attach a list of "forward" and a list of - * "backward" graph nodes. 
- */ - struct list_head locks_after, locks_before; - - const struct lockdep_subclass_key *key; - unsigned int subclass; - unsigned int dep_gen_id; - - /* - * IRQ/softirq usage tracking bits: - */ - unsigned long usage_mask; - const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; - - /* - * Generation counter, when doing certain classes of graph walking, - * to ensure that we check one node only once: - */ - int name_version; - const char *name; - - short wait_type_inner; - short wait_type_outer; - -#ifdef CONFIG_LOCK_STAT - unsigned long contention_point[LOCKSTAT_POINTS]; - unsigned long contending_point[LOCKSTAT_POINTS]; -#endif -} __no_randomize_layout; - -#ifdef CONFIG_LOCK_STAT -struct lock_time { - s64 min; - s64 max; - s64 total; - unsigned long nr; -}; - -enum bounce_type { - bounce_acquired_write, - bounce_acquired_read, - bounce_contended_write, - bounce_contended_read, - nr_bounce_types, - - bounce_acquired = bounce_acquired_write, - bounce_contended = bounce_contended_write, -}; - -struct lock_class_stats { - unsigned long contention_point[LOCKSTAT_POINTS]; - unsigned long contending_point[LOCKSTAT_POINTS]; - struct lock_time read_waittime; - struct lock_time write_waittime; - struct lock_time read_holdtime; - struct lock_time write_holdtime; - unsigned long bounces[nr_bounce_types]; -}; - -struct lock_class_stats lock_stats(struct lock_class *class); -void clear_lock_stats(struct lock_class *class); -#endif - -/* - * Map the lock object (the lock instance) to the lock-class object. - * This is embedded into specific lock instances: - */ -struct lockdep_map { - struct lock_class_key *key; - struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; - const char *name; - short wait_type_outer; /* can be taken in this context */ - short wait_type_inner; /* presents this context */ -#ifdef CONFIG_LOCK_STAT - int cpu; - unsigned long ip; -#endif -}; - static inline void lockdep_copy_map(struct lockdep_map *to, struct lockdep_map *from) { @@ -440,8 +279,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock, extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); -struct pin_cookie { unsigned int val; }; - #define NIL_COOKIE (struct pin_cookie){ .val = 0U, } extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); @@ -520,10 +357,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) # define lockdep_reset() do { debug_locks = 1; } while (0) # define lockdep_free_key_range(start, size) do { } while (0) # define lockdep_sys_exit() do { } while (0) -/* - * The class key takes no space if lockdep is disabled: - */ -struct lock_class_key { }; static inline void lockdep_register_key(struct lock_class_key *key) { @@ -533,11 +366,6 @@ static inline void lockdep_unregister_key(struct lock_class_key *key) { } -/* - * The lockdep_map takes no space if lockdep is disabled: - */ -struct lockdep_map { }; - #define lockdep_depth(tsk) (0) #define lockdep_is_held_type(l, r) (1) @@ -549,8 +377,6 @@ struct lockdep_map { }; #define lockdep_recursing(tsk) (0) -struct pin_cookie { }; - #define NIL_COOKIE (struct pin_cookie){ } #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h new file mode 100644 index 000000000000..7b9350624577 --- /dev/null +++ b/include/linux/lockdep_types.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar + * 
Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * + * see Documentation/locking/lockdep-design.rst for more details. + */ +#ifndef __LINUX_LOCKDEP_TYPES_H +#define __LINUX_LOCKDEP_TYPES_H + +#include + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +enum lockdep_wait_type { + LD_WAIT_INV = 0, /* not checked, catch all */ + + LD_WAIT_FREE, /* wait free, rcu etc.. */ + LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ + +#ifdef CONFIG_PROVE_RAW_LOCK_NESTING + LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ +#else + LD_WAIT_CONFIG = LD_WAIT_SPIN, +#endif + LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */ + + LD_WAIT_MAX, /* must be last */ +}; + +#ifdef CONFIG_LOCKDEP + +#include + +/* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( + */ +#define XXX_LOCK_USAGE_STATES (1+2*4) + +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + +/* + * A lockdep key is associated with each lock object. For static locks we use + * the lock address itself as the key. Dynamically allocated lock objects can + * have a statically or dynamically allocated key. Dynamically allocated lock + * keys must be registered before being used and must be unregistered before + * the key memory is freed. + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +/* hash_entry is used to keep track of dynamically allocated keys. */ +struct lock_class_key { + union { + struct hlist_node hash_entry; + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; + }; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +struct lock_trace; + +#define LOCKSTAT_POINTS 4 + +/* + * The lock-class itself. The order of the structure members matters. + * reinit_class() zeroes the key member and all subsequent members. + */ +struct lock_class { + /* + * class-hash: + */ + struct hlist_node hash_entry; + + /* + * Entry in all_lock_classes when in use. Entry in free_lock_classes + * when not in use. Instances that are being freed are on one of the + * zapped_classes lists. + */ + struct list_head lock_entry; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. 
+ */ + struct list_head locks_after, locks_before; + + const struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + int name_version; + const char *name; + + short wait_type_inner; + short wait_type_outer; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; +#endif +} __no_randomize_layout; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + +/* + * Map the lock object (the lock instance) to the lock-class object. + * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; + short wait_type_outer; /* can be taken in this context */ + short wait_type_inner; /* presents this context */ +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + +struct pin_cookie { unsigned int val; }; + +#else /* !CONFIG_LOCKDEP */ + +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; + +/* + * The lockdep_map takes no space if lockdep is disabled: + */ +struct lockdep_map { }; + +struct pin_cookie { }; + +#endif /* !LOCKDEP */ + +#endif /* __LINUX_LOCKDEP_TYPES_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d3770b3f9d9a..f2f12d746dbd 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -56,6 +56,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 6102e6bff3ae..b981caafe8bf 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -15,7 +15,7 @@ # include #endif -#include +#include typedef struct raw_spinlock { arch_spinlock_t raw_lock; -- cgit v1.2.3-59-g8ed1b From a9d887dc1c60ed67f2271d66560cdcf864c4a578 Mon Sep 17 00:00:00 2001 From: Guru Das Srinagesh Date: Tue, 2 Jun 2020 15:31:16 -0700 Subject: pwm: Convert period and duty cycle to u64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because period and duty cycle are defined as ints with units of nanoseconds, the maximum time duration that can be set is limited to ~2.147 seconds. Change their definitions to u64 in the structs of the PWM framework so that higher durations may be set. Also use the right format specifiers in debug prints in both core.c, pwm-stm32-lp.c as well as video/fbdev/ssd1307fb.c. 
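A minimal consumer sketch of the widened fields (assuming a kernel build environment and a struct pwm_device obtained elsewhere, e.g. via devm_pwm_get(); the function name is illustrative): with period and duty_cycle as u64, a 5 second period no longer overflows.

#include <linux/math64.h>
#include <linux/pwm.h>
#include <linux/time64.h>

static int example_apply_slow_blink(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_init_state(pwm, &state);
	state.period = 5ULL * NSEC_PER_SEC;		/* 5 s, above the old ~2.147 s limit */
	state.duty_cycle = div_u64(state.period, 2);	/* 50% duty */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}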
Reported-by: kbuild test robot Signed-off-by: Guru Das Srinagesh Acked-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/core.c | 14 +++++++------- drivers/pwm/pwm-stm32-lp.c | 2 +- drivers/pwm/sysfs.c | 8 ++++---- drivers/video/fbdev/ssd1307fb.c | 2 +- include/linux/pwm.h | 12 ++++++------ 5 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 004b2ea9b5fd..276e939a5684 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -510,12 +510,12 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, last->period > s2.period && last->period <= state->period) dev_warn(chip->dev, - ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n", + ".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n", state->period, s2.period, last->period); if (state->enabled && state->period < s2.period) dev_warn(chip->dev, - ".apply is supposed to round down period (requested: %u, applied: %u)\n", + ".apply is supposed to round down period (requested: %llu, applied: %llu)\n", state->period, s2.period); if (state->enabled && @@ -524,14 +524,14 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, last->duty_cycle > s2.duty_cycle && last->duty_cycle <= state->duty_cycle) dev_warn(chip->dev, - ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n", + ".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n", state->duty_cycle, state->period, s2.duty_cycle, s2.period, last->duty_cycle, last->period); if (state->enabled && state->duty_cycle < s2.duty_cycle) dev_warn(chip->dev, - ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n", + ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n", state->duty_cycle, state->period, s2.duty_cycle, s2.period); @@ -558,7 +558,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, (s1.enabled && s1.period != last->period) || (s1.enabled && s1.duty_cycle != last->duty_cycle)) { dev_err(chip->dev, - ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n", + ".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n", s1.enabled, s1.polarity, s1.duty_cycle, s1.period, last->enabled, last->polarity, last->duty_cycle, last->period); @@ -1284,8 +1284,8 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) if (state.enabled) seq_puts(s, " enabled"); - seq_printf(s, " period: %u ns", state.period); - seq_printf(s, " duty: %u ns", state.duty_cycle); + seq_printf(s, " period: %llu ns", state.period); + seq_printf(s, " duty: %llu ns", state.duty_cycle); seq_printf(s, " polarity: %s", state.polarity ? "inverse" : "normal"); diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 67fca62524dc..134c14621ee0 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -61,7 +61,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, do_div(div, NSEC_PER_SEC); if (!div) { /* Clock is too slow to achieve requested period. 
*/ - dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period); + dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period); return -EINVAL; } diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 2389b8669846..449dbc0f49ed 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.period); + return sprintf(buf, "%llu\n", state.period); } static ssize_t period_store(struct device *child, @@ -52,10 +52,10 @@ static ssize_t period_store(struct device *child, struct pwm_export *export = child_to_pwm_export(child); struct pwm_device *pwm = export->pwm; struct pwm_state state; - unsigned int val; + u64 val; int ret; - ret = kstrtouint(buf, 0, &val); + ret = kstrtou64(buf, 0, &val); if (ret) return ret; @@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.duty_cycle); + return sprintf(buf, "%llu\n", state.duty_cycle); } static ssize_t duty_cycle_store(struct device *child, diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 8e06ba912d60..09425ec317ba 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -312,7 +312,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) /* Enable the PWM */ pwm_enable(par->pwm); - dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n", + dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n", par->pwm->pwm, pwm_get_period(par->pwm)); } diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2635b2a55090..a13ff383fa1d 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -39,7 +39,7 @@ enum pwm_polarity { * current PWM hardware state. */ struct pwm_args { - unsigned int period; + u64 period; enum pwm_polarity polarity; }; @@ -56,8 +56,8 @@ enum { * @enabled: PWM enabled status */ struct pwm_state { - unsigned int period; - unsigned int duty_cycle; + u64 period; + u64 duty_cycle; enum pwm_polarity polarity; bool enabled; }; @@ -107,13 +107,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm) return state.enabled; } -static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +static inline void pwm_set_period(struct pwm_device *pwm, u64 period) { if (pwm) pwm->state.period = period; } -static inline unsigned int pwm_get_period(const struct pwm_device *pwm) +static inline u64 pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; @@ -128,7 +128,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) pwm->state.duty_cycle = duty; } -static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) +static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; -- cgit v1.2.3-59-g8ed1b From 7d34ca3854845398cb36866d14bbdc43dcec1ad0 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Tue, 9 Jun 2020 18:19:33 -0700 Subject: driver core: Add device_is_dependent() to linux/device.h DT implementation of fw_devlink needs this function to detect cycles. So make it available. 
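A short sketch of the intended use (example_link_if_acyclic is an illustrative name; device_link_add() and DL_FLAG_STATELESS are existing driver-core APIs): refuse a consumer->supplier link when the supplier already depends on the consumer, the same cycle device_link_add() itself guards against.

#include <linux/device.h>

static struct device_link *example_link_if_acyclic(struct device *consumer,
						   struct device *supplier)
{
	/* Nonzero means the supplier, or something depending on it, already
	 * depends on the consumer, so a new link would form a cycle. */
	if (device_is_dependent(consumer, supplier))
		return NULL;

	return device_link_add(consumer, supplier, DL_FLAG_STATELESS);
}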
Signed-off-by: Saravana Kannan Tested-by: John Stultz Signed-off-by: Rob Herring --- drivers/base/core.c | 2 +- include/linux/device.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/base/core.c b/drivers/base/core.c index 67d39a90b45c..320a0e1d628a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -121,7 +121,7 @@ int device_links_read_lock_held(void) * Check if @target depends on @dev or any device dependent on it (its child or * its consumer etc). Return 1 if that is the case or 0 otherwise. */ -static int device_is_dependent(struct device *dev, void *target) +int device_is_dependent(struct device *dev, void *target) { struct device_link *link; int ret; diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..33cece5d9a4c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -829,6 +829,7 @@ extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); +extern int device_is_dependent(struct device *dev, void *target); static inline bool device_supports_offline(struct device *dev) { -- cgit v1.2.3-59-g8ed1b From f3c802a1f30013f8f723b62d7fa49eb9e991da23 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 30 May 2020 00:23:49 +1000 Subject: crypto: algif_aead - Only wake up when ctx->more is zero AEAD does not support partial requests so we must not wake up while ctx->more is set. In order to distinguish between the case of no data sent yet and a zero-length request, a new init flag has been added to ctx. SKCIPHER has also been modified to ensure that at least a block of data is available if there is more data to come. Fixes: 2d97591ef43d ("crypto: af_alg - consolidation of...") Signed-off-by: Herbert Xu --- crypto/af_alg.c | 11 ++++++++--- crypto/algif_aead.c | 4 ++-- crypto/algif_skcipher.c | 4 ++-- include/crypto/if_alg.h | 4 +++- 4 files changed, 15 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e3fe3..9fcb91ea10c4 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -843,7 +847,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { + if (ctx->init && (init || !ctx->more)) { err = -EINVAL; goto unlock; } @@ -854,6 +858,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; + ctx->init = true; } while (size) { diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a61c7f..d48d2156e621 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c87a6d..a51ba22fef58 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 088c1ded2714..ee6412314f8f 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. */ struct af_alg_ctx { @@ -151,6 +152,7 @@ struct af_alg_ctx { bool more; bool merge; bool enc; + bool init; unsigned int len; }; @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset); void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, -- cgit v1.2.3-59-g8ed1b From 03cc8353c2244c8b790c3c81a0f1532d34a9d738 Mon Sep 17 00:00:00 2001 From: Rob Gill Date: Mon, 1 Jun 2020 21:17:56 +0000 Subject: USB: core: additional Device Classes to debug/usb/devices Several newer USB Device classes are not presently reported individually at /sys/kernel/debug/usb/devices, (reported as "unk."). 
This patch adds the following classes: 0fh (Personal Healthcare devices), 10h (USB Type-C combined Audio/Video devices) 11h (USB billboard), 12h (USB Type-C Bridge). As defined at [https://www.usb.org/defined-class-codes] Corresponding classes defined in include/linux/usb/ch9.h. Signed-off-by: Rob Gill Link: https://lore.kernel.org/r/20200601211749.6878-1-rrobgill@protonmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devices.c | 4 ++++ include/uapi/linux/usb/ch9.h | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 94b6fa6e585e..696b2b692b83 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -133,6 +133,10 @@ static const struct class_info clas_info[] = { {USB_CLASS_CSCID, "scard"}, {USB_CLASS_CONTENT_SEC, "c-sec"}, {USB_CLASS_VIDEO, "video"}, + {USB_CLASS_PERSONAL_HEALTHCARE, "perhc"}, + {USB_CLASS_AUDIO_VIDEO, "av"}, + {USB_CLASS_BILLBOARD, "blbrd"}, + {USB_CLASS_USB_TYPE_C_BRIDGE, "bridg"}, {USB_CLASS_WIRELESS_CONTROLLER, "wlcon"}, {USB_CLASS_MISC, "misc"}, {USB_CLASS_APP_SPEC, "app."}, diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 2b623f36af6b..456ab0c2b586 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -326,6 +326,10 @@ struct usb_device_descriptor { #define USB_CLASS_CONTENT_SEC 0x0d /* content security */ #define USB_CLASS_VIDEO 0x0e #define USB_CLASS_WIRELESS_CONTROLLER 0xe0 +#define USB_CLASS_PERSONAL_HEALTHCARE 0x0f +#define USB_CLASS_AUDIO_VIDEO 0x10 +#define USB_CLASS_BILLBOARD 0x11 +#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12 #define USB_CLASS_MISC 0xef #define USB_CLASS_APP_SPEC 0xfe #define USB_CLASS_VENDOR_SPEC 0xff -- cgit v1.2.3-59-g8ed1b From 8baaa4038edbff67f318574e233e9e7e43808230 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:08 +0200 Subject: Bluetooth: Add bdaddr_list_with_flags for classic whitelist In order to more easily add device flags to classic devices, create a new type of bdaddr_list that supports setting flags. 
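A minimal sketch of the new list type (assumes hdev->lock is held by the caller; the function name and the flags value are illustrative, since this patch only adds the storage for per-entry flags):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *bdaddr,
				 u32 flags)
{
	struct bdaddr_list_with_flags *entry;
	int err;

	err = hci_bdaddr_list_add_with_flags(&hdev->whitelist, bdaddr,
					     BDADDR_BREDR, flags);
	if (err)
		return err;

	entry = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, bdaddr,
						  BDADDR_BREDR);

	return entry ? 0 : -ENOENT;
}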
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 18 +++++++++++-- net/bluetooth/hci_core.c | 58 ++++++++++++++++++++++++++++++++++++++++ net/bluetooth/hci_event.c | 8 +++--- net/bluetooth/mgmt.c | 5 ++-- 4 files changed, 81 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0d5dbb6cb5a0..95a3935325bb 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -136,6 +136,13 @@ struct bdaddr_list_with_irk { u8 local_irk[16]; }; +struct bdaddr_list_with_flags { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; + u32 current_flags; +}; + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -1169,12 +1176,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *list, bdaddr_t *bdaddr, u8 type); +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, - u8 type, u8 *peer_irk, u8 *local_irk); + u8 type, u8 *peer_irk, u8 *local_irk); +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, - u8 type); + u8 type); +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4f1052a7c488..8a471bec2731 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3023,6 +3023,20 @@ struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( return NULL; } +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list_with_flags *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; @@ -3084,6 +3098,30 @@ int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + entry->current_flags = flags; + + list_add(&entry->list, list); + + return 0; +} + int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; @@ -3123,6 +3161,26 @@ int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + 
hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cfeaee347db3..8981954ff4c4 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2697,10 +2697,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (hci_dev_test_flag(hdev, HCI_MGMT) && !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && - !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, - BDADDR_BREDR)) { - hci_reject_conn(hdev, &ev->bdaddr); - return; + !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr, + BDADDR_BREDR)) { + hci_reject_conn(hdev, &ev->bdaddr); + return; } /* Connection accepted */ diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ecfdfc4df486..d0d0fa832c8a 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -6000,8 +6000,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr, - cp->addr.type); + err = hci_bdaddr_list_add_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type, 0); if (err) goto unlock; -- cgit v1.2.3-59-g8ed1b From 7a92906f841db46a91df0179459ad8b2052f2e54 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:09 +0200 Subject: Bluetooth: Replace wakeable list with flag Since the classic device list now supports flags, convert the wakeable list into a flag on the existing device list. 
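A small sketch of the resulting pattern (assumes hdev->lock is held; the function name is illustrative): walk the whitelist and test the per-entry flag rather than keeping a separate wakeable list.

#include <net/bluetooth/hci_core.h>

static int example_count_wakeable(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	int n = 0;

	list_for_each_entry(b, &hdev->whitelist, list)
		if (hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
				       b->current_flags))
			n++;

	return n;
}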
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 11 ++++++++++- net/bluetooth/hci_core.c | 1 - net/bluetooth/hci_request.c | 12 ++++++++---- 3 files changed, 18 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 95a3935325bb..0643c737ba85 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -143,6 +143,16 @@ struct bdaddr_list_with_flags { u32 current_flags; }; +enum hci_conn_flags { + HCI_CONN_FLAG_REMOTE_WAKEUP, + HCI_CONN_FLAG_MAX +}; + +#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr)) + +/* Make sure number of flags doesn't exceed sizeof(current_flags) */ +static_assert(HCI_CONN_FLAG_MAX < 32); + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -463,7 +473,6 @@ struct hci_dev { struct list_head mgmt_pending; struct list_head blacklist; struct list_head whitelist; - struct list_head wakeable; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8a471bec2731..8e01afb2ee8c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3499,7 +3499,6 @@ struct hci_dev *hci_alloc_dev(void) INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->whitelist); - INIT_LIST_HEAD(&hdev->wakeable); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index a7f572ad38ef..a5b53d3ea508 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -968,15 +968,19 @@ static void hci_req_clear_event_filter(struct hci_request *req) static void hci_req_set_event_filter(struct hci_request *req) { - struct bdaddr_list *b; + struct bdaddr_list_with_flags *b; struct hci_cp_set_event_filter f; struct hci_dev *hdev = req->hdev; - u8 scan; + u8 scan = SCAN_DISABLED; /* Always clear event filter when starting */ hci_req_clear_event_filter(req); - list_for_each_entry(b, &hdev->wakeable, list) { + list_for_each_entry(b, &hdev->whitelist, list) { + if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + b->current_flags)) + continue; + memset(&f, 0, sizeof(f)); bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); f.flt_type = HCI_FLT_CONN_SETUP; @@ -985,9 +989,9 @@ static void hci_req_set_event_filter(struct hci_request *req) bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); + scan = SCAN_PAGE; } - scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED; hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } -- cgit v1.2.3-59-g8ed1b From a1fc7535ec34a5904abe93dd42a6ed7e31c36717 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:10 +0200 Subject: Bluetooth: Replace wakeable in hci_conn_params Replace the wakeable boolean with flags in hci_conn_params and all users of this boolean. This will be used by the get/set device flags mgmt op. 
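The LE-side counterpart as a sketch (assumes hdev->lock is held; the function name is illustrative): look up the connection parameters and test the wakeup bit in current_flags.

#include <net/bluetooth/hci_core.h>

static bool example_le_params_wakeable(struct hci_dev *hdev, bdaddr_t *addr,
				       u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return false;

	return hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
				  params->current_flags);
}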
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 2 +- net/bluetooth/hci_request.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0643c737ba85..6f88e5d81bd2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -660,7 +660,7 @@ struct hci_conn_params { struct hci_conn *conn; bool explicit_connect; - bool wakeable; + u32 current_flags; }; extern struct list_head hci_dev_list; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index a5b53d3ea508..eee9c007a5fb 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -710,7 +710,8 @@ static int add_to_white_list(struct hci_request *req, } /* During suspend, only wakeable devices can be in whitelist */ - if (hdev->suspended && !params->wakeable) + if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + params->current_flags)) return 0; *num_entries += 1; -- cgit v1.2.3-59-g8ed1b From 4c54bf2b093bb2ae95e756342646d868e8101cb4 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:11 +0200 Subject: Bluetooth: Add get/set device flags mgmt op Add the get device flags and set device flags mgmt ops and the device flags changed event. Their behavior is described in detail in mgmt-api.txt in bluez. Sample btmon trace when a HID device is added (trimmed to 75 chars): @ MGMT Command: Unknown (0x0050) plen 11 {0x0001} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 ........... @ MGMT Event: Unknown (0x002a) plen 15 {0x0004} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0003} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0002} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Command Compl.. (0x0001) plen 10 {0x0001} [hci0] 18:06:14.98 Unknown (0x0050) plen 7 Status: Success (0x00) 90 c5 13 cd f3 cd 02 ....... @ MGMT Command: Add Device (0x0033) plen 8 {0x0001} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0004} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0003} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0002} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Unknown (0x002a) plen 15 {0x0004} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0003} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0002} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0001} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... 
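A userspace sketch of the 11-byte Set Device Flags (0x0050) payload seen in the trace above (struct and function names are illustrative, and a little-endian host is assumed): 6-byte device address, 1-byte address type, 4-byte flags word.

#include <stdint.h>
#include <string.h>

struct set_device_flags_cp {
	uint8_t  bdaddr[6];	/* little-endian device address */
	uint8_t  addr_type;	/* 0x00 BR/EDR, 0x01 LE public, 0x02 LE random */
	uint32_t current_flags;
} __attribute__((packed));

static void build_set_device_flags(struct set_device_flags_cp *cp,
				   const uint8_t addr[6], uint8_t addr_type,
				   uint32_t flags)
{
	memcpy(cp->bdaddr, addr, sizeof(cp->bdaddr));
	cp->addr_type = addr_type;
	cp->current_flags = flags;
}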
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/mgmt.h | 28 ++++++++++ net/bluetooth/mgmt.c | 128 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e515288f328f..8e47b0c5fe52 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -720,6 +720,27 @@ struct mgmt_rp_set_exp_feature { #define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e #define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0 +#define MGMT_OP_GET_DEVICE_FLAGS 0x004F +#define MGMT_GET_DEVICE_FLAGS_SIZE 7 +struct mgmt_cp_get_device_flags { + struct mgmt_addr_info addr; +} __packed; +struct mgmt_rp_get_device_flags { + struct mgmt_addr_info addr; + __le32 supported_flags; + __le32 current_flags; +} __packed; + +#define MGMT_OP_SET_DEVICE_FLAGS 0x0050 +#define MGMT_SET_DEVICE_FLAGS_SIZE 11 +struct mgmt_cp_set_device_flags { + struct mgmt_addr_info addr; + __le32 current_flags; +} __packed; +struct mgmt_rp_set_device_flags { + struct mgmt_addr_info addr; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -951,3 +972,10 @@ struct mgmt_ev_exp_feature_changed { __u8 uuid[16]; __le32 flags; } __packed; + +#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a +struct mgmt_ev_device_flags_changed { + struct mgmt_addr_info addr; + __le32 supported_flags; + __le32 current_flags; +} __packed; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d0d0fa832c8a..e409ff48e8e6 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -116,6 +116,8 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_OP_READ_DEF_RUNTIME_CONFIG, MGMT_OP_SET_DEF_RUNTIME_CONFIG, + MGMT_OP_GET_DEVICE_FLAGS, + MGMT_OP_SET_DEVICE_FLAGS, }; static const u16 mgmt_events[] = { @@ -156,6 +158,7 @@ static const u16 mgmt_events[] = { MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_PHY_CONFIGURATION_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, + MGMT_EV_DEVICE_FLAGS_CHANGED, }; static const u16 mgmt_untrusted_commands[] = { @@ -3856,6 +3859,120 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_NOT_SUPPORTED); } +#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1) + +static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_cp_get_device_flags *cp = data; + struct mgmt_rp_get_device_flags rp; + struct bdaddr_list_with_flags *br_params; + struct hci_conn_params *params; + u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 current_flags = 0; + u8 status = MGMT_STATUS_INVALID_PARAMS; + + bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n", + &cp->addr.bdaddr, cp->addr.type); + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type); + if (!br_params) + goto done; + + current_flags = br_params->current_flags; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!params) + goto done; + + current_flags = params->current_flags; + } + + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + rp.supported_flags = cpu_to_le32(supported_flags); + rp.current_flags = cpu_to_le32(current_flags); + + status = MGMT_STATUS_SUCCESS; + +done: + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status, + &rp, sizeof(rp)); +} + +static 
void device_flags_changed(struct sock *sk, struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 bdaddr_type, + u32 supported_flags, u32 current_flags) +{ + struct mgmt_ev_device_flags_changed ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = bdaddr_type; + ev.supported_flags = cpu_to_le32(supported_flags); + ev.current_flags = cpu_to_le32(current_flags); + + mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk); +} + +static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_device_flags *cp = data; + struct bdaddr_list_with_flags *br_params; + struct hci_conn_params *params; + u8 status = MGMT_STATUS_INVALID_PARAMS; + u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 current_flags = __le32_to_cpu(cp->current_flags); + + bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", + &cp->addr.bdaddr, cp->addr.type, + __le32_to_cpu(current_flags)); + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto done; + } + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type); + + if (br_params) { + br_params->current_flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", + &cp->addr.bdaddr, cp->addr.type); + } + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + if (params) { + params->current_flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", + &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + } + } + +done: + if (status == MGMT_STATUS_SUCCESS) + device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, + supported_flags, current_flags); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status, + &cp->addr, sizeof(cp->addr)); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -5973,7 +6090,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, { struct mgmt_cp_add_device *cp = data; u8 auto_conn, addr_type; + struct hci_conn_params *params; int err; + u32 current_flags = 0; bt_dev_dbg(hdev, "sock %p", sk); @@ -6041,12 +6160,19 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto unlock; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + addr_type); + if (params) + current_flags = params->current_flags; } hci_update_background_scan(hdev); added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); + device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, + SUPPORTED_DEVICE_FLAGS(), current_flags); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, @@ -7313,6 +7439,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_UNTRUSTED }, { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, + { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, + { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) -- cgit v1.2.3-59-g8ed1b From 7fceb17c6b480e0f2bd0e566a8231039fb8a809e Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:12 +0200 Subject: Bluetooth: Add definitions for advertisement monitor 
features This adds support for Advertisement Monitor API. Here are the commands and events added. - Read Advertisement Monitor Feature command - Add Advertisement Pattern Monitor command - Remove Advertisement Monitor command - Advertisement Monitor Added event - Advertisement Monitor Removed event Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/mgmt.h | 49 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 8e47b0c5fe52..beae5c3980f0 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -741,6 +741,45 @@ struct mgmt_rp_set_device_flags { struct mgmt_addr_info addr; } __packed; +#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0) + +#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051 +#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0 +struct mgmt_rp_read_adv_monitor_features { + __le32 supported_features; + __le32 enabled_features; + __le16 max_num_handles; + __u8 max_num_patterns; + __le16 num_handles; + __le16 handles[]; +} __packed; + +struct mgmt_adv_pattern { + __u8 ad_type; + __u8 offset; + __u8 length; + __u8 value[31]; +} __packed; + +#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052 +struct mgmt_cp_add_adv_patterns_monitor { + __u8 pattern_count; + struct mgmt_adv_pattern patterns[]; +} __packed; +#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1 +struct mgmt_rp_add_adv_patterns_monitor { + __le16 monitor_handle; +} __packed; + +#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053 +struct mgmt_cp_remove_adv_monitor { + __le16 monitor_handle; +} __packed; +#define MGMT_REMOVE_ADV_MONITOR_SIZE 2 +struct mgmt_rp_remove_adv_monitor { + __le16 monitor_handle; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -979,3 +1018,13 @@ struct mgmt_ev_device_flags_changed { __le32 supported_flags; __le32 current_flags; } __packed; + +#define MGMT_EV_ADV_MONITOR_ADDED 0x002b +struct mgmt_ev_adv_monitor_added { + __le16 monitor_handle; +} __packed; + +#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c +struct mgmt_ev_adv_monitor_removed { + __le16 monitor_handle; +} __packed; -- cgit v1.2.3-59-g8ed1b From e5e1e7fd470ccf2eb38ab7fb5a3ab0fc4792fe53 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:13 +0200 Subject: Bluetooth: Add handler of MGMT_OP_READ_ADV_MONITOR_FEATURES This adds the request handler of MGMT_OP_READ_ADV_MONITOR_FEATURES command. Since the controller-based monitoring is not yet in place, this report only the supported features but not the enabled features. The following test was performed. - Issuing btmgmt advmon-features. 
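A userspace sketch of walking the variable-length reply this handler builds (function name is illustrative; a little-endian host is assumed): 4-byte supported and enabled feature masks, 2-byte maximum handle count, 1-byte maximum pattern count, 2-byte number of handles, then the handle list.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void parse_adv_mon_features(const uint8_t *buf, size_t len)
{
	uint32_t supported, enabled;
	uint16_t max_handles, num_handles, handle;
	uint8_t max_patterns;
	size_t i;

	if (len < 13)	/* fixed part of the reply */
		return;

	memcpy(&supported, buf, 4);
	memcpy(&enabled, buf + 4, 4);
	memcpy(&max_handles, buf + 8, 2);
	max_patterns = buf[10];
	memcpy(&num_handles, buf + 11, 2);

	printf("supported 0x%x enabled 0x%x, up to %u monitors / %u patterns\n",
	       (unsigned int)supported, (unsigned int)enabled,
	       max_handles, max_patterns);

	for (i = 0; i < num_handles && 13 + 2 * (i + 1) <= len; i++) {
		memcpy(&handle, buf + 13 + 2 * i, 2);
		printf("  monitor handle %u\n", handle);
	}
}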
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 24 ++++++++++++++++++++ net/bluetooth/hci_core.c | 10 ++++++++- net/bluetooth/mgmt.c | 48 ++++++++++++++++++++++++++++++++++++++++ net/bluetooth/msft.c | 7 ++++++ net/bluetooth/msft.h | 9 ++++++++ 5 files changed, 97 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6f88e5d81bd2..4e9d51087674 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include #include #include @@ -237,6 +238,24 @@ struct adv_info { #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 +struct adv_pattern { + struct list_head list; + __u8 ad_type; + __u8 offset; + __u8 length; + __u8 value[HCI_MAX_AD_LENGTH]; +}; + +struct adv_monitor { + struct list_head patterns; + bool active; + __u16 handle; +}; + +#define HCI_MIN_ADV_MONITOR_HANDLE 1 +#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 +#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 + #define HCI_MAX_SHORT_NAME_LENGTH 10 /* Min encryption key size to match with SMP */ @@ -511,6 +530,9 @@ struct hci_dev { __u16 adv_instance_timeout; struct delayed_work adv_instance_expire; + struct idr adv_monitors_idr; + unsigned int adv_monitors_cnt; + __u8 irk[16]; __u32 rpa_timeout; struct delayed_work rpa_expired; @@ -1258,6 +1280,8 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); +void hci_adv_monitors_clear(struct hci_dev *hdev); + void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8e01afb2ee8c..53aec32a5850 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -26,7 +26,6 @@ /* Bluetooth HCI core. 
*/ #include -#include #include #include #include @@ -2996,6 +2995,12 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, return 0; } +/* This function requires the caller holds hdev->lock */ +void hci_adv_monitors_clear(struct hci_dev *hdev) +{ + idr_destroy(&hdev->adv_monitors_idr); +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { @@ -3646,6 +3651,8 @@ int hci_register_dev(struct hci_dev *hdev) queue_work(hdev->req_workqueue, &hdev->power_on); + idr_init(&hdev->adv_monitors_idr); + return id; err_wqueue: @@ -3716,6 +3723,7 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); + hci_adv_monitors_clear(hdev); hci_bdaddr_list_clear(&hdev->le_white_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index e409ff48e8e6..8aec7fbe9a38 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -37,6 +37,7 @@ #include "smp.h" #include "mgmt_util.h" #include "mgmt_config.h" +#include "msft.h" #define MGMT_VERSION 1 #define MGMT_REVISION 17 @@ -118,6 +119,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEF_RUNTIME_CONFIG, MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, + MGMT_OP_READ_ADV_MONITOR_FEATURES, }; static const u16 mgmt_events[] = { @@ -3973,6 +3975,51 @@ done: &cp->addr, sizeof(cp->addr)); } +static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct adv_monitor *monitor = NULL; + struct mgmt_rp_read_adv_monitor_features *rp = NULL; + int handle; + size_t rp_size = 0; + __u32 supported = 0; + __u16 num_handles = 0; + __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR) + supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) { + handles[num_handles++] = monitor->handle; + } + + hci_dev_unlock(hdev); + + rp_size = sizeof(*rp) + (num_handles * sizeof(u16)); + rp = kmalloc(rp_size, GFP_KERNEL); + if (!rp) + return -ENOMEM; + + /* Once controller-based monitoring is in place, the enabled_features + * should reflect the use. 
+ */ + rp->supported_features = cpu_to_le32(supported); + rp->enabled_features = 0; + rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); + rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; + rp->num_handles = cpu_to_le16(num_handles); + if (num_handles) + memcpy(&rp->handles, &handles, (num_handles * sizeof(u16))); + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_STATUS_SUCCESS, rp, rp_size); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7441,6 +7488,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_VAR_LEN }, { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, + { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index d6c4e6b5ae77..8579bfeb2836 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -139,3 +139,10 @@ void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) bt_dev_dbg(hdev, "MSFT vendor event %u", event); } + +__u64 msft_get_features(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + return msft ? msft->features : 0; +} diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 5aa9130e1f8a..e9c478e890b8 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -3,16 +3,25 @@ * Copyright (C) 2020 Google Corporation */ +#define MSFT_FEATURE_MASK_BREDR_RSSI_MONITOR BIT(0) +#define MSFT_FEATURE_MASK_LE_CONN_RSSI_MONITOR BIT(1) +#define MSFT_FEATURE_MASK_LE_ADV_RSSI_MONITOR BIT(2) +#define MSFT_FEATURE_MASK_LE_ADV_MONITOR BIT(3) +#define MSFT_FEATURE_MASK_CURVE_VALIDITY BIT(4) +#define MSFT_FEATURE_MASK_CONCURRENT_ADV_MONITOR BIT(5) + #if IS_ENABLED(CONFIG_BT_MSFTEXT) void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); +__u64 msft_get_features(struct hci_dev *hdev); #else static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} +static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } #endif -- cgit v1.2.3-59-g8ed1b From b139553db5cd940d66095fb97de1727e9a19369f Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:14 +0200 Subject: Bluetooth: Add handler of MGMT_OP_ADD_ADV_PATTERNS_MONITOR This adds the request handler of MGMT_OP_ADD_ADV_PATTERNS_MONITOR command. Note that the controller-based monitoring is not yet in place. This tracks the content of the monitor without sending HCI traffic, so the request returns immediately. The following manual test was performed. - Issue btmgmt advmon-add with valid and invalid inputs. - Issue btmgmt advmon-add more the allowed number of monitors. 
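A userspace sketch of a single-pattern command payload accepted by this handler (struct names are illustrative; 0x16 is the Service Data - 16-bit UUID AD type and 0x1812 the HID Service UUID, chosen only as an example):

#include <stdint.h>
#include <string.h>

struct adv_pattern_cp {
	uint8_t ad_type;
	uint8_t offset;
	uint8_t length;
	uint8_t value[31];
} __attribute__((packed));

struct add_adv_patterns_monitor_cp {
	uint8_t pattern_count;
	struct adv_pattern_cp patterns[1];
} __attribute__((packed));

static void build_hid_monitor(struct add_adv_patterns_monitor_cp *cp)
{
	const uint8_t hid_uuid_le[2] = { 0x12, 0x18 };	/* 0x1812 */

	memset(cp, 0, sizeof(*cp));
	cp->pattern_count = 1;
	cp->patterns[0].ad_type = 0x16;		/* Service Data - 16-bit UUID */
	cp->patterns[0].offset = 0;
	cp->patterns[0].length = sizeof(hid_uuid_le);
	memcpy(cp->patterns[0].value, hid_uuid_le, sizeof(hid_uuid_le));
}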
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 2 + net/bluetooth/hci_core.c | 40 ++++++++++++++++ net/bluetooth/mgmt.c | 100 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 4e9d51087674..13fad419ae7d 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1281,6 +1281,8 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); +void hci_free_adv_monitor(struct adv_monitor *monitor); +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 53aec32a5850..ce481fab349d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2998,9 +2998,49 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { + struct adv_monitor *monitor; + int handle; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + hci_free_adv_monitor(monitor); + idr_destroy(&hdev->adv_monitors_idr); } +void hci_free_adv_monitor(struct adv_monitor *monitor) +{ + struct adv_pattern *pattern; + struct adv_pattern *tmp; + + if (!monitor) + return; + + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) + kfree(pattern); + + kfree(monitor); +} + +/* This function requires the caller holds hdev->lock */ +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + int min, max, handle; + + if (!monitor) + return -EINVAL; + + min = HCI_MIN_ADV_MONITOR_HANDLE; + max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; + handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, + GFP_KERNEL); + if (handle < 0) + return handle; + + hdev->adv_monitors_cnt++; + monitor->handle = handle; + return 0; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 8aec7fbe9a38..1eca36e51706 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -120,6 +120,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, }; static const u16 mgmt_events[] = { @@ -4020,6 +4021,103 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_SUCCESS, rp, rp_size); } +static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_add_adv_patterns_monitor *cp = data; + struct mgmt_rp_add_adv_patterns_monitor rp; + struct adv_monitor *m = NULL; + struct adv_pattern *p = NULL; + __u8 cp_ofst = 0, cp_len = 0; + unsigned int mp_cnt = 0; + int err, i; + + BT_DBG("request for %s", hdev->name); + + if (len <= sizeof(*cp) || cp->pattern_count == 0) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + m = kmalloc(sizeof(*m), GFP_KERNEL); + if (!m) { + err = -ENOMEM; + goto failed; + } + + INIT_LIST_HEAD(&m->patterns); + m->active = false; + + for (i = 0; 
i < cp->pattern_count; i++) { + if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + cp_ofst = cp->patterns[i].offset; + cp_len = cp->patterns[i].length; + if (cp_ofst >= HCI_MAX_AD_LENGTH || + cp_len > HCI_MAX_AD_LENGTH || + (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto failed; + } + + p->ad_type = cp->patterns[i].ad_type; + p->offset = cp->patterns[i].offset; + p->length = cp->patterns[i].length; + memcpy(p->value, cp->patterns[i].value, p->length); + + INIT_LIST_HEAD(&p->list); + list_add(&p->list, &m->patterns); + } + + if (mp_cnt != cp->pattern_count) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + hci_dev_lock(hdev); + + err = hci_add_adv_monitor(hdev, m); + if (err) { + if (err == -ENOSPC) { + mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_NO_RESOURCES); + } + goto unlock; + } + + hci_dev_unlock(hdev); + + rp.monitor_handle = cpu_to_le16(m->handle); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + +unlock: + hci_dev_unlock(hdev); + +failed: + hci_free_adv_monitor(m); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7489,6 +7587,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, + { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, + HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) -- cgit v1.2.3-59-g8ed1b From bd2fbc6cb815b5171facb42526f6db206d920e13 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:15 +0200 Subject: Bluetooth: Add handler of MGMT_OP_REMOVE_ADV_MONITOR This adds the request handler of MGMT_OP_REMOVE_ADV_MONITOR command. Note that the controller-based monitoring is not yet in place. This removes the internal monitor(s) without sending HCI traffic, so the request returns immediately. The following test was performed. - Issue btmgmt advmon-remove with valid and invalid handles. 
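For reference, the command payload is a single 2-byte little-endian handle; a tiny userspace sketch (function name illustrative), where handle 0 asks the kernel to drop every monitor and an unknown non-zero handle is rejected with the Invalid Index status:

#include <stdint.h>

static void encode_remove_adv_monitor(uint8_t buf[2], uint16_t handle)
{
	buf[0] = handle & 0xff;		/* little-endian on the wire */
	buf[1] = handle >> 8;
}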
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 31 +++++++++++++++++++++++++++++++ net/bluetooth/mgmt.c | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 13fad419ae7d..c54f9295892e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1283,6 +1283,7 @@ void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); +int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ce481fab349d..59132b3e2cde 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3041,6 +3041,37 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) return 0; } +static int free_adv_monitor(int id, void *ptr, void *data) +{ + struct hci_dev *hdev = data; + struct adv_monitor *monitor = ptr; + + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + hci_free_adv_monitor(monitor); + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) +{ + struct adv_monitor *monitor; + + if (handle) { + monitor = idr_find(&hdev->adv_monitors_idr, handle); + if (!monitor) + return -ENOENT; + + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + hci_free_adv_monitor(monitor); + } else { + /* Remove all monitors if handle is 0. 
*/ + idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); + } + + return 0; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1eca36e51706..cff24fde72d2 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -121,6 +121,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEVICE_FLAGS, MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_OP_REMOVE_ADV_MONITOR, }; static const u16 mgmt_events[] = { @@ -4118,6 +4119,39 @@ failed: return err; } +static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_remove_adv_monitor *cp = data; + struct mgmt_rp_remove_adv_monitor rp; + u16 handle; + int err; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + handle = __le16_to_cpu(cp->monitor_handle); + + err = hci_remove_adv_monitor(hdev, handle); + if (err == -ENOENT) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_STATUS_INVALID_INDEX); + goto unlock; + } + + hci_dev_unlock(hdev); + + rp.monitor_handle = cp->monitor_handle; + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + +unlock: + hci_dev_unlock(hdev); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7589,6 +7623,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, HCI_MGMT_VAR_LEN }, + { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) -- cgit v1.2.3-59-g8ed1b From 8208f5a9d435e58ee7f53a24d9ccbe7787944537 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:18 +0200 Subject: Bluetooth: Update background scan and report device based on advertisement monitors This calls hci_update_background_scan() when there is any update on the advertisement monitors. If there is at least one advertisement monitor, the filtering policy of scan parameters should be 0x00. This also reports device found mgmt events if there is at least one monitor. The following cases were tested with btmgmt advmon-* commands. (1) add a ADV monitor and observe that the passive scanning is triggered. (2) remove the last ADV monitor and observe that the passive scanning is terminated. (3) with a LE peripheral paired, repeat (1) and observe the passive scanning continues. (4) with a LE peripheral paired, repeat (2) and observe the passive scanning continues. (5) with a ADV monitor, suspend/resume the host and observe the passive scanning continues. 
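A condensed sketch of the scan decision this change introduces (kernel context assumed; the function name is illustrative): background scanning is kept whenever there are pending LE connections, pending reports, or at least one registered advertisement monitor.

#include <net/bluetooth/hci_core.h>

static bool example_background_scan_needed(struct hci_dev *hdev)
{
	return !list_empty(&hdev->pend_le_conns) ||
	       !list_empty(&hdev->pend_le_reports) ||
	       hci_is_adv_monitoring(hdev);
}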
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 13 +++++++++++++ net/bluetooth/hci_event.c | 5 +++-- net/bluetooth/hci_request.c | 17 ++++++++++++++--- net/bluetooth/mgmt.c | 5 ++++- 5 files changed, 35 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c54f9295892e..524057598ffd 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1284,6 +1284,7 @@ void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); +bool hci_is_adv_monitoring(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 59132b3e2cde..7959b851cc63 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3005,6 +3005,8 @@ void hci_adv_monitors_clear(struct hci_dev *hdev) hci_free_adv_monitor(monitor); idr_destroy(&hdev->adv_monitors_idr); + + hci_update_background_scan(hdev); } void hci_free_adv_monitor(struct adv_monitor *monitor) @@ -3038,6 +3040,9 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) hdev->adv_monitors_cnt++; monitor->handle = handle; + + hci_update_background_scan(hdev); + return 0; } @@ -3069,9 +3074,17 @@ int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); } + hci_update_background_scan(hdev); + return 0; } +/* This function requires the caller holds hdev->lock */ +bool hci_is_adv_monitoring(struct hci_dev *hdev) +{ + return !idr_is_empty(&hdev->adv_monitors_idr); +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8981954ff4c4..e08d4dd9a24e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5447,14 +5447,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send - * device found events. + * device found events, or advertisement monitoring requested. */ if (hdev->le_scan_type == LE_SCAN_PASSIVE) { if (type == LE_ADV_DIRECT_IND) return; if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, - bdaddr, bdaddr_type)) + bdaddr, bdaddr_type) && + idr_is_empty(&hdev->adv_monitors_idr)) return; if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index eee9c007a5fb..29decd7e8051 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -413,11 +413,15 @@ static void __hci_update_background_scan(struct hci_request *req) */ hci_discovery_filter_clear(hdev); + BT_DBG("%s ADV monitoring is %s", hdev->name, + hci_is_adv_monitoring(hdev) ? "on" : "off"); + if (list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports)) { + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { /* If there is no pending LE connections or devices - * to be scanned for, we should stop the background - * scanning. 
+ * to be scanned for or no ADV monitors, we should stop the + * background scanning. */ /* If controller is not scanning we are done. */ @@ -794,6 +798,13 @@ static u8 update_white_list(struct hci_request *req) return 0x00; } + /* Once the controller offloading of advertisement monitor is in place, + * the if condition should include the support of MSFT extension + * support. + */ + if (!idr_is_empty(&hdev->adv_monitors_idr)) + return 0x00; + /* Select filter policy to use white list */ return 0x01; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index b194da4de2d7..ec66160a673c 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -8575,8 +8575,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, if (!hci_discovery_active(hdev)) { if (link_type == ACL_LINK) return; - if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports)) + if (link_type == LE_LINK && + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { return; + } } if (hdev->discovery.result_filtering) { -- cgit v1.2.3-59-g8ed1b From 76b139965575e51224d33ea721d9d00a542b6b39 Mon Sep 17 00:00:00 2001 From: Manish Mandlik Date: Wed, 17 Jun 2020 16:39:19 +0200 Subject: Bluetooth: Terminate the link if pairing is cancelled If user decides to cancel the ongoing pairing process (e.g. by clicking the cancel button on pairing/passkey window), abort any ongoing pairing and then terminate the link if it was created because of the pair device action. Signed-off-by: Manish Mandlik Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 14 ++++++++++++-- net/bluetooth/hci_conn.c | 11 ++++++++--- net/bluetooth/l2cap_core.c | 6 ++++-- net/bluetooth/mgmt.c | 22 ++++++++++++++++++---- 4 files changed, 42 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 524057598ffd..77d29341b064 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -564,6 +564,12 @@ struct hci_dev { #define HCI_PHY_HANDLE(handle) (handle & 0xff) +enum conn_reasons { + CONN_REASON_PAIR_DEVICE, + CONN_REASON_L2CAP_CHAN, + CONN_REASON_SCO_CONNECT, +}; + struct hci_conn { struct list_head list; @@ -615,6 +621,8 @@ struct hci_conn { __s8 max_tx_power; unsigned long flags; + enum conn_reasons conn_reason; + __u32 clock; __u16 clock_accuracy; @@ -1040,12 +1048,14 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, - u16 conn_timeout); + u16 conn_timeout, + enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, u8 role, bdaddr_t *direct_rpa); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type); + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting); int hci_conn_check_link_mode(struct hci_conn *conn); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 9bdffc4e79b0..47f3a45d7dcb 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1174,7 +1174,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev, /* This function requires the caller holds hdev->lock */ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 
dst_type, u8 sec_level, - u16 conn_timeout) + u16 conn_timeout, + enum conn_reasons conn_reason) { struct hci_conn *conn; @@ -1219,6 +1220,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, conn->sec_level = BT_SECURITY_LOW; conn->pending_sec_level = sec_level; conn->conn_timeout = conn_timeout; + conn->conn_reason = conn_reason; hci_update_background_scan(hdev); @@ -1228,7 +1230,8 @@ done: } struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type) + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason) { struct hci_conn *acl; @@ -1248,6 +1251,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, hci_conn_hold(acl); + acl->conn_reason = conn_reason; if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { acl->sec_level = BT_SECURITY_LOW; acl->pending_sec_level = sec_level; @@ -1264,7 +1268,8 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, struct hci_conn *acl; struct hci_conn *sco; - acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, + CONN_REASON_SCO_CONNECT); if (IS_ERR(acl)) return acl; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index fe913a5c754a..35d2bc569a2d 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7893,11 +7893,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, else hcon = hci_connect_le_scan(hdev, dst, dst_type, chan->sec_level, - HCI_LE_CONN_TIMEOUT); + HCI_LE_CONN_TIMEOUT, + CONN_REASON_L2CAP_CHAN); } else { u8 auth_type = l2cap_get_auth_type(chan); - hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); + hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type, + CONN_REASON_L2CAP_CHAN); } if (IS_ERR(hcon)) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ec66160a673c..2a732cab1dc9 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2931,7 +2931,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (cp->addr.type == BDADDR_BREDR) { conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, - auth_type); + auth_type, CONN_REASON_PAIR_DEVICE); } else { u8 addr_type = le_addr_type(cp->addr.type); struct hci_conn_params *p; @@ -2950,9 +2950,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) p->auto_connect = HCI_AUTO_CONN_DISABLED; - conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, - addr_type, sec_level, - HCI_LE_CONN_TIMEOUT); + conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, + sec_level, HCI_LE_CONN_TIMEOUT, + CONN_REASON_PAIR_DEVICE); } if (IS_ERR(conn)) { @@ -3053,6 +3053,20 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, addr, sizeof(*addr)); + + /* Since user doesn't want to proceed with the connection, abort any + * ongoing pairing and then terminate the link if it was created + * because of the pair device action. 
+ */ + if (addr->type == BDADDR_BREDR) + hci_remove_link_key(hdev, &addr->bdaddr); + else + smp_cancel_and_remove_pairing(hdev, &addr->bdaddr, + le_addr_type(addr->type)); + + if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + unlock: hci_dev_unlock(hdev); return err; -- cgit v1.2.3-59-g8ed1b From e0bcc58d876c6ece9720310509a908b7637e37cf Mon Sep 17 00:00:00 2001 From: Benjamin Gaignard Date: Wed, 3 Jun 2020 14:54:36 +0200 Subject: mfd: stm32: Add defines to be used for clkevent purpose Add defines to be able to enable/clear irq and configure one shot mode. Signed-off-by: Benjamin Gaignard Signed-off-by: Lee Jones --- include/linux/mfd/stm32-lptimer.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h index 605f62264825..90b20550c1c8 100644 --- a/include/linux/mfd/stm32-lptimer.h +++ b/include/linux/mfd/stm32-lptimer.h @@ -27,10 +27,15 @@ #define STM32_LPTIM_CMPOK BIT(3) /* STM32_LPTIM_ICR - bit fields */ +#define STM32_LPTIM_ARRMCF BIT(1) #define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3) +/* STM32_LPTIM_IER - bit flieds */ +#define STM32_LPTIM_ARRMIE BIT(1) + /* STM32_LPTIM_CR - bit fields */ #define STM32_LPTIM_CNTSTRT BIT(2) +#define STM32_LPTIM_SNGSTRT BIT(1) #define STM32_LPTIM_ENABLE BIT(0) /* STM32_LPTIM_CFGR - bit fields */ -- cgit v1.2.3-59-g8ed1b From 7f8a137f736f7366820c485c5a0d34d65b9d0125 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Mon, 15 Jun 2020 14:53:22 +0100 Subject: mfd: madera: Remove unused forward declaration of madera_codec_pdata This forward declaration is redundant since the header including the full data structure is included. Signed-off-by: Charles Keepax Signed-off-by: Lee Jones --- include/linux/mfd/madera/pdata.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index fa9595dd42ba..601cbbc10370 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -21,7 +21,6 @@ struct gpio_desc; struct pinctrl_map; -struct madera_codec_pdata; /** * struct madera_pdata - Configuration data for Madera devices -- cgit v1.2.3-59-g8ed1b From 6c27219e34911601955b72c754adfc11c527ba7b Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 8 Jun 2020 11:17:36 +0200 Subject: mfd: Add support for the Khadas System control Microcontroller This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and Edge boards. It has multiple boot control features like password check, power-on options, power-off control and system FAN control on recent boards. This implements a very basic MFD driver with the fan control and User NVMEM cells. Signed-off-by: Neil Armstrong Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 21 ++++++ drivers/mfd/Makefile | 1 + drivers/mfd/khadas-mcu.c | 142 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/khadas-mcu.h | 91 ++++++++++++++++++++++++++ 4 files changed, 255 insertions(+) create mode 100644 drivers/mfd/khadas-mcu.c create mode 100644 include/linux/mfd/khadas-mcu.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a37d7d171382..d13bb0abfd6f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2053,6 +2053,27 @@ config MFD_WCD934X This driver provides common support WCD934x audio codec and its associated Pin Controller, Soundwire Controller and Audio codec. 
+config MFD_KHADAS_MCU + tristate "Support for Khadas System control Microcontroller" + depends on I2C + depends on ARCH_MESON || ARCH_ROCKCHIP || COMPILE_TEST + select MFD_CORE + select REGMAP_I2C + help + Support for the Khadas System control Microcontroller interface + present on their VIM and Edge boards. + + This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and + Edge boards. + + It provides multiple boot control features like password check, + power-on options, power-off control and system FAN control on recent + boards. + + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the functionality + of the device. + menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 9367a92f795a..1c8d6be3347d 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -262,5 +262,6 @@ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o obj-$(CONFIG_MFD_STMFX) += stmfx.o +obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c new file mode 100644 index 000000000000..44d5bb462dab --- /dev/null +++ b/drivers/mfd/khadas-mcu.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Khadas System control Microcontroller + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong + */ +#include +#include +#include +#include +#include +#include + +static bool khadas_mcu_reg_volatile(struct device *dev, unsigned int reg) +{ + if (reg >= KHADAS_MCU_USER_DATA_0_REG && + reg < KHADAS_MCU_PWR_OFF_CMD_REG) + return true; + + switch (reg) { + case KHADAS_MCU_PWR_OFF_CMD_REG: + case KHADAS_MCU_PASSWD_START_REG: + case KHADAS_MCU_CHECK_VEN_PASSWD_REG: + case KHADAS_MCU_CHECK_USER_PASSWD_REG: + case KHADAS_MCU_WOL_INIT_START_REG: + case KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG: + return true; + default: + return false; + } +} + +static bool khadas_mcu_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case KHADAS_MCU_PASSWD_VEN_0_REG: + case KHADAS_MCU_PASSWD_VEN_1_REG: + case KHADAS_MCU_PASSWD_VEN_2_REG: + case KHADAS_MCU_PASSWD_VEN_3_REG: + case KHADAS_MCU_PASSWD_VEN_4_REG: + case KHADAS_MCU_PASSWD_VEN_5_REG: + case KHADAS_MCU_MAC_0_REG: + case KHADAS_MCU_MAC_1_REG: + case KHADAS_MCU_MAC_2_REG: + case KHADAS_MCU_MAC_3_REG: + case KHADAS_MCU_MAC_4_REG: + case KHADAS_MCU_MAC_5_REG: + case KHADAS_MCU_USID_0_REG: + case KHADAS_MCU_USID_1_REG: + case KHADAS_MCU_USID_2_REG: + case KHADAS_MCU_USID_3_REG: + case KHADAS_MCU_USID_4_REG: + case KHADAS_MCU_USID_5_REG: + case KHADAS_MCU_VERSION_0_REG: + case KHADAS_MCU_VERSION_1_REG: + case KHADAS_MCU_DEVICE_NO_0_REG: + case KHADAS_MCU_DEVICE_NO_1_REG: + case KHADAS_MCU_FACTORY_TEST_REG: + case KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG: + return false; + default: + return true; + } +} + +static const struct regmap_config khadas_mcu_regmap_config = { + .reg_bits = 8, + .reg_stride = 1, + .val_bits = 8, + .max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG, + .volatile_reg = khadas_mcu_reg_volatile, + .writeable_reg = khadas_mcu_reg_writeable, + .cache_type = REGCACHE_RBTREE, +}; + +static struct mfd_cell khadas_mcu_fan_cells[] = { + /* VIM1/2 Rev13+ and VIM3 only */ + { .name = "khadas-mcu-fan-ctrl", }, +}; + +static struct mfd_cell khadas_mcu_cells[] = { + { .name = "khadas-mcu-user-mem", }, +}; + 
+static int khadas_mcu_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct khadas_mcu *ddata; + int ret; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + i2c_set_clientdata(client, ddata); + + ddata->dev = dev; + + ddata->regmap = devm_regmap_init_i2c(client, &khadas_mcu_regmap_config); + if (IS_ERR(ddata->regmap)) { + ret = PTR_ERR(ddata->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", ret); + return ret; + } + + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_cells, + ARRAY_SIZE(khadas_mcu_cells), + NULL, 0, NULL); + if (ret) + return ret; + + if (of_find_property(dev->of_node, "#cooling-cells", NULL)) + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_fan_cells, + ARRAY_SIZE(khadas_mcu_fan_cells), + NULL, 0, NULL); + + return 0; +} + +static const struct of_device_id khadas_mcu_of_match[] = { + { .compatible = "khadas,mcu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, khadas_mcu_of_match); + +static struct i2c_driver khadas_mcu_driver = { + .driver = { + .name = "khadas-mcu-core", + .of_match_table = of_match_ptr(khadas_mcu_of_match), + }, + .probe = khadas_mcu_probe, +}; +module_i2c_driver(khadas_mcu_driver); + +MODULE_DESCRIPTION("Khadas MCU core driver"); +MODULE_AUTHOR("Neil Armstrong "); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h new file mode 100644 index 000000000000..a99ba2ed0e4e --- /dev/null +++ b/include/linux/mfd/khadas-mcu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Khadas System control Microcontroller Register map + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong + */ + +#ifndef MFD_KHADAS_MCU_H +#define MFD_KHADAS_MCU_H + +#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */ +#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */ +#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */ +#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */ +#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */ +#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */ +#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */ +#define KHADAS_MCU_USID_0_REG 0x0c /* RO */ +#define KHADAS_MCU_USID_1_REG 0x0d /* RO */ +#define KHADAS_MCU_USID_2_REG 0x0e /* RO */ +#define KHADAS_MCU_USID_3_REG 0x0f /* RO */ +#define KHADAS_MCU_USID_4_REG 0x10 /* RO */ +#define KHADAS_MCU_USID_5_REG 0x11 /* RO */ +#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */ +#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */ +#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */ +#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */ +#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* R */ +#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */ +#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */ +#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */ +#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */ +#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */ +#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */ +#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */ +#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */ +#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */ +#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */ +#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */ +#define KHADAS_MCU_MCU_SLEEP_MODE_REG 
0x2e /* RW */ +#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */ +#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */ +#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */ +#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */ +#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */ +#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */ +#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */ +#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */ +#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */ +#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */ +#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */ +#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */ +#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */ +#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */ +#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */ +#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */ +#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */ +#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */ +#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */ +#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */ +#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */ +#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */ + +enum { + KHADAS_BOARD_VIM1 = 0x1, + KHADAS_BOARD_VIM2, + KHADAS_BOARD_VIM3, + KHADAS_BOARD_EDGE = 0x11, + KHADAS_BOARD_EDGE_V, +}; + +/** + * struct khadas_mcu - Khadas MCU structure + * @device: device reference used for logs + * @regmap: register map + */ +struct khadas_mcu { + struct device *dev; + struct regmap *regmap; +}; + +#endif /* MFD_KHADAS_MCU_H */ -- cgit v1.2.3-59-g8ed1b From 81c7462883b0cc0a4eeef0687f80ad5b5baee5f6 Mon Sep 17 00:00:00 2001 From: Macpaul Lin Date: Thu, 18 Jun 2020 17:13:38 +0800 Subject: USB: replace hardcode maximum usb string length by definition Replace hardcoded maximum USB string length (126 bytes) by definition "USB_MAX_STRING_LEN". 
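The 126 figure follows from the string descriptor layout: bLength is a single byte (so a descriptor is at most 255 bytes), two of those bytes are the bLength/bDescriptorType header, and each character is a 2-byte UTF-16LE code unit, giving (255 - 2) / 2 = 126 characters. A standalone check of that arithmetic (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            int max_desc_len = 255;   /* largest value an 8-bit bLength can hold */
            int header_len = 2;       /* bLength + bDescriptorType */
            int bytes_per_char = 2;   /* one UTF-16LE code unit */

            printf("max chars = %d\n", (max_desc_len - header_len) / bytes_per_char);
            /* prints "max chars = 126" */
            return 0;
    }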
Signed-off-by: Macpaul Lin Acked-by: Alan Stern Link: https://lore.kernel.org/r/1592471618-29428-1-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/composite.c | 4 ++-- drivers/usb/gadget/configfs.c | 2 +- drivers/usb/gadget/usbstring.c | 4 ++-- include/uapi/linux/usb/ch9.h | 3 +++ 4 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5c1eb96a5c57..8fbf73467fef 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1085,7 +1085,7 @@ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) while (*sp) { s = *sp; language = cpu_to_le16(s->language); - for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { + for (tmp = buf; *tmp && tmp < &buf[USB_MAX_STRING_LEN]; tmp++) { if (*tmp == language) goto repeat; } @@ -1160,7 +1160,7 @@ static int get_string(struct usb_composite_dev *cdev, collect_langs(sp, s->wData); } - for (len = 0; len <= 126 && s->wData[len]; len++) + for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++) continue; if (!len) return -EINVAL; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 9dc06a4e1b30..56051bb97349 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -103,7 +103,7 @@ static int usb_string_copy(const char *s, char **s_copy) char *str; char *copy = *s_copy; ret = strlen(s); - if (ret > 126) + if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; str = kstrdup(s, GFP_KERNEL); diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 58a4d3325090..30f889ad3ad2 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -55,9 +55,9 @@ usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf) return -EINVAL; /* string descriptors have length, tag, then UTF16-LE text */ - len = min ((size_t) 126, strlen (s->s)); + len = min((size_t)USB_MAX_STRING_LEN, strlen(s->s)); len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN, - (wchar_t *) &buf[2], 126); + (wchar_t *) &buf[2], USB_MAX_STRING_LEN); if (len < 0) return -EINVAL; buf [0] = (len + 1) * 2; diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 456ab0c2b586..b1ed2ccfe9cf 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -368,6 +368,9 @@ struct usb_config_descriptor { /*-------------------------------------------------------------------------*/ +/* USB String descriptors can contain at most 126 characters. */ +#define USB_MAX_STRING_LEN 126 + /* USB_DT_STRING: String descriptor */ struct usb_string_descriptor { __u8 bLength; -- cgit v1.2.3-59-g8ed1b From 9c77b803f263573b6019e4828825709845c37d45 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:22 -0700 Subject: net: tso: double TSO_HEADER_SIZE value Transport header size could be 60 bytes, and network header size can also be 60 bytes. Add the Ethernet header and we are above 128 bytes. Since drivers using net/core/tso.c usually allocates one DMA coherent piece of memory per TX queue, this patch might cause issues if a driver was using too many slots. For 1024 slots, we would need 256 KB of physically contiguous memory instead of 128 KB. Alternative fix would be to add checks in the fast path, but this involves more work in all drivers using net/core/tso.c. 
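As a standalone check of the arithmetic above (illustrative only): a worst-case transport header and network header of 60 bytes each plus a 14-byte Ethernet header already exceeds the old 128-byte slot, and doubling the slot size doubles the per-queue DMA-coherent allocation.

    #include <stdio.h>

    int main(void)
    {
            int eth_hdr = 14;   /* Ethernet header; 18 with a VLAN tag */
            int net_hdr = 60;   /* worst-case network header, as above */
            int tcp_hdr = 60;   /* worst-case TCP header with options */

            printf("headers: %d bytes\n", eth_hdr + net_hdr + tcp_hdr); /* 134 > 128 */
            printf("1024 slots: %d KB\n", 1024 * 256 / 1024);           /* 256 KB vs 128 KB */
            return 0;
    }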
Fixes: f9cbe9a556af ("net: define the TSO header size in net/tso.h") Signed-off-by: Eric Dumazet Cc: Antoine Tenart Signed-off-by: David S. Miller --- include/net/tso.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/tso.h b/include/net/tso.h index 7e166a570349..c33dd00c161f 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -4,7 +4,7 @@ #include -#define TSO_HEADER_SIZE 128 +#define TSO_HEADER_SIZE 256 struct tso_t { int next_frag_idx; -- cgit v1.2.3-59-g8ed1b From 185c3e5860227065dcb6ee884b45e0debe4762dd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:23 -0700 Subject: net: tso: shrink struct tso_t size field can be an int, no need for size_t Removes a 32bit hole on 64bit kernels. And align fields for better readability. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tso.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tso.h b/include/net/tso.h index c33dd00c161f..d9b0a14b2a57 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -7,12 +7,12 @@ #define TSO_HEADER_SIZE 256 struct tso_t { - int next_frag_idx; - void *data; - size_t size; - u16 ip_id; - bool ipv6; - u32 tcp_seq; + int next_frag_idx; + int size; + void *data; + u16 ip_id; + bool ipv6; + u32 tcp_seq; }; int tso_count_descs(struct sk_buff *skb); -- cgit v1.2.3-59-g8ed1b From 504b912150983a8b2499bbf9e4501336677404c9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:24 -0700 Subject: net: tso: constify tso_count_descs() and friends skb argument of tso_count_descs(), tso_build_hdr() and tso_build_data() can be const. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tso.h | 6 +++--- net/core/tso.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tso.h b/include/net/tso.h index d9b0a14b2a57..32d9272ade6a 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -15,10 +15,10 @@ struct tso_t { u32 tcp_seq; }; -int tso_count_descs(struct sk_buff *skb); -void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, +int tso_count_descs(const struct sk_buff *skb); +void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last); -void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size); +void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); void tso_start(struct sk_buff *skb, struct tso_t *tso); #endif /* _TSO_H */ diff --git a/net/core/tso.c b/net/core/tso.c index d4d5c077ad72..56487e3bb26d 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -6,14 +6,14 @@ #include /* Calculate expected number of TX descriptors */ -int tso_count_descs(struct sk_buff *skb) +int tso_count_descs(const struct sk_buff *skb) { /* The Marvell Way */ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; } EXPORT_SYMBOL(tso_count_descs); -void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, +void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; @@ -44,7 +44,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, } EXPORT_SYMBOL(tso_build_hdr); -void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) +void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) { tso->tcp_seq += size; tso->size -= size; -- cgit v1.2.3-59-g8ed1b From 
761b331cb6902dc0a08f786e9fa0dbd572059027 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:25 -0700 Subject: net: tso: cache transport header length Add tlen field into struct tso_t, and change tso_start() to return skb_transport_offset(skb) + tso->tlen This removes from callers the need to use tcp_hdrlen(skb) and will ease UDP segmentation offload addition. v2: calls tso_start() earlier in otx2_sq_append_tso() [Jakub] Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 5 +++-- drivers/net/ethernet/freescale/fec_main.c | 5 ++--- drivers/net/ethernet/marvell/mv643xx_eth.c | 5 ++--- drivers/net/ethernet/marvell/mvneta.c | 5 ++--- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6 +++--- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 6 +++--- include/net/tso.h | 3 ++- net/core/tso.c | 11 +++++++---- 8 files changed, 24 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 069e7413f1ef..a45223f0cca5 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1489,9 +1489,10 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, int seg_subdescs = 0, desc_cnt = 0; int seg_len, total_len, data_left; int hdr_qentry = qentry; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len; + + hdr_len = tso_start(skb, &tso); - tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { char *hdr; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2d0d313ee7c5..9f80a33c5b16 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -710,8 +710,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int total_len, data_left; + int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; struct tso_t tso; unsigned int index = 0; @@ -731,7 +730,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4d4b6243318a..90e6111ce534 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -816,10 +816,9 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = txq_to_mp(txq); - int total_len, data_left, ret; + int hdr_len, total_len, data_left, ret; int desc_count = 0; struct tso_t tso; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct tx_desc *first_tx_desc; u32 first_cmd_sts = 0; @@ -832,7 +831,7 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mvneta.c 
b/drivers/net/ethernet/marvell/mvneta.c index 946925bbcb2d..95b447c14411 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2604,11 +2604,10 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvneta_tx_queue *txq) { - int total_len, data_left; + int hdr_len, total_len, data_left; int desc_count = 0; struct mvneta_port *pp = netdev_priv(dev); struct tso_t tso; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int i; /* Count needed descriptors */ @@ -2621,7 +2620,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 24f4d8e0da98..e9f287568026 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3160,9 +3160,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvpp2_txq_pcpu *txq_pcpu) { struct mvpp2_port *port = netdev_priv(dev); + int hdr_sz, i, len, descs = 0; struct tso_t tso; - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); - int i, len, descs = 0; /* Check number of available descriptors */ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || @@ -3170,7 +3169,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, tso_count_descs(skb))) return 0; - tso_start(skb, &tso); + hdr_sz = tso_start(skb, &tso); + len = skb->len - hdr_sz; while (len > 0) { int left = min_t(int, skb_shinfo(skb)->gso_size, len); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index b04f5429d72d..3a5b34a2a7a6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -619,13 +619,14 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int tcp_data, seg_len, pkt_len, offset; + int hdr_len, tcp_data, seg_len, pkt_len, offset; struct nix_sqe_hdr_s *sqe_hdr; int first_sqe = sq->head; struct sg_list list; struct tso_t tso; + hdr_len = tso_start(skb, &tso); + /* Map SKB's fragments to DMA. * It's done here to avoid mapping for every TSO segment's packet. 
*/ @@ -636,7 +637,6 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, netdev_tx_sent_queue(txq, skb->len); - tso_start(skb, &tso); tcp_data = skb->len - hdr_len; while (tcp_data > 0) { char *hdr; diff --git a/include/net/tso.h b/include/net/tso.h index 32d9272ade6a..62c98a9c60f1 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -11,6 +11,7 @@ struct tso_t { int size; void *data; u16 ip_id; + u8 tlen; /* transport header len */ bool ipv6; u32 tcp_seq; }; @@ -19,6 +20,6 @@ int tso_count_descs(const struct sk_buff *skb); void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last); void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); -void tso_start(struct sk_buff *skb, struct tso_t *tso); +int tso_start(struct sk_buff *skb, struct tso_t *tso); #endif /* _TSO_H */ diff --git a/net/core/tso.c b/net/core/tso.c index 56487e3bb26d..9f35518815bd 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -17,7 +17,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len = skb_transport_offset(skb) + tso->tlen; int mac_hdr_len = skb_network_offset(skb); memcpy(hdr, skb->data, hdr_len); @@ -30,7 +30,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, } else { struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len); - iph->payload_len = htons(size + tcp_hdrlen(skb)); + iph->payload_len = htons(size + tso->tlen); } tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); put_unaligned_be32(tso->tcp_seq, &tcph->seq); @@ -62,10 +62,12 @@ void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) } EXPORT_SYMBOL(tso_build_data); -void tso_start(struct sk_buff *skb, struct tso_t *tso) +int tso_start(struct sk_buff *skb, struct tso_t *tso) { - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int tlen = tcp_hdrlen(skb); + int hdr_len = skb_transport_offset(skb) + tlen; + tso->tlen = tlen; tso->ip_id = ntohs(ip_hdr(skb)->id); tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); tso->next_frag_idx = 0; @@ -83,5 +85,6 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso) tso->data = skb_frag_address(frag); tso->next_frag_idx++; } + return hdr_len; } EXPORT_SYMBOL(tso_start); -- cgit v1.2.3-59-g8ed1b From 91c7eaa686c3b7ae2d5b2aed22a45a02c8baa30e Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 18 Jun 2020 11:42:53 +0200 Subject: USB: rename USB quirk to USB_QUIRK_ENDPOINT_IGNORE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The USB core has a quirk flag to ignore specific endpoints, so rename it to be more obvious what this quirk does. 
Cc: Johan Hovold Cc: Alan Stern Cc: Richard Dodd Cc: Hans de Goede Cc: Jonathan Cox Cc: Bastien Nocera Cc: "Thiébaud Weksteen" Cc: Nishad Kamdar Link: https://lore.kernel.org/r/20200618094300.1887727-2-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/config.c | 8 ++++---- drivers/usb/core/quirks.c | 18 +++++++++--------- drivers/usb/core/usb.h | 2 +- include/linux/usb/quirks.h | 4 ++-- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b7918f695434..37442f423a41 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -298,10 +298,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, goto skip_to_next_endpoint_or_interface_descriptor; } - /* Ignore blacklisted endpoints */ - if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) { - if (usb_endpoint_is_blacklisted(udev, ifp, d)) { - dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n", + /* Ignore some endpoints */ + if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) { + if (usb_endpoint_is_ignored(udev, ifp, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3e8efe759c3e..20dccf34182d 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -359,7 +359,7 @@ static const struct usb_device_id usb_quirk_list[] = { /* Sound Devices USBPre2 */ { USB_DEVICE(0x0926, 0x0202), .driver_info = - USB_QUIRK_ENDPOINT_BLACKLIST }, + USB_QUIRK_ENDPOINT_IGNORE }, /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = @@ -493,24 +493,24 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { }; /* - * Entries for blacklisted endpoints that should be ignored when parsing - * configuration descriptors. + * Entries for endpoints that should be ignored when parsing configuration + * descriptors. * - * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. + * Matched for devices with USB_QUIRK_ENDPOINT_IGNORE. 
*/ -static const struct usb_device_id usb_endpoint_blacklist[] = { +static const struct usb_device_id usb_endpoint_ignore[] = { { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, { } }; -bool usb_endpoint_is_blacklisted(struct usb_device *udev, - struct usb_host_interface *intf, - struct usb_endpoint_descriptor *epd) +bool usb_endpoint_is_ignored(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd) { const struct usb_device_id *id; unsigned int address; - for (id = usb_endpoint_blacklist; id->match_flags; ++id) { + for (id = usb_endpoint_ignore; id->match_flags; ++id) { if (!usb_match_device(udev, id)) continue; diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 19e4c550bc73..98e7d1ee63dc 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -37,7 +37,7 @@ extern void usb_authorize_interface(struct usb_interface *); extern void usb_detect_quirks(struct usb_device *udev); extern void usb_detect_interface_quirks(struct usb_device *udev); extern void usb_release_quirk_list(void); -extern bool usb_endpoint_is_blacklisted(struct usb_device *udev, +extern bool usb_endpoint_is_ignored(struct usb_device *udev, struct usb_host_interface *intf, struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 22c1f579afe3..5e4c497f54d6 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -69,7 +69,7 @@ /* Hub needs extra delay after resetting its port. */ #define USB_QUIRK_HUB_SLOW_RESET BIT(14) -/* device has blacklisted endpoints */ -#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) +/* device has endpoints that should be ignored */ +#define USB_QUIRK_ENDPOINT_IGNORE BIT(15) #endif /* __LINUX_USB_QUIRKS_H */ -- cgit v1.2.3-59-g8ed1b From 94b292b277343190175d39172c903c0c5fb814f1 Mon Sep 17 00:00:00 2001 From: Ben Davis Date: Mon, 1 Jun 2020 17:28:17 +0100 Subject: drm: drm_fourcc: add NV15, Q410, Q401 YUV formats DRM_FORMAT_NV15 is a 2 plane format suitable for linear and 16x16 block-linear memory layouts (DRM_FORMAT_MOD_SAMSUNG_16_16_TILE). The format is similar to P010 with 4:2:0 sub-sampling but has no padding between components. Instead, luminance and chrominance samples are grouped into 4s so that each group is packed into an integer number of bytes: YYYY = UVUV = 4 * 10 bits = 40 bits = 5 bytes The '15' suffix refers to the optimum effective bits per pixel which is achieved when the total number of luminance samples is a multiple of 8. Q410 and Q401 are both 3 plane non-subsampled formats with 16 bits per component, but only 10 bits are used and 6 are padded. 'Q' is chosen as the first letter to denote 3 plane YUV444, (and is the next letter along from P which is usually 2 plane). 
V2: Updated block_w of NV15 to {4, 2, 0} V3: Updated commit message to include specific modifier name NV15: Tested-by: Jonas Karlman Reviewed-by: Brian Starkey Signed-off-by: Ben Davis Signed-off-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/20200601162817.18230-1-ben.davis@arm.com --- drivers/gpu/drm/drm_fourcc.c | 12 ++++++++++++ include/uapi/drm/drm_fourcc.h | 22 ++++++++++++++++++++++ 2 files changed, 34 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index b234bfaeda06..722c7ebe4e88 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -274,6 +274,18 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0, .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_NV15, .depth = 0, + .num_planes = 2, .char_per_block = { 5, 5, 0 }, + .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, + .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_Q410, .depth = 0, + .num_planes = 3, .char_per_block = { 2, 2, 2 }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, + .vsub = 0, .is_yuv = true }, + { .format = DRM_FORMAT_Q401, .depth = 0, + .num_planes = 3, .char_per_block = { 2, 2, 2 }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, + .vsub = 0, .is_yuv = true }, }; unsigned int i; diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 9e488d10f8b4..3c4055e48371 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -236,6 +236,12 @@ extern "C" { #define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ #define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */ #define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */ +/* + * 2 plane YCbCr + * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian + * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian + */ +#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ /* * 2 plane YCbCr MSB aligned @@ -265,6 +271,22 @@ extern "C" { */ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ +/* 3 plane non-subsampled (444) YCbCr + * 16 bits per component, but only 10 bits are used and 6 bits are padded + * index 0: Y plane, [15:0] Y:x [10:6] little endian + * index 1: Cb plane, [15:0] Cb:x [10:6] little endian + * index 2: Cr plane, [15:0] Cr:x [10:6] little endian + */ +#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0') + +/* 3 plane non-subsampled (444) YCrCb + * 16 bits per component, but only 10 bits are used and 6 bits are padded + * index 0: Y plane, [15:0] Y:x [10:6] little endian + * index 1: Cr plane, [15:0] Cr:x [10:6] little endian + * index 2: Cb plane, [15:0] Cb:x [10:6] little endian + */ +#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1') + /* * 3 plane YCbCr * index 0: Y plane, [7:0] Y -- cgit v1.2.3-59-g8ed1b From 79ce058032c391b12af928b1e30abf92482a270f Mon Sep 17 00:00:00 2001 From: Ben Davis Date: Thu, 30 Apr 2020 09:32:20 +0100 Subject: drm: drm_fourcc: Add uncompressed AFBC modifier AFBC has a mode that guarantees use of AFBC with an uncompressed payloads, we add a new modifier to support this mode. 
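For illustration only (an assumed usage, not taken from this patch), an allocator could combine the new flag with the existing AFBC modifier helpers in drm_fourcc.h, e.g. a 16x16-superblock AFBC layout with uncompressed storage:

    /* Hypothetical fragment; DRM_FORMAT_MOD_ARM_AFBC() and the 16x16
     * block-size flag are pre-existing definitions in drm_fourcc.h.
     */
    uint64_t modifier = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
                                                AFBC_FORMAT_MOD_USM);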
V2: updated modifier comment Signed-off-by: Ben Davis Acked-by: Liviu Dudau Signed-off-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/20200430083220.17347-1-ben.davis@arm.com --- include/uapi/drm/drm_fourcc.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 3c4055e48371..299f9ac4840b 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -808,6 +808,18 @@ extern "C" { */ #define AFBC_FORMAT_MOD_BCH (1ULL << 11) +/* AFBC uncompressed storage mode + * + * Indicates that the buffer is using AFBC uncompressed storage mode. + * In this mode all superblock payloads in the buffer use the uncompressed + * storage mode, which is usually only used for data which cannot be compressed. + * The buffer layout is the same as for AFBC buffers without USM set, this only + * affects the storage mode of the individual superblocks. Note that even a + * buffer without USM set may use uncompressed storage mode for some or all + * superblocks, USM just guarantees it for all. + */ +#define AFBC_FORMAT_MOD_USM (1ULL << 12) + /* * Arm 16x16 Block U-Interleaved modifier * -- cgit v1.2.3-59-g8ed1b From df06230106e95347ec613f1707a704b04737c59b Mon Sep 17 00:00:00 2001 From: Dmitry Shmidt Date: Wed, 10 Jun 2020 10:30:11 +0200 Subject: dt-bindings: clk: g12a-clkc: Add NNA CLK Source clock IDs This adds the Neural Network Accelerator IP source clocks. Signed-off-by: Dmitry Shmidt Signed-off-by: Neil Armstrong Signed-off-by: Jerome Brunet Acked-by: Rob Herring Link: https://lore.kernel.org/r/20200610083012.5024-2-narmstrong@baylibre.com --- include/dt-bindings/clock/g12a-clkc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index b0d65d73db96..40d49940d8a8 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -145,5 +145,7 @@ #define CLKID_CPU3_CLK 255 #define CLKID_SPICC0_SCLK 258 #define CLKID_SPICC1_SCLK 261 +#define CLKID_NNA_AXI_CLK 264 +#define CLKID_NNA_CORE_CLK 267 #endif /* __G12A_CLKC_H */ -- cgit v1.2.3-59-g8ed1b From 5bf74682118b3003c8f9b0b0ec596e473fc6eb82 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Wed, 17 Jun 2020 18:46:35 +0200 Subject: Drivers: hv: vmbus: Remove the target_vp field from the vmbus_channel struct The field is read only in __vmbus_open() and it is already stored twice (after a call to hv_cpu_number_to_vp_number()) in target_cpu_store() and init_vp_index(); there is no need to "cache" its value in the channel data structure. 
Suggested-by: Michael Kelley Signed-off-by: Andrea Parri (Microsoft) Link: https://lore.kernel.org/r/20200617164642.37393-2-parri.andrea@gmail.com Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- drivers/hv/channel.c | 3 ++- drivers/hv/channel_mgmt.c | 3 --- drivers/hv/vmbus_drv.c | 2 -- include/linux/hyperv.h | 15 +++++++-------- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 90070b337c10..8848d1548b3f 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "hyperv_vmbus.h" @@ -176,7 +177,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel, open_msg->child_relid = newchannel->offermsg.child_relid; open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle; open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset; - open_msg->target_vp = newchannel->target_vp; + open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu); if (userdatalen) memcpy(open_msg->userdata, userdata, userdatalen); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 417a95e5094d..278e39221807 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -704,8 +704,6 @@ static void init_vp_index(struct vmbus_channel *channel) */ channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU); channel->target_cpu = VMBUS_CONNECT_CPU; - channel->target_vp = - hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU); if (perf_chn) hv_set_alloced_cpu(VMBUS_CONNECT_CPU); return; @@ -739,7 +737,6 @@ static void init_vp_index(struct vmbus_channel *channel) cpumask_set_cpu(target_cpu, alloced_mask); channel->target_cpu = target_cpu; - channel->target_vp = hv_cpu_number_to_vp_number(target_cpu); free_cpumask_var(available_mask); } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 9147ee9d5f7d..d2ddb46f1359 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -23,7 +23,6 @@ #include #include -#include #include #include #include @@ -1779,7 +1778,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, */ channel->target_cpu = target_cpu; - channel->target_vp = hv_cpu_number_to_vp_number(target_cpu); channel->numa_node = cpu_to_node(target_cpu); /* See init_vp_index(). */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 40df3103e890..738efdb194b0 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -803,15 +803,14 @@ struct vmbus_channel { u64 sig_event; /* - * Starting with win8, this field will be used to specify - * the target virtual processor on which to deliver the interrupt for - * the host to guest communication. - * Prior to win8, incoming channel interrupts would only - * be delivered on cpu 0. Setting this value to 0 would - * preserve the earlier behavior. + * Starting with win8, this field will be used to specify the + * target CPU on which to deliver the interrupt for the host + * to guest communication. + * + * Prior to win8, incoming channel interrupts would only be + * delivered on CPU 0. Setting this value to 0 would preserve + * the earlier behavior. 
*/ - u32 target_vp; - /* The corresponding CPUID in the guest */ u32 target_cpu; int numa_node; /* -- cgit v1.2.3-59-g8ed1b From 458d090fbad59d1f849ee15e78d0471784d428b6 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Wed, 17 Jun 2020 18:46:36 +0200 Subject: Drivers: hv: vmbus: Remove the numa_node field from the vmbus_channel struct The field is read only in numa_node_show() and it is already stored twice (after a call to cpu_to_node()) in target_cpu_store() and init_vp_index(); there is no need to "cache" its value in the channel data structure. Signed-off-by: Andrea Parri (Microsoft) Link: https://lore.kernel.org/r/20200617164642.37393-3-parri.andrea@gmail.com Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- drivers/hv/channel_mgmt.c | 2 -- drivers/hv/vmbus_drv.c | 3 +-- include/linux/hyperv.h | 1 - 3 files changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 278e39221807..36dd8b6c544a 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -702,7 +702,6 @@ static void init_vp_index(struct vmbus_channel *channel) * In case alloc_cpumask_var() fails, bind it to * VMBUS_CONNECT_CPU. */ - channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU); channel->target_cpu = VMBUS_CONNECT_CPU; if (perf_chn) hv_set_alloced_cpu(VMBUS_CONNECT_CPU); @@ -719,7 +718,6 @@ static void init_vp_index(struct vmbus_channel *channel) continue; break; } - channel->numa_node = numa_node; alloced_mask = &hv_context.hv_numa_map[numa_node]; if (cpumask_weight(alloced_mask) == diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index d2ddb46f1359..de44d76c8ace 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -226,7 +226,7 @@ static ssize_t numa_node_show(struct device *dev, if (!hv_dev->channel) return -ENODEV; - return sprintf(buf, "%d\n", hv_dev->channel->numa_node); + return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu)); } static DEVICE_ATTR_RO(numa_node); #endif @@ -1778,7 +1778,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, */ channel->target_cpu = target_cpu; - channel->numa_node = cpu_to_node(target_cpu); /* See init_vp_index(). */ if (hv_is_perf_channel(channel)) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 738efdb194b0..690394b79d72 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -812,7 +812,6 @@ struct vmbus_channel { * the earlier behavior. */ u32 target_cpu; - int numa_node; /* * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support -- cgit v1.2.3-59-g8ed1b From e32b16c31339e37d3cdc814f2773b74c1dbf660c Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Thu, 28 May 2020 04:36:03 -0700 Subject: platform/chrome: cros_ec: Update mux state bits Sync the EC_CMD_USB_PD_MUX_INFO mux state bit fields with the Chrome EC code base. The newly added bit fields will be used for cros-ec-typec mux control. 
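As a rough sketch of how a consumer might decode the extended flags (hypothetical helper, not part of this patch; the actual EC transfer and error handling are omitted):

    static void report_mux_state(const struct ec_response_usb_pd_mux_info *resp)
    {
            if (resp->flags == USB_PD_MUX_NONE)
                    pr_info("mux: open switch\n");
            if (resp->flags & USB_PD_MUX_SAFE_MODE)
                    pr_info("mux: DP in safe mode\n");
            if (resp->flags & USB_PD_MUX_TBT_COMPAT_ENABLED)
                    pr_info("mux: Thunderbolt compat enabled\n");
            if (resp->flags & USB_PD_MUX_USB4_ENABLED)
                    pr_info("mux: USB4 enabled\n");
    }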
Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra --- include/linux/platform_data/cros_ec_commands.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 69210881ebac..a7b0fc440c35 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5207,11 +5207,15 @@ struct ec_params_usb_pd_mux_info { } __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ -#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ -#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ -#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ -#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_NONE 0 /* Open switch */ +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */ +#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */ +#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ -- cgit v1.2.3-59-g8ed1b From 4b61d3e8d3daebbde7ec02d593f84248fdf8bec2 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Fri, 19 Jun 2020 14:01:07 +0800 Subject: net: qos offload add flow status with dropped count This patch adds a drop frames counter to tc flower offloading. Reporting h/w dropped frames is necessary for some actions. Some actions like police action and the coming introduced stream gate action would produce dropped frames which is necessary for user. Status update shows how many filtered packets increasing and how many dropped in those packets. v2: Changes - Update commit comments suggest by Jiri Pirko. Signed-off-by: Po Liu Reviewed-by: Simon Horman Reviewed-by: Vlad Buslov Signed-off-by: David S. 
Miller --- drivers/net/dsa/sja1105/sja1105_vl.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 2 +- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 7 +++++-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 2 +- include/net/act_api.h | 11 ++++++----- include/net/flow_offload.h | 5 ++++- include/net/pkt_cls.h | 5 +++-- net/sched/act_api.c | 10 ++++------ net/sched/act_ct.c | 6 +++--- net/sched/act_gact.c | 7 ++++--- net/sched/act_gate.c | 6 +++--- net/sched/act_mirred.c | 6 +++--- net/sched/act_pedit.c | 6 +++--- net/sched/act_police.c | 4 ++-- net/sched/act_skbedit.c | 5 +++-- net/sched/act_vlan.c | 6 +++--- net/sched/cls_flower.c | 1 + net/sched/cls_matchall.c | 3 ++- 25 files changed, 60 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index bdfd6c4e190d..9ddc49b7eb8f 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -771,7 +771,7 @@ int sja1105_vl_stats(struct sja1105_private *priv, int port, pkts = timingerr + unreleased + lengtherr; - flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, + flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0, jiffies - rule->vl.stats.lastused, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0eef4f5e4a46..4d482d75a20b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1638,7 +1638,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, lastused = flow->lastused; spin_unlock(&flow->stats_lock); - flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0, lastused, FLOW_ACTION_HW_STATS_DELAYED); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 4a5fa9eba0b6..030de20a5d27 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -902,7 +902,7 @@ int cxgb4_tc_flower_stats(struct net_device *dev, if (ofld_stats->prev_packet_count != packets) ofld_stats->last_used = jiffies; flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count, - packets - ofld_stats->packet_count, + packets - ofld_stats->packet_count, 0, ofld_stats->last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index c88c47a14fbb..c439b5bce9c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -346,7 +346,7 @@ int cxgb4_tc_matchall_stats(struct net_device *dev, flow_stats_update(&cls_matchall->stats, bytes - tc_port_matchall->ingress.bytes, packets - tc_port_matchall->ingress.packets, - tc_port_matchall->ingress.last_used, + 0, tc_port_matchall->ingress.last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); tc_port_matchall->ingress.packets = packets; diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index fd3df19eaa32..fb76903eca90 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1291,12 +1291,15 @@ static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, spin_lock(&epsfp.psfp_lock); stats.pkts = counters.matching_frames_count - filter->stats.pkts; + stats.drops = counters.not_passing_frames_count - + filter->stats.drops; stats.lastused = filter->stats.lastused; filter->stats.pkts += stats.pkts; + filter->stats.drops += stats.drops; spin_unlock(&epsfp.psfp_lock); - flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused, - FLOW_ACTION_HW_STATS_DELAYED); + flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, + stats.lastused, FLOW_ACTION_HW_STATS_DELAYED); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 430025550fad..c7107da03212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -672,7 +672,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, return -ENOENT; mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse); - flow_stats_update(&f->stats, bytes, packets, lastuse, + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7fc84f58e28a..bc9c0ac15f99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4828,7 +4828,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, no_peer_counter: mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: - flow_stats_update(&f->stats, bytes, packets, lastuse, + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); trace_mlx5e_stats_flower(f); errout: @@ -4946,7 +4946,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; rpriv->prev_vf_vport_stats = cur_stats; - flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, + flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies, FLOW_ACTION_HW_STATS_DELAYED); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 51e1b3930c56..61d21043d83a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -633,7 +633,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rule_get_stats; - flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats); + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, used_hw_stats); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 5ce172e22b43..c90bafbd651f 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -244,7 +244,7 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, if (ret) return ret; - flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0, + flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0, 0x0, 
FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 695d24b9dd92..234c652700e1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1491,7 +1491,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, nfp_flower_update_merge_stats(app, nfp_flow); flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, - priv->stats[ctx_id].pkts, priv->stats[ctx_id].used, + priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used, FLOW_ACTION_HW_STATS_DELAYED); priv->stats[ctx_id].pkts = 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index d18a830e4264..bb327d48d1ab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -319,7 +319,7 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev, prev_stats->bytes = curr_stats->bytes; spin_unlock_bh(&fl_priv->qos_stats_lock); - flow_stats_update(&flow->stats, diff_bytes, diff_pkts, + flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0, repr_priv->qos_table.last_update, FLOW_ACTION_HW_STATS_DELAYED); return 0; diff --git a/include/net/act_api.h b/include/net/act_api.h index 8c3934880670..cb382a89ea58 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -106,7 +106,7 @@ struct tc_action_ops { struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); - void (*stats_update)(struct tc_action *, u64, u32, u64, bool); + void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a, tc_action_priv_destructor *destructor); @@ -232,8 +232,8 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a) spin_unlock(&a->tcfa_lock); } -void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, - bool drop, bool hw); +void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, bool hw); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); int tcf_action_check_ctrlact(int action, struct tcf_proto *tp, @@ -244,13 +244,14 @@ struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action, #endif /* CONFIG_NET_CLS_ACT */ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, - u64 packets, u64 lastuse, bool hw) + u64 packets, u64 drops, + u64 lastuse, bool hw) { #ifdef CONFIG_NET_CLS_ACT if (!a->ops->stats_update) return; - a->ops->stats_update(a, bytes, packets, lastuse, hw); + a->ops->stats_update(a, bytes, packets, drops, lastuse, hw); #endif } diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index f2c8311a0433..00c15f14c434 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -389,17 +389,20 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule, struct flow_stats { u64 pkts; u64 bytes; + u64 drops; u64 lastused; enum flow_action_hw_stats used_hw_stats; bool used_hw_stats_valid; }; static inline void flow_stats_update(struct flow_stats *flow_stats, - u64 bytes, u64 pkts, u64 lastused, + u64 bytes, u64 pkts, + u64 drops, u64 lastused, enum flow_action_hw_stats used_hw_stats) { flow_stats->pkts += pkts; flow_stats->bytes += bytes; + flow_stats->drops += drops; flow_stats->lastused = 
max_t(u64, flow_stats->lastused, lastused); /* The driver should pass value with a maximum of one bit set. diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ed65619cbc47..ff017e5b3ea2 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -262,7 +262,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) static inline void tcf_exts_stats_update(const struct tcf_exts *exts, - u64 bytes, u64 packets, u64 lastuse, + u64 bytes, u64 packets, u64 drops, u64 lastuse, u8 used_hw_stats, bool used_hw_stats_valid) { #ifdef CONFIG_NET_CLS_ACT @@ -273,7 +273,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts, for (i = 0; i < exts->nr_actions; i++) { struct tc_action *a = exts->actions[i]; - tcf_action_stats_update(a, bytes, packets, lastuse, true); + tcf_action_stats_update(a, bytes, packets, drops, + lastuse, true); a->used_hw_stats = used_hw_stats; a->used_hw_stats_valid = used_hw_stats_valid; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 8ac7eb0a8309..4c4466f18801 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1059,14 +1059,13 @@ err: return err; } -void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, - bool drop, bool hw) +void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, bool hw) { if (a->cpu_bstats) { _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); - if (drop) - this_cpu_ptr(a->cpu_qstats)->drops += packets; + this_cpu_ptr(a->cpu_qstats)->drops += drops; if (hw) _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), @@ -1075,8 +1074,7 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, } _bstats_update(&a->tcfa_bstats, bytes, packets); - if (drop) - a->tcfa_qstats.drops += packets; + a->tcfa_qstats.drops += drops; if (hw) _bstats_update(&a->tcfa_bstats_hw, bytes, packets); } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index e9f3576cbf71..1b9c6d4a1b6b 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1450,12 +1450,12 @@ static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } -static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_ct *c = to_ct(a); - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); } diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 416065772719..410e3bbfb9ca 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -171,14 +171,15 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, return action; } -static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); struct tcf_t *tm = &gact->tcf_tm; - tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT, hw); + tcf_action_update_stats(a, bytes, packets, + action == TC_ACT_SHOT ? 
packets : drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index 9c628591f452..c818844846b1 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -568,13 +568,13 @@ static int tcf_gate_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_gate *gact = to_gate(a); struct tcf_t *tm = &gact->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 83dd82fc9f40..b2705318993b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -312,13 +312,13 @@ out: return retval; } -static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_mirred *m = to_mirred(a); struct tcf_t *tm = &m->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index d41d6200d9de..66986db062ed 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -409,13 +409,13 @@ done: return p->tcf_action; } -static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_pedit *d = to_pedit(a); struct tcf_t *tm = &d->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 8b7a0ac96c51..0b431d493768 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -288,13 +288,13 @@ static void tcf_police_cleanup(struct tc_action *a) } static void tcf_police_stats_update(struct tc_action *a, - u64 bytes, u32 packets, + u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_police *police = to_police(a); struct tcf_t *tm = &police->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index b125b2be4467..361b863e0634 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -74,12 +74,13 @@ err: } static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes, - u32 packets, u64 lastuse, bool hw) + u64 packets, u64 drops, + u64 lastuse, bool hw) { struct tcf_skbedit *d = to_skbedit(a); struct tcf_t *tm = &d->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index c91d3958fcbb..a5ff9f68ab02 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -302,13 +302,13 @@ static int tcf_vlan_walker(struct net *net, 
struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_vlan *v = to_vlan(a); struct tcf_t *tm = &v->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index b2da37286082..391971672d54 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -491,6 +491,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, cls_flower.stats.pkts, + cls_flower.stats.drops, cls_flower.stats.lastused, cls_flower.stats.used_hw_stats, cls_flower.stats.used_hw_stats_valid); diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 8d39dbcf1746..cafb84480bab 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -338,7 +338,8 @@ static void mall_stats_hw_filter(struct tcf_proto *tp, tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true); tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, - cls_mall.stats.pkts, cls_mall.stats.lastused, + cls_mall.stats.pkts, cls_mall.stats.drops, + cls_mall.stats.lastused, cls_mall.stats.used_hw_stats, cls_mall.stats.used_hw_stats_valid); } -- cgit v1.2.3-59-g8ed1b From 24cfbec99647d241c3c31a72d247ee81f9061b5b Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Sat, 20 Jun 2020 02:53:54 +0530 Subject: drm/dp: DRM DP helper for reading Ignore MSA from DPCD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DP sink device sets the Ignore MSA bit in its DP_DOWN_STREAM_PORT_COUNT register to indicate its ability to ignore the MSA video timing parameters and its ability to support seamless video timing change over a range of timings exposed by DisplayID and EDID. This is required for the sink to indicate that it is Adaptive sync capable. v3: * Fix the typo in commit message (Manasi) v2: * Rename to describe what the function does (Jani Nikula) Cc: Jani Nikula Cc: Ville Syrjälä Cc: Harry Wentland Cc: Nicholas Kazlauskas Signed-off-by: Manasi Navare Reviewed-by: Harry Wentland Link: https://patchwork.freedesktop.org/patch/msgid/20200619212356.19285-2-bhanuprakash.modem@intel.com --- include/drm/drm_dp_helper.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index aed08e8cb22c..92b8d326ab4a 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1324,6 +1324,14 @@ drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) DP_ALTERNATE_SCRAMBLER_RESET_CAP; } +/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ +static inline bool +drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_MSA_TIMING_PAR_IGNORED; +} + /* * DisplayPort AUX channel */ -- cgit v1.2.3-59-g8ed1b From 0efaaa86581c596f9426482c731f262d843807b6 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:50:08 +0200 Subject: docs: crypto: convert asymmetric-keys.txt to ReST This file is almost compatible with ReST.
Just minor changes were needed: - Adjust document and titles markups; - Adjust numbered list markups; - Add a comments markup for the Contents section; - Add markups for literal blocks. Acked-by: Jarkko Sakkinen Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/c2275ea94e0507a01b020ab66dfa824d8b1c2545.1592203650.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- Documentation/crypto/asymmetric-keys.rst | 424 ++++++++++++++++++++++++++++++ Documentation/crypto/asymmetric-keys.txt | 429 ------------------------------- Documentation/crypto/index.rst | 1 + Documentation/security/keys/core.rst | 2 +- MAINTAINERS | 2 +- crypto/asymmetric_keys/asymmetric_type.c | 2 +- crypto/asymmetric_keys/public_key.c | 2 +- crypto/asymmetric_keys/signature.c | 2 +- include/crypto/public_key.h | 2 +- include/keys/asymmetric-parser.h | 2 +- include/keys/asymmetric-subtype.h | 2 +- include/keys/asymmetric-type.h | 2 +- 12 files changed, 434 insertions(+), 438 deletions(-) create mode 100644 Documentation/crypto/asymmetric-keys.rst delete mode 100644 Documentation/crypto/asymmetric-keys.txt (limited to 'include') diff --git a/Documentation/crypto/asymmetric-keys.rst b/Documentation/crypto/asymmetric-keys.rst new file mode 100644 index 000000000000..349f44a29392 --- /dev/null +++ b/Documentation/crypto/asymmetric-keys.rst @@ -0,0 +1,424 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================================= +Asymmetric / Public-key Cryptography Key Type +============================================= + +.. Contents: + + - Overview. + - Key identification. + - Accessing asymmetric keys. + - Signature verification. + - Asymmetric key subtypes. + - Instantiation data parsers. + - Keyring link restrictions. + + +Overview +======== + +The "asymmetric" key type is designed to be a container for the keys used in +public-key cryptography, without imposing any particular restrictions on the +form or mechanism of the cryptography or form of the key. + +The asymmetric key is given a subtype that defines what sort of data is +associated with the key and provides operations to describe and destroy it. +However, no requirement is made that the key data actually be stored in the +key. + +A completely in-kernel key retention and operation subtype can be defined, but +it would also be possible to provide access to cryptographic hardware (such as +a TPM) that might be used to both retain the relevant key and perform +operations using that key. In such a case, the asymmetric key would then +merely be an interface to the TPM driver. + +Also provided is the concept of a data parser. Data parsers are responsible +for extracting information from the blobs of data passed to the instantiation +function. The first data parser that recognises the blob gets to set the +subtype of the key and define the operations that can be done on that key. + +A data parser may interpret the data blob as containing the bits representing a +key, or it may interpret it as a reference to a key held somewhere else in the +system (for example, a TPM). + + +Key Identification +================== + +If a key is added with an empty name, the instantiation data parsers are given +the opportunity to pre-parse a key and to determine the description the key +should be given from the content of the key. + +This can then be used to refer to the key, either by complete match or by +partial match. The key type may also use other criteria to refer to a key. 
+ +The asymmetric key type's match function can then perform a wider range of +comparisons than just the straightforward comparison of the description with +the criterion string: + + 1) If the criterion string is of the form "id:" then the match + function will examine a key's fingerprint to see if the hex digits given + after the "id:" match the tail. For instance:: + + keyctl search @s asymmetric id:5acc2142 + + will match a key with fingerprint:: + + 1A00 2040 7601 7889 DE11 882C 3823 04AD 5ACC 2142 + + 2) If the criterion string is of the form ":" then the + match will match the ID as in (1), but with the added restriction that + only keys of the specified subtype (e.g. tpm) will be matched. For + instance:: + + keyctl search @s asymmetric tpm:5acc2142 + +Looking in /proc/keys, the last 8 hex digits of the key fingerprint are +displayed, along with the subtype:: + + 1a39e171 I----- 1 perm 3f010000 0 0 asymmetric modsign.0: DSA 5acc2142 [] + + +Accessing Asymmetric Keys +========================= + +For general access to asymmetric keys from within the kernel, the following +inclusion is required:: + + #include + +This gives access to functions for dealing with asymmetric / public keys. +Three enums are defined there for representing public-key cryptography +algorithms:: + + enum pkey_algo + +digest algorithms used by those:: + + enum pkey_hash_algo + +and key identifier representations:: + + enum pkey_id_type + +Note that the key type representation types are required because key +identifiers from different standards aren't necessarily compatible. For +instance, PGP generates key identifiers by hashing the key data plus some +PGP-specific metadata, whereas X.509 has arbitrary certificate identifiers. + +The operations defined upon a key are: + + 1) Signature verification. + +Other operations are possible (such as encryption) with the same key data +required for verification, but not currently supported, and others +(eg. decryption and signature generation) require extra key data. + + +Signature Verification +---------------------- + +An operation is provided to perform cryptographic signature verification, using +an asymmetric key to provide or to provide access to the public key:: + + int verify_signature(const struct key *key, + const struct public_key_signature *sig); + +The caller must have already obtained the key from some source and can then use +it to check the signature. The caller must have parsed the signature and +transferred the relevant bits to the structure pointed to by sig:: + + struct public_key_signature { + u8 *digest; + u8 digest_size; + enum pkey_hash_algo pkey_hash_algo : 8; + u8 nr_mpi; + union { + MPI mpi[2]; + ... + }; + }; + +The algorithm used must be noted in sig->pkey_hash_algo, and all the MPIs that +make up the actual signature must be stored in sig->mpi[] and the count of MPIs +placed in sig->nr_mpi. + +In addition, the data must have been digested by the caller and the resulting +hash must be pointed to by sig->digest and the size of the hash be placed in +sig->digest_size. + +The function will return 0 upon success or -EKEYREJECTED if the signature +doesn't match. + +The function may also return -ENOTSUPP if an unsupported public-key algorithm +or public-key/hash algorithm combination is specified or the key doesn't +support the operation; -EBADMSG or -ERANGE if some of the parameters have weird +data; or -ENOMEM if an allocation can't be performed. -EINVAL can be returned +if the key argument is the wrong type or is incompletely set up. 
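As an illustrative aside (not part of the patch): a minimal sketch of a caller using the verification interface documented above. It assumes the key has already been obtained and the data already digested; the structure layout follows the text above, and the current in-tree definition of struct public_key_signature may have evolved since this document was written:

    #include <crypto/public_key.h>
    #include <linux/key.h>
    #include <linux/mpi.h>

    /* Illustrative only: check a single-MPI signature against @key.
     * Returns 0 on success or -EKEYREJECTED if the signature does not match. */
    static int example_check_signature(const struct key *key,
                                        u8 *digest, u8 digest_size,
                                        enum pkey_hash_algo hash_algo,
                                        MPI sig_mpi)
    {
            struct public_key_signature sig = {
                    .digest         = digest,
                    .digest_size    = digest_size,
                    .pkey_hash_algo = hash_algo,
                    .nr_mpi         = 1,
            };

            sig.mpi[0] = sig_mpi;

            return verify_signature(key, &sig);
    }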
+ + +Asymmetric Key Subtypes +======================= + +Asymmetric keys have a subtype that defines the set of operations that can be +performed on that key and that determines what data is attached as the key +payload. The payload format is entirely at the whim of the subtype. + +The subtype is selected by the key data parser and the parser must initialise +the data required for it. The asymmetric key retains a reference on the +subtype module. + +The subtype definition structure can be found in:: + + #include + +and looks like the following:: + + struct asymmetric_key_subtype { + struct module *owner; + const char *name; + + void (*describe)(const struct key *key, struct seq_file *m); + void (*destroy)(void *payload); + int (*query)(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info); + int (*eds_op)(struct kernel_pkey_params *params, + const void *in, void *out); + int (*verify_signature)(const struct key *key, + const struct public_key_signature *sig); + }; + +Asymmetric keys point to this with their payload[asym_subtype] member. + +The owner and name fields should be set to the owning module and the name of +the subtype. Currently, the name is only used for print statements. + +There are a number of operations defined by the subtype: + + 1) describe(). + + Mandatory. This allows the subtype to display something in /proc/keys + against the key. For instance the name of the public key algorithm type + could be displayed. The key type will display the tail of the key + identity string after this. + + 2) destroy(). + + Mandatory. This should free the memory associated with the key. The + asymmetric key will look after freeing the fingerprint and releasing the + reference on the subtype module. + + 3) query(). + + Mandatory. This is a function for querying the capabilities of a key. + + 4) eds_op(). + + Optional. This is the entry point for the encryption, decryption and + signature creation operations (which are distinguished by the operation ID + in the parameter struct). The subtype may do anything it likes to + implement an operation, including offloading to hardware. + + 5) verify_signature(). + + Optional. This is the entry point for signature verification. The + subtype may do anything it likes to implement an operation, including + offloading to hardware. + +Instantiation Data Parsers +========================== + +The asymmetric key type doesn't generally want to store or to deal with a raw +blob of data that holds the key data. It would have to parse it and error +check it each time it wanted to use it. Further, the contents of the blob may +have various checks that can be performed on it (eg. self-signatures, validity +dates) and may contain useful data about the key (identifiers, capabilities). + +Also, the blob may represent a pointer to some hardware containing the key +rather than the key itself. + +Examples of blob formats for which parsers could be implemented include: + + - OpenPGP packet stream [RFC 4880]. + - X.509 ASN.1 stream. + - Pointer to TPM key. + - Pointer to UEFI key. + - PKCS#8 private key [RFC 5208]. + - PKCS#5 encrypted private key [RFC 2898]. + +During key instantiation each parser in the list is tried until one doesn't +return -EBADMSG. 
+ +The parser definition structure can be found in:: + + #include + +and looks like the following:: + + struct asymmetric_key_parser { + struct module *owner; + const char *name; + + int (*parse)(struct key_preparsed_payload *prep); + }; + +The owner and name fields should be set to the owning module and the name of +the parser. + +There is currently only a single operation defined by the parser, and it is +mandatory: + + 1) parse(). + + This is called to preparse the key from the key creation and update paths. + In particular, it is called during the key creation _before_ a key is + allocated, and as such, is permitted to provide the key's description in + the case that the caller declines to do so. + + The caller passes a pointer to the following struct with all of the fields + cleared, except for data, datalen and quotalen [see + Documentation/security/keys/core.rst]:: + + struct key_preparsed_payload { + char *description; + void *payload[4]; + const void *data; + size_t datalen; + size_t quotalen; + }; + + The instantiation data is in a blob pointed to by data and is datalen in + size. The parse() function is not permitted to change these two values at + all, and shouldn't change any of the other values _unless_ they are + recognise the blob format and will not return -EBADMSG to indicate it is + not theirs. + + If the parser is happy with the blob, it should propose a description for + the key and attach it to ->description, ->payload[asym_subtype] should be + set to point to the subtype to be used, ->payload[asym_crypto] should be + set to point to the initialised data for that subtype, + ->payload[asym_key_ids] should point to one or more hex fingerprints and + quotalen should be updated to indicate how much quota this key should + account for. + + When clearing up, the data attached to ->payload[asym_key_ids] and + ->description will be kfree()'d and the data attached to + ->payload[asm_crypto] will be passed to the subtype's ->destroy() method + to be disposed of. A module reference for the subtype pointed to by + ->payload[asym_subtype] will be put. + + + If the data format is not recognised, -EBADMSG should be returned. If it + is recognised, but the key cannot for some reason be set up, some other + negative error code should be returned. On success, 0 should be returned. + + The key's fingerprint string may be partially matched upon. For a + public-key algorithm such as RSA and DSA this will likely be a printable + hex version of the key's fingerprint. + +Functions are provided to register and unregister parsers:: + + int register_asymmetric_key_parser(struct asymmetric_key_parser *parser); + void unregister_asymmetric_key_parser(struct asymmetric_key_parser *subtype); + +Parsers may not have the same name. The names are otherwise only used for +displaying in debugging messages. + + +Keyring Link Restrictions +========================= + +Keyrings created from userspace using add_key can be configured to check the +signature of the key being linked. Keys without a valid signature are not +allowed to link. + +Several restriction methods are available: + + 1) Restrict using the kernel builtin trusted keyring + + - Option string used with KEYCTL_RESTRICT_KEYRING: + - "builtin_trusted" + + The kernel builtin trusted keyring will be searched for the signing key. + If the builtin trusted keyring is not configured, all links will be + rejected. The ca_keys kernel parameter also affects which keys are used + for signature verification. 
+ + 2) Restrict using the kernel builtin and secondary trusted keyrings + + - Option string used with KEYCTL_RESTRICT_KEYRING: + - "builtin_and_secondary_trusted" + + The kernel builtin and secondary trusted keyrings will be searched for the + signing key. If the secondary trusted keyring is not configured, this + restriction will behave like the "builtin_trusted" option. The ca_keys + kernel parameter also affects which keys are used for signature + verification. + + 3) Restrict using a separate key or keyring + + - Option string used with KEYCTL_RESTRICT_KEYRING: + - "key_or_keyring:[:chain]" + + Whenever a key link is requested, the link will only succeed if the key + being linked is signed by one of the designated keys. This key may be + specified directly by providing a serial number for one asymmetric key, or + a group of keys may be searched for the signing key by providing the + serial number for a keyring. + + When the "chain" option is provided at the end of the string, the keys + within the destination keyring will also be searched for signing keys. + This allows for verification of certificate chains by adding each + certificate in order (starting closest to the root) to a keyring. For + instance, one keyring can be populated with links to a set of root + certificates, with a separate, restricted keyring set up for each + certificate chain to be validated:: + + # Create and populate a keyring for root certificates + root_id=`keyctl add keyring root-certs "" @s` + keyctl padd asymmetric "" $root_id < root1.cert + keyctl padd asymmetric "" $root_id < root2.cert + + # Create and restrict a keyring for the certificate chain + chain_id=`keyctl add keyring chain "" @s` + keyctl restrict_keyring $chain_id asymmetric key_or_keyring:$root_id:chain + + # Attempt to add each certificate in the chain, starting with the + # certificate closest to the root. + keyctl padd asymmetric "" $chain_id < intermediateA.cert + keyctl padd asymmetric "" $chain_id < intermediateB.cert + keyctl padd asymmetric "" $chain_id < end-entity.cert + + If the final end-entity certificate is successfully added to the "chain" + keyring, we can be certain that it has a valid signing chain going back to + one of the root certificates. + + A single keyring can be used to verify a chain of signatures by + restricting the keyring after linking the root certificate:: + + # Create a keyring for the certificate chain and add the root + chain2_id=`keyctl add keyring chain2 "" @s` + keyctl padd asymmetric "" $chain2_id < root1.cert + + # Restrict the keyring that already has root1.cert linked. The cert + # will remain linked by the keyring. + keyctl restrict_keyring $chain2_id asymmetric key_or_keyring:0:chain + + # Attempt to add each certificate in the chain, starting with the + # certificate closest to the root. + keyctl padd asymmetric "" $chain2_id < intermediateA.cert + keyctl padd asymmetric "" $chain2_id < intermediateB.cert + keyctl padd asymmetric "" $chain2_id < end-entity.cert + + If the final end-entity certificate is successfully added to the "chain2" + keyring, we can be certain that there is a valid signing chain going back + to the root certificate that was added before the keyring was restricted. + + +In all of these cases, if the signing key is found the signature of the key to +be linked will be verified using the signing key. The requested key is added +to the keyring only if the signature is successfully verified. 
-ENOKEY is +returned if the parent certificate could not be found, or -EKEYREJECTED is +returned if the signature check fails or the key is blacklisted. Other errors +may be returned if the signature check could not be performed. diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt deleted file mode 100644 index 8763866b11cf..000000000000 --- a/Documentation/crypto/asymmetric-keys.txt +++ /dev/null @@ -1,429 +0,0 @@ - ============================================= - ASYMMETRIC / PUBLIC-KEY CRYPTOGRAPHY KEY TYPE - ============================================= - -Contents: - - - Overview. - - Key identification. - - Accessing asymmetric keys. - - Signature verification. - - Asymmetric key subtypes. - - Instantiation data parsers. - - Keyring link restrictions. - - -======== -OVERVIEW -======== - -The "asymmetric" key type is designed to be a container for the keys used in -public-key cryptography, without imposing any particular restrictions on the -form or mechanism of the cryptography or form of the key. - -The asymmetric key is given a subtype that defines what sort of data is -associated with the key and provides operations to describe and destroy it. -However, no requirement is made that the key data actually be stored in the -key. - -A completely in-kernel key retention and operation subtype can be defined, but -it would also be possible to provide access to cryptographic hardware (such as -a TPM) that might be used to both retain the relevant key and perform -operations using that key. In such a case, the asymmetric key would then -merely be an interface to the TPM driver. - -Also provided is the concept of a data parser. Data parsers are responsible -for extracting information from the blobs of data passed to the instantiation -function. The first data parser that recognises the blob gets to set the -subtype of the key and define the operations that can be done on that key. - -A data parser may interpret the data blob as containing the bits representing a -key, or it may interpret it as a reference to a key held somewhere else in the -system (for example, a TPM). - - -================== -KEY IDENTIFICATION -================== - -If a key is added with an empty name, the instantiation data parsers are given -the opportunity to pre-parse a key and to determine the description the key -should be given from the content of the key. - -This can then be used to refer to the key, either by complete match or by -partial match. The key type may also use other criteria to refer to a key. - -The asymmetric key type's match function can then perform a wider range of -comparisons than just the straightforward comparison of the description with -the criterion string: - - (1) If the criterion string is of the form "id:" then the match - function will examine a key's fingerprint to see if the hex digits given - after the "id:" match the tail. For instance: - - keyctl search @s asymmetric id:5acc2142 - - will match a key with fingerprint: - - 1A00 2040 7601 7889 DE11 882C 3823 04AD 5ACC 2142 - - (2) If the criterion string is of the form ":" then the - match will match the ID as in (1), but with the added restriction that - only keys of the specified subtype (e.g. tpm) will be matched. 
For - instance: - - keyctl search @s asymmetric tpm:5acc2142 - -Looking in /proc/keys, the last 8 hex digits of the key fingerprint are -displayed, along with the subtype: - - 1a39e171 I----- 1 perm 3f010000 0 0 asymmetric modsign.0: DSA 5acc2142 [] - - -========================= -ACCESSING ASYMMETRIC KEYS -========================= - -For general access to asymmetric keys from within the kernel, the following -inclusion is required: - - #include - -This gives access to functions for dealing with asymmetric / public keys. -Three enums are defined there for representing public-key cryptography -algorithms: - - enum pkey_algo - -digest algorithms used by those: - - enum pkey_hash_algo - -and key identifier representations: - - enum pkey_id_type - -Note that the key type representation types are required because key -identifiers from different standards aren't necessarily compatible. For -instance, PGP generates key identifiers by hashing the key data plus some -PGP-specific metadata, whereas X.509 has arbitrary certificate identifiers. - -The operations defined upon a key are: - - (1) Signature verification. - -Other operations are possible (such as encryption) with the same key data -required for verification, but not currently supported, and others -(eg. decryption and signature generation) require extra key data. - - -SIGNATURE VERIFICATION ----------------------- - -An operation is provided to perform cryptographic signature verification, using -an asymmetric key to provide or to provide access to the public key. - - int verify_signature(const struct key *key, - const struct public_key_signature *sig); - -The caller must have already obtained the key from some source and can then use -it to check the signature. The caller must have parsed the signature and -transferred the relevant bits to the structure pointed to by sig. - - struct public_key_signature { - u8 *digest; - u8 digest_size; - enum pkey_hash_algo pkey_hash_algo : 8; - u8 nr_mpi; - union { - MPI mpi[2]; - ... - }; - }; - -The algorithm used must be noted in sig->pkey_hash_algo, and all the MPIs that -make up the actual signature must be stored in sig->mpi[] and the count of MPIs -placed in sig->nr_mpi. - -In addition, the data must have been digested by the caller and the resulting -hash must be pointed to by sig->digest and the size of the hash be placed in -sig->digest_size. - -The function will return 0 upon success or -EKEYREJECTED if the signature -doesn't match. - -The function may also return -ENOTSUPP if an unsupported public-key algorithm -or public-key/hash algorithm combination is specified or the key doesn't -support the operation; -EBADMSG or -ERANGE if some of the parameters have weird -data; or -ENOMEM if an allocation can't be performed. -EINVAL can be returned -if the key argument is the wrong type or is incompletely set up. - - -======================= -ASYMMETRIC KEY SUBTYPES -======================= - -Asymmetric keys have a subtype that defines the set of operations that can be -performed on that key and that determines what data is attached as the key -payload. The payload format is entirely at the whim of the subtype. - -The subtype is selected by the key data parser and the parser must initialise -the data required for it. The asymmetric key retains a reference on the -subtype module. 
- -The subtype definition structure can be found in: - - #include - -and looks like the following: - - struct asymmetric_key_subtype { - struct module *owner; - const char *name; - - void (*describe)(const struct key *key, struct seq_file *m); - void (*destroy)(void *payload); - int (*query)(const struct kernel_pkey_params *params, - struct kernel_pkey_query *info); - int (*eds_op)(struct kernel_pkey_params *params, - const void *in, void *out); - int (*verify_signature)(const struct key *key, - const struct public_key_signature *sig); - }; - -Asymmetric keys point to this with their payload[asym_subtype] member. - -The owner and name fields should be set to the owning module and the name of -the subtype. Currently, the name is only used for print statements. - -There are a number of operations defined by the subtype: - - (1) describe(). - - Mandatory. This allows the subtype to display something in /proc/keys - against the key. For instance the name of the public key algorithm type - could be displayed. The key type will display the tail of the key - identity string after this. - - (2) destroy(). - - Mandatory. This should free the memory associated with the key. The - asymmetric key will look after freeing the fingerprint and releasing the - reference on the subtype module. - - (3) query(). - - Mandatory. This is a function for querying the capabilities of a key. - - (4) eds_op(). - - Optional. This is the entry point for the encryption, decryption and - signature creation operations (which are distinguished by the operation ID - in the parameter struct). The subtype may do anything it likes to - implement an operation, including offloading to hardware. - - (5) verify_signature(). - - Optional. This is the entry point for signature verification. The - subtype may do anything it likes to implement an operation, including - offloading to hardware. - - -========================== -INSTANTIATION DATA PARSERS -========================== - -The asymmetric key type doesn't generally want to store or to deal with a raw -blob of data that holds the key data. It would have to parse it and error -check it each time it wanted to use it. Further, the contents of the blob may -have various checks that can be performed on it (eg. self-signatures, validity -dates) and may contain useful data about the key (identifiers, capabilities). - -Also, the blob may represent a pointer to some hardware containing the key -rather than the key itself. - -Examples of blob formats for which parsers could be implemented include: - - - OpenPGP packet stream [RFC 4880]. - - X.509 ASN.1 stream. - - Pointer to TPM key. - - Pointer to UEFI key. - - PKCS#8 private key [RFC 5208]. - - PKCS#5 encrypted private key [RFC 2898]. - -During key instantiation each parser in the list is tried until one doesn't -return -EBADMSG. - -The parser definition structure can be found in: - - #include - -and looks like the following: - - struct asymmetric_key_parser { - struct module *owner; - const char *name; - - int (*parse)(struct key_preparsed_payload *prep); - }; - -The owner and name fields should be set to the owning module and the name of -the parser. - -There is currently only a single operation defined by the parser, and it is -mandatory: - - (1) parse(). - - This is called to preparse the key from the key creation and update paths. - In particular, it is called during the key creation _before_ a key is - allocated, and as such, is permitted to provide the key's description in - the case that the caller declines to do so. 
- - The caller passes a pointer to the following struct with all of the fields - cleared, except for data, datalen and quotalen [see - Documentation/security/keys/core.rst]. - - struct key_preparsed_payload { - char *description; - void *payload[4]; - const void *data; - size_t datalen; - size_t quotalen; - }; - - The instantiation data is in a blob pointed to by data and is datalen in - size. The parse() function is not permitted to change these two values at - all, and shouldn't change any of the other values _unless_ they are - recognise the blob format and will not return -EBADMSG to indicate it is - not theirs. - - If the parser is happy with the blob, it should propose a description for - the key and attach it to ->description, ->payload[asym_subtype] should be - set to point to the subtype to be used, ->payload[asym_crypto] should be - set to point to the initialised data for that subtype, - ->payload[asym_key_ids] should point to one or more hex fingerprints and - quotalen should be updated to indicate how much quota this key should - account for. - - When clearing up, the data attached to ->payload[asym_key_ids] and - ->description will be kfree()'d and the data attached to - ->payload[asm_crypto] will be passed to the subtype's ->destroy() method - to be disposed of. A module reference for the subtype pointed to by - ->payload[asym_subtype] will be put. - - - If the data format is not recognised, -EBADMSG should be returned. If it - is recognised, but the key cannot for some reason be set up, some other - negative error code should be returned. On success, 0 should be returned. - - The key's fingerprint string may be partially matched upon. For a - public-key algorithm such as RSA and DSA this will likely be a printable - hex version of the key's fingerprint. - -Functions are provided to register and unregister parsers: - - int register_asymmetric_key_parser(struct asymmetric_key_parser *parser); - void unregister_asymmetric_key_parser(struct asymmetric_key_parser *subtype); - -Parsers may not have the same name. The names are otherwise only used for -displaying in debugging messages. - - -========================= -KEYRING LINK RESTRICTIONS -========================= - -Keyrings created from userspace using add_key can be configured to check the -signature of the key being linked. Keys without a valid signature are not -allowed to link. - -Several restriction methods are available: - - (1) Restrict using the kernel builtin trusted keyring - - - Option string used with KEYCTL_RESTRICT_KEYRING: - - "builtin_trusted" - - The kernel builtin trusted keyring will be searched for the signing key. - If the builtin trusted keyring is not configured, all links will be - rejected. The ca_keys kernel parameter also affects which keys are used - for signature verification. - - (2) Restrict using the kernel builtin and secondary trusted keyrings - - - Option string used with KEYCTL_RESTRICT_KEYRING: - - "builtin_and_secondary_trusted" - - The kernel builtin and secondary trusted keyrings will be searched for the - signing key. If the secondary trusted keyring is not configured, this - restriction will behave like the "builtin_trusted" option. The ca_keys - kernel parameter also affects which keys are used for signature - verification. 
- - (3) Restrict using a separate key or keyring - - - Option string used with KEYCTL_RESTRICT_KEYRING: - - "key_or_keyring:[:chain]" - - Whenever a key link is requested, the link will only succeed if the key - being linked is signed by one of the designated keys. This key may be - specified directly by providing a serial number for one asymmetric key, or - a group of keys may be searched for the signing key by providing the - serial number for a keyring. - - When the "chain" option is provided at the end of the string, the keys - within the destination keyring will also be searched for signing keys. - This allows for verification of certificate chains by adding each - certificate in order (starting closest to the root) to a keyring. For - instance, one keyring can be populated with links to a set of root - certificates, with a separate, restricted keyring set up for each - certificate chain to be validated: - - # Create and populate a keyring for root certificates - root_id=`keyctl add keyring root-certs "" @s` - keyctl padd asymmetric "" $root_id < root1.cert - keyctl padd asymmetric "" $root_id < root2.cert - - # Create and restrict a keyring for the certificate chain - chain_id=`keyctl add keyring chain "" @s` - keyctl restrict_keyring $chain_id asymmetric key_or_keyring:$root_id:chain - - # Attempt to add each certificate in the chain, starting with the - # certificate closest to the root. - keyctl padd asymmetric "" $chain_id < intermediateA.cert - keyctl padd asymmetric "" $chain_id < intermediateB.cert - keyctl padd asymmetric "" $chain_id < end-entity.cert - - If the final end-entity certificate is successfully added to the "chain" - keyring, we can be certain that it has a valid signing chain going back to - one of the root certificates. - - A single keyring can be used to verify a chain of signatures by - restricting the keyring after linking the root certificate: - - # Create a keyring for the certificate chain and add the root - chain2_id=`keyctl add keyring chain2 "" @s` - keyctl padd asymmetric "" $chain2_id < root1.cert - - # Restrict the keyring that already has root1.cert linked. The cert - # will remain linked by the keyring. - keyctl restrict_keyring $chain2_id asymmetric key_or_keyring:0:chain - - # Attempt to add each certificate in the chain, starting with the - # certificate closest to the root. - keyctl padd asymmetric "" $chain2_id < intermediateA.cert - keyctl padd asymmetric "" $chain2_id < intermediateB.cert - keyctl padd asymmetric "" $chain2_id < end-entity.cert - - If the final end-entity certificate is successfully added to the "chain2" - keyring, we can be certain that there is a valid signing chain going back - to the root certificate that was added before the keyring was restricted. - - -In all of these cases, if the signing key is found the signature of the key to -be linked will be verified using the signing key. The requested key is added -to the keyring only if the signature is successfully verified. -ENOKEY is -returned if the parent certificate could not be found, or -EKEYREJECTED is -returned if the signature check fails or the key is blacklisted. Other errors -may be returned if the signature check could not be performed. diff --git a/Documentation/crypto/index.rst b/Documentation/crypto/index.rst index c4ff5d791233..2bcaf422731e 100644 --- a/Documentation/crypto/index.rst +++ b/Documentation/crypto/index.rst @@ -18,6 +18,7 @@ for cryptographic use cases, as well as programming examples. 
intro architecture + asymmetric-keys devel-algos userspace-if crypto_engine diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst index cdc42ccc12e4..aa0081685ee1 100644 --- a/Documentation/security/keys/core.rst +++ b/Documentation/security/keys/core.rst @@ -912,7 +912,7 @@ The keyctl syscall functions are: One application of restricted keyrings is to verify X.509 certificate chains or individual certificate signatures using the asymmetric key type. - See Documentation/crypto/asymmetric-keys.txt for specific restrictions + See Documentation/crypto/asymmetric-keys.rst for specific restrictions applicable to the asymmetric key type. diff --git a/MAINTAINERS b/MAINTAINERS index 68f21d46614c..156bd19d10eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2827,7 +2827,7 @@ ASYMMETRIC KEYS M: David Howells L: keyrings@vger.kernel.org S: Maintained -F: Documentation/crypto/asymmetric-keys.txt +F: Documentation/crypto/asymmetric-keys.rst F: crypto/asymmetric_keys/ F: include/crypto/pkcs7.h F: include/crypto/public_key.h diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 6e5fc8e31f01..33e77d846caa 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Asymmetric public-key cryptography key type * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925..da4d0b82d018 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* In-software asymmetric public-key crypto subtype * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index e24a031db1e4..4aff3eebec17 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Signature verification with an asymmetric key * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 0588ef3bc6ff..11f535cfb810 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key algorithm definitions * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
 * Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index 8a21d6a613ab..c47dc5405f79 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Asymmetric public-key cryptography data parser
  *
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 21407815d9c3..d55171f640a0 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Asymmetric public-key cryptography key subtype
  *
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 91cfd9bd9385..a29d3ff2e7e8 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Asymmetric Public-key cryptography key type interface
  *
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
-- cgit v1.2.3-59-g8ed1b

From 8e2a46a40fa76570e535e5baa2d351510b6e61fa Mon Sep 17 00:00:00 2001
From: Mauro Carvalho Chehab
Date: Mon, 15 Jun 2020 08:50:25 +0200
Subject: docs: move remaining stuff under Documentation/*.txt to Documentation/staging

There are several files for which I was unable to find a proper place,
and three that are still in plain old text format. Let's sweep this stuff
under the carpet for now, as we'd like to keep the root directory clean.

We can later discuss and move them into better places.
Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/11bd0d75e65a874f7c276a0aeab0fe13f3376f5f.1592203650.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- Documentation/crc32.txt | 189 -------- Documentation/index.rst | 13 + Documentation/kprobes.txt | 801 ---------------------------------- Documentation/lzo.txt | 202 --------- Documentation/remoteproc.txt | 359 --------------- Documentation/rpmsg.txt | 341 --------------- Documentation/speculation.txt | 90 ---- Documentation/staging/crc32.rst | 189 ++++++++ Documentation/staging/index.rst | 32 ++ Documentation/staging/kprobes.rst | 801 ++++++++++++++++++++++++++++++++++ Documentation/staging/lzo.rst | 202 +++++++++ Documentation/staging/remoteproc.rst | 359 +++++++++++++++ Documentation/staging/rpmsg.rst | 341 +++++++++++++++ Documentation/staging/speculation.rst | 92 ++++ Documentation/staging/static-keys.rst | 331 ++++++++++++++ Documentation/staging/tee.rst | 277 ++++++++++++ Documentation/staging/xz.rst | 127 ++++++ Documentation/static-keys.txt | 331 -------------- Documentation/tee.txt | 276 ------------ Documentation/trace/kprobetrace.rst | 2 +- Documentation/xz.txt | 127 ------ MAINTAINERS | 8 +- include/linux/jump_label.h | 2 +- lib/crc32.c | 2 +- lib/lzo/lzo1x_decompress_safe.c | 2 +- lib/xz/Kconfig | 2 +- samples/kprobes/kprobe_example.c | 2 +- samples/kprobes/kretprobe_example.c | 2 +- 28 files changed, 2775 insertions(+), 2727 deletions(-) delete mode 100644 Documentation/crc32.txt delete mode 100644 Documentation/kprobes.txt delete mode 100644 Documentation/lzo.txt delete mode 100644 Documentation/remoteproc.txt delete mode 100644 Documentation/rpmsg.txt delete mode 100644 Documentation/speculation.txt create mode 100644 Documentation/staging/crc32.rst create mode 100644 Documentation/staging/index.rst create mode 100644 Documentation/staging/kprobes.rst create mode 100644 Documentation/staging/lzo.rst create mode 100644 Documentation/staging/remoteproc.rst create mode 100644 Documentation/staging/rpmsg.rst create mode 100644 Documentation/staging/speculation.rst create mode 100644 Documentation/staging/static-keys.rst create mode 100644 Documentation/staging/tee.rst create mode 100644 Documentation/staging/xz.rst delete mode 100644 Documentation/static-keys.txt delete mode 100644 Documentation/tee.txt delete mode 100644 Documentation/xz.txt (limited to 'include') diff --git a/Documentation/crc32.txt b/Documentation/crc32.txt deleted file mode 100644 index 8a6860f33b4e..000000000000 --- a/Documentation/crc32.txt +++ /dev/null @@ -1,189 +0,0 @@ -================================= -brief tutorial on CRC computation -================================= - -A CRC is a long-division remainder. You add the CRC to the message, -and the whole thing (message+CRC) is a multiple of the given -CRC polynomial. To check the CRC, you can either check that the -CRC matches the recomputed value, *or* you can check that the -remainder computed on the message+CRC is 0. This latter approach -is used by a lot of hardware implementations, and is why so many -protocols put the end-of-frame flag after the CRC. - -It's actually the same long division you learned in school, except that: - -- We're working in binary, so the digits are only 0 and 1, and -- When dividing polynomials, there are no carries. Rather than add and - subtract, we just xor. Thus, we tend to get a bit sloppy about - the difference between adding and subtracting. - -Like all division, the remainder is always smaller than the divisor. 
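As a toy illustration of that long division (a sketch only; the 4-bit polynomial and the test message are made up), the remainder can be computed one bit at a time with the same shift-and-XOR structure as the CRC-32 loops shown below::

    #include <stdint.h>
    #include <stdio.h>

    /* Toy divisor: x^4 + x + 1 = 0b10011.  Bit 4 is implicit, so only
     * the low bits (0x3) are kept, just as CRC-32 polynomials are
     * written without bit 32. */
    #define TOY_POLY 0x3

    static uint8_t toy_rem4(const uint8_t *msg, int nbits)
    {
        uint8_t rem = 0;
        int i;

        for (i = 0; i < nbits; i++) {
            int bit = (msg[i / 8] >> (7 - i % 8)) & 1;
            uint8_t multiple = (rem & 0x8) ? TOY_POLY : 0;

            /* bring down the next message bit, then subtract (XOR)
             * the divisor whenever the dropped top bit was 1 */
            rem = (((rem << 1) | bit) ^ multiple) & 0xf;
        }
        /* remainder of the bits fed in so far; always smaller than
         * the 5-bit divisor.  Feeding four extra zero bits would turn
         * it into the actual CRC, as the text explains below. */
        return rem;
    }

    int main(void)
    {
        uint8_t msg[] = { 0xC2 };

        printf("remainder = 0x%x\n", toy_rem4(msg, 8));
        return 0;
    }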
-To produce a 32-bit CRC, the divisor is actually a 33-bit CRC polynomial. -Since it's 33 bits long, bit 32 is always going to be set, so usually the -CRC is written in hex with the most significant bit omitted. (If you're -familiar with the IEEE 754 floating-point format, it's the same idea.) - -Note that a CRC is computed over a string of *bits*, so you have -to decide on the endianness of the bits within each byte. To get -the best error-detecting properties, this should correspond to the -order they're actually sent. For example, standard RS-232 serial is -little-endian; the most significant bit (sometimes used for parity) -is sent last. And when appending a CRC word to a message, you should -do it in the right order, matching the endianness. - -Just like with ordinary division, you proceed one digit (bit) at a time. -Each step of the division you take one more digit (bit) of the dividend -and append it to the current remainder. Then you figure out the -appropriate multiple of the divisor to subtract to being the remainder -back into range. In binary, this is easy - it has to be either 0 or 1, -and to make the XOR cancel, it's just a copy of bit 32 of the remainder. - -When computing a CRC, we don't care about the quotient, so we can -throw the quotient bit away, but subtract the appropriate multiple of -the polynomial from the remainder and we're back to where we started, -ready to process the next bit. - -A big-endian CRC written this way would be coded like:: - - for (i = 0; i < input_bits; i++) { - multiple = remainder & 0x80000000 ? CRCPOLY : 0; - remainder = (remainder << 1 | next_input_bit()) ^ multiple; - } - -Notice how, to get at bit 32 of the shifted remainder, we look -at bit 31 of the remainder *before* shifting it. - -But also notice how the next_input_bit() bits we're shifting into -the remainder don't actually affect any decision-making until -32 bits later. Thus, the first 32 cycles of this are pretty boring. -Also, to add the CRC to a message, we need a 32-bit-long hole for it at -the end, so we have to add 32 extra cycles shifting in zeros at the -end of every message. - -These details lead to a standard trick: rearrange merging in the -next_input_bit() until the moment it's needed. Then the first 32 cycles -can be precomputed, and merging in the final 32 zero bits to make room -for the CRC can be skipped entirely. This changes the code to:: - - for (i = 0; i < input_bits; i++) { - remainder ^= next_input_bit() << 31; - multiple = (remainder & 0x80000000) ? CRCPOLY : 0; - remainder = (remainder << 1) ^ multiple; - } - -With this optimization, the little-endian code is particularly simple:: - - for (i = 0; i < input_bits; i++) { - remainder ^= next_input_bit(); - multiple = (remainder & 1) ? CRCPOLY : 0; - remainder = (remainder >> 1) ^ multiple; - } - -The most significant coefficient of the remainder polynomial is stored -in the least significant bit of the binary "remainder" variable. -The other details of endianness have been hidden in CRCPOLY (which must -be bit-reversed) and next_input_bit(). - -As long as next_input_bit is returning the bits in a sensible order, we don't -*have* to wait until the last possible moment to merge in additional bits. -We can do it 8 bits at a time rather than 1 bit at a time:: - - for (i = 0; i < input_bytes; i++) { - remainder ^= next_input_byte() << 24; - for (j = 0; j < 8; j++) { - multiple = (remainder & 0x80000000) ? 
CRCPOLY : 0; - remainder = (remainder << 1) ^ multiple; - } - } - -Or in little-endian:: - - for (i = 0; i < input_bytes; i++) { - remainder ^= next_input_byte(); - for (j = 0; j < 8; j++) { - multiple = (remainder & 1) ? CRCPOLY : 0; - remainder = (remainder >> 1) ^ multiple; - } - } - -If the input is a multiple of 32 bits, you can even XOR in a 32-bit -word at a time and increase the inner loop count to 32. - -You can also mix and match the two loop styles, for example doing the -bulk of a message byte-at-a-time and adding bit-at-a-time processing -for any fractional bytes at the end. - -To reduce the number of conditional branches, software commonly uses -the byte-at-a-time table method, popularized by Dilip V. Sarwate, -"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM -v.31 no.8 (August 1998) p. 1008-1013. - -Here, rather than just shifting one bit of the remainder to decide -in the correct multiple to subtract, we can shift a byte at a time. -This produces a 40-bit (rather than a 33-bit) intermediate remainder, -and the correct multiple of the polynomial to subtract is found using -a 256-entry lookup table indexed by the high 8 bits. - -(The table entries are simply the CRC-32 of the given one-byte messages.) - -When space is more constrained, smaller tables can be used, e.g. two -4-bit shifts followed by a lookup in a 16-entry table. - -It is not practical to process much more than 8 bits at a time using this -technique, because tables larger than 256 entries use too much memory and, -more importantly, too much of the L1 cache. - -To get higher software performance, a "slicing" technique can be used. -See "High Octane CRC Generation with the Intel Slicing-by-8 Algorithm", -ftp://download.intel.com/technology/comms/perfnet/download/slicing-by-8.pdf - -This does not change the number of table lookups, but does increase -the parallelism. With the classic Sarwate algorithm, each table lookup -must be completed before the index of the next can be computed. - -A "slicing by 2" technique would shift the remainder 16 bits at a time, -producing a 48-bit intermediate remainder. Rather than doing a single -lookup in a 65536-entry table, the two high bytes are looked up in -two different 256-entry tables. Each contains the remainder required -to cancel out the corresponding byte. The tables are different because the -polynomials to cancel are different. One has non-zero coefficients from -x^32 to x^39, while the other goes from x^40 to x^47. - -Since modern processors can handle many parallel memory operations, this -takes barely longer than a single table look-up and thus performs almost -twice as fast as the basic Sarwate algorithm. - -This can be extended to "slicing by 4" using 4 256-entry tables. -Each step, 32 bits of data is fetched, XORed with the CRC, and the result -broken into bytes and looked up in the tables. Because the 32-bit shift -leaves the low-order bits of the intermediate remainder zero, the -final CRC is simply the XOR of the 4 table look-ups. - -But this still enforces sequential execution: a second group of table -look-ups cannot begin until the previous groups 4 table look-ups have all -been completed. Thus, the processor's load/store unit is sometimes idle. - -To make maximum use of the processor, "slicing by 8" performs 8 look-ups -in parallel. Each step, the 32-bit CRC is shifted 64 bits and XORed -with 64 bits of input data. 
What is important to note is that 4 of -those 8 bytes are simply copies of the input data; they do not depend -on the previous CRC at all. Thus, those 4 table look-ups may commence -immediately, without waiting for the previous loop iteration. - -By always having 4 loads in flight, a modern superscalar processor can -be kept busy and make full use of its L1 cache. - -Two more details about CRC implementation in the real world: - -Normally, appending zero bits to a message which is already a multiple -of a polynomial produces a larger multiple of that polynomial. Thus, -a basic CRC will not detect appended zero bits (or bytes). To enable -a CRC to detect this condition, it's common to invert the CRC before -appending it. This makes the remainder of the message+crc come out not -as zero, but some fixed non-zero value. (The CRC of the inversion -pattern, 0xffffffff.) - -The same problem applies to zero bits prepended to the message, and a -similar solution is used. Instead of starting the CRC computation with -a remainder of 0, an initial remainder of all ones is used. As long as -you start the same way on decoding, it doesn't make a difference. diff --git a/Documentation/index.rst b/Documentation/index.rst index 71eca3171574..3b491af0122d 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -182,6 +182,19 @@ subprojects. filesystems/ext4/index +Other documentation +------------------- + +There are several unsorted documents that don't seem to fit on other parts +of the documentation body, or may require some adjustments and/or conversion +to ReStructured Text format, or are simply too old. + +.. toctree:: + :maxdepth: 2 + + staging/index + + Translations ------------ diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt deleted file mode 100644 index 8baab8832c5b..000000000000 --- a/Documentation/kprobes.txt +++ /dev/null @@ -1,801 +0,0 @@ -======================= -Kernel Probes (Kprobes) -======================= - -:Author: Jim Keniston -:Author: Prasanna S Panchamukhi -:Author: Masami Hiramatsu - -.. CONTENTS - - 1. Concepts: Kprobes, and Return Probes - 2. Architectures Supported - 3. Configuring Kprobes - 4. API Reference - 5. Kprobes Features and Limitations - 6. Probe Overhead - 7. TODO - 8. Kprobes Example - 9. Kretprobes Example - 10. Deprecated Features - Appendix A: The kprobes debugfs interface - Appendix B: The kprobes sysctl interface - -Concepts: Kprobes and Return Probes -========================================= - -Kprobes enables you to dynamically break into any kernel routine and -collect debugging and performance information non-disruptively. You -can trap at almost any kernel code address [1]_, specifying a handler -routine to be invoked when the breakpoint is hit. - -.. [1] some parts of the kernel code can not be trapped, see - :ref:`kprobes_blacklist`) - -There are currently two types of probes: kprobes, and kretprobes -(also called return probes). A kprobe can be inserted on virtually -any instruction in the kernel. A return probe fires when a specified -function returns. - -In the typical case, Kprobes-based instrumentation is packaged as -a kernel module. The module's init function installs ("registers") -one or more probes, and the exit function unregisters them. A -registration function such as register_kprobe() specifies where -the probe is to be inserted and what handler is to be called when -the probe is hit. 
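As a minimal sketch of that packaging (loosely modelled on samples/kprobes/kprobe_example.c; the probed symbol and all names here are illustrative only), a module might do::

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
        pr_info("kprobe hit at %pS\n", p->addr);
        return 0;       /* continue with normal single-stepping */
    }

    static struct kprobe my_kp = {
        .symbol_name = "do_sys_open",   /* illustrative target only */
        .pre_handler = my_pre_handler,
    };

    static int __init my_probe_init(void)
    {
        return register_kprobe(&my_kp);
    }

    static void __exit my_probe_exit(void)
    {
        unregister_kprobe(&my_kp);
    }

    module_init(my_probe_init);
    module_exit(my_probe_exit);
    MODULE_LICENSE("GPL");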
- -There are also ``register_/unregister_*probes()`` functions for batch -registration/unregistration of a group of ``*probes``. These functions -can speed up unregistration process when you have to unregister -a lot of probes at once. - -The next four subsections explain how the different types of -probes work and how jump optimization works. They explain certain -things that you'll need to know in order to make the best use of -Kprobes -- e.g., the difference between a pre_handler and -a post_handler, and how to use the maxactive and nmissed fields of -a kretprobe. But if you're in a hurry to start using Kprobes, you -can skip ahead to :ref:`kprobes_archs_supported`. - -How Does a Kprobe Work? ------------------------ - -When a kprobe is registered, Kprobes makes a copy of the probed -instruction and replaces the first byte(s) of the probed instruction -with a breakpoint instruction (e.g., int3 on i386 and x86_64). - -When a CPU hits the breakpoint instruction, a trap occurs, the CPU's -registers are saved, and control passes to Kprobes via the -notifier_call_chain mechanism. Kprobes executes the "pre_handler" -associated with the kprobe, passing the handler the addresses of the -kprobe struct and the saved registers. - -Next, Kprobes single-steps its copy of the probed instruction. -(It would be simpler to single-step the actual instruction in place, -but then Kprobes would have to temporarily remove the breakpoint -instruction. This would open a small time window when another CPU -could sail right past the probepoint.) - -After the instruction is single-stepped, Kprobes executes the -"post_handler," if any, that is associated with the kprobe. -Execution then continues with the instruction following the probepoint. - -Changing Execution Path ------------------------ - -Since kprobes can probe into a running kernel code, it can change the -register set, including instruction pointer. This operation requires -maximum care, such as keeping the stack frame, recovering the execution -path etc. Since it operates on a running kernel and needs deep knowledge -of computer architecture and concurrent computing, you can easily shoot -your foot. - -If you change the instruction pointer (and set up other related -registers) in pre_handler, you must return !0 so that kprobes stops -single stepping and just returns to the given address. -This also means post_handler should not be called anymore. - -Note that this operation may be harder on some architectures which use -TOC (Table of Contents) for function call, since you have to setup a new -TOC for your function in your module, and recover the old one after -returning from it. - -Return Probes -------------- - -How Does a Return Probe Work? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When you call register_kretprobe(), Kprobes establishes a kprobe at -the entry to the function. When the probed function is called and this -probe is hit, Kprobes saves a copy of the return address, and replaces -the return address with the address of a "trampoline." The trampoline -is an arbitrary piece of code -- typically just a nop instruction. -At boot time, Kprobes registers a kprobe at the trampoline. - -When the probed function executes its return instruction, control -passes to the trampoline and that probe is hit. Kprobes' trampoline -handler calls the user-specified return handler associated with the -kretprobe, then sets the saved instruction pointer to the saved return -address, and that's where execution resumes upon return from the trap. 
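From the user's side, a return probe set up along these lines might look like the following sketch (the handler name and probed symbol are illustrative; see samples/kprobes/kretprobe_example.c for the real sample, and maxactive is explained just below)::

    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    static int my_ret_handler(struct kretprobe_instance *ri,
                              struct pt_regs *regs)
    {
        /* regs_return_value() abstracts the ABI's return register */
        unsigned long retval = regs_return_value(regs);

        pr_info("probed function returned %lu\n", retval);
        return 0;
    }

    static struct kretprobe my_kretprobe = {
        .handler        = my_ret_handler,
        .maxactive      = 20,           /* concurrent instances to track */
        .kp.symbol_name = "_do_fork",   /* illustrative target only */
    };

    /* register_kretprobe(&my_kretprobe) from module init,
     * unregister_kretprobe(&my_kretprobe) from module exit */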
- -While the probed function is executing, its return address is -stored in an object of type kretprobe_instance. Before calling -register_kretprobe(), the user sets the maxactive field of the -kretprobe struct to specify how many instances of the specified -function can be probed simultaneously. register_kretprobe() -pre-allocates the indicated number of kretprobe_instance objects. - -For example, if the function is non-recursive and is called with a -spinlock held, maxactive = 1 should be enough. If the function is -non-recursive and can never relinquish the CPU (e.g., via a semaphore -or preemption), NR_CPUS should be enough. If maxactive <= 0, it is -set to a default value. If CONFIG_PREEMPT is enabled, the default -is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS. - -It's not a disaster if you set maxactive too low; you'll just miss -some probes. In the kretprobe struct, the nmissed field is set to -zero when the return probe is registered, and is incremented every -time the probed function is entered but there is no kretprobe_instance -object available for establishing the return probe. - -Kretprobe entry-handler -^^^^^^^^^^^^^^^^^^^^^^^ - -Kretprobes also provides an optional user-specified handler which runs -on function entry. This handler is specified by setting the entry_handler -field of the kretprobe struct. Whenever the kprobe placed by kretprobe at the -function entry is hit, the user-defined entry_handler, if any, is invoked. -If the entry_handler returns 0 (success) then a corresponding return handler -is guaranteed to be called upon function return. If the entry_handler -returns a non-zero error then Kprobes leaves the return address as is, and -the kretprobe has no further effect for that particular function instance. - -Multiple entry and return handler invocations are matched using the unique -kretprobe_instance object associated with them. Additionally, a user -may also specify per return-instance private data to be part of each -kretprobe_instance object. This is especially useful when sharing private -data between corresponding user entry and return handlers. The size of each -private data object can be specified at kretprobe registration time by -setting the data_size field of the kretprobe struct. This data can be -accessed through the data field of each kretprobe_instance object. - -In case probed function is entered but there is no kretprobe_instance -object available, then in addition to incrementing the nmissed count, -the user entry_handler invocation is also skipped. - -.. _kprobes_jump_optimization: - -How Does Jump Optimization Work? --------------------------------- - -If your kernel is built with CONFIG_OPTPROBES=y (currently this flag -is automatically set 'y' on x86/x86-64, non-preemptive kernel) and -the "debug.kprobes_optimization" kernel parameter is set to 1 (see -sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump -instruction instead of a breakpoint instruction at each probepoint. - -Init a Kprobe -^^^^^^^^^^^^^ - -When a probe is registered, before attempting this optimization, -Kprobes inserts an ordinary, breakpoint-based kprobe at the specified -address. So, even if it's not possible to optimize this particular -probepoint, there'll be a probe there. - -Safety Check -^^^^^^^^^^^^ - -Before optimizing a probe, Kprobes performs the following safety checks: - -- Kprobes verifies that the region that will be replaced by the jump - instruction (the "optimized region") lies entirely within one function. 
- (A jump instruction is multiple bytes, and so may overlay multiple - instructions.) - -- Kprobes analyzes the entire function and verifies that there is no - jump into the optimized region. Specifically: - - - the function contains no indirect jump; - - the function contains no instruction that causes an exception (since - the fixup code triggered by the exception could jump back into the - optimized region -- Kprobes checks the exception tables to verify this); - - there is no near jump to the optimized region (other than to the first - byte). - -- For each instruction in the optimized region, Kprobes verifies that - the instruction can be executed out of line. - -Preparing Detour Buffer -^^^^^^^^^^^^^^^^^^^^^^^ - -Next, Kprobes prepares a "detour" buffer, which contains the following -instruction sequence: - -- code to push the CPU's registers (emulating a breakpoint trap) -- a call to the trampoline code which calls user's probe handlers. -- code to restore registers -- the instructions from the optimized region -- a jump back to the original execution path. - -Pre-optimization -^^^^^^^^^^^^^^^^ - -After preparing the detour buffer, Kprobes verifies that none of the -following situations exist: - -- The probe has a post_handler. -- Other instructions in the optimized region are probed. -- The probe is disabled. - -In any of the above cases, Kprobes won't start optimizing the probe. -Since these are temporary situations, Kprobes tries to start -optimizing it again if the situation is changed. - -If the kprobe can be optimized, Kprobes enqueues the kprobe to an -optimizing list, and kicks the kprobe-optimizer workqueue to optimize -it. If the to-be-optimized probepoint is hit before being optimized, -Kprobes returns control to the original instruction path by setting -the CPU's instruction pointer to the copied code in the detour buffer --- thus at least avoiding the single-step. - -Optimization -^^^^^^^^^^^^ - -The Kprobe-optimizer doesn't insert the jump instruction immediately; -rather, it calls synchronize_rcu() for safety first, because it's -possible for a CPU to be interrupted in the middle of executing the -optimized region [3]_. As you know, synchronize_rcu() can ensure -that all interruptions that were active when synchronize_rcu() -was called are done, but only if CONFIG_PREEMPT=n. So, this version -of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_. - -After that, the Kprobe-optimizer calls stop_machine() to replace -the optimized region with a jump instruction to the detour buffer, -using text_poke_smp(). - -Unoptimization -^^^^^^^^^^^^^^ - -When an optimized kprobe is unregistered, disabled, or blocked by -another kprobe, it will be unoptimized. If this happens before -the optimization is complete, the kprobe is just dequeued from the -optimized list. If the optimization has been done, the jump is -replaced with the original code (except for an int3 breakpoint in -the first byte) by using text_poke_smp(). - -.. [3] Please imagine that the 2nd instruction is interrupted and then - the optimizer replaces the 2nd instruction with the jump *address* - while the interrupt handler is running. When the interrupt - returns to original address, there is no valid instruction, - and it causes an unexpected result. - -.. [4] This optimization-safety checking may be replaced with the - stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y - kernel. - -NOTE for geeks: -The jump optimization changes the kprobe's pre_handler behavior. 
-Without optimization, the pre_handler can change the kernel's execution -path by changing regs->ip and returning 1. However, when the probe -is optimized, that modification is ignored. Thus, if you want to -tweak the kernel's execution path, you need to suppress optimization, -using one of the following techniques: - -- Specify an empty function for the kprobe's post_handler. - -or - -- Execute 'sysctl -w debug.kprobes_optimization=n' - -.. _kprobes_blacklist: - -Blacklist ---------- - -Kprobes can probe most of the kernel except itself. This means -that there are some functions where kprobes cannot probe. Probing -(trapping) such functions can cause a recursive trap (e.g. double -fault) or the nested probe handler may never be called. -Kprobes manages such functions as a blacklist. -If you want to add a function into the blacklist, you just need -to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro -to specify a blacklisted function. -Kprobes checks the given probe address against the blacklist and -rejects registering it, if the given address is in the blacklist. - -.. _kprobes_archs_supported: - -Architectures Supported -======================= - -Kprobes and return probes are implemented on the following -architectures: - -- i386 (Supports jump optimization) -- x86_64 (AMD-64, EM64T) (Supports jump optimization) -- ppc64 -- ia64 (Does not support probes on instruction slot1.) -- sparc64 (Return probes not yet implemented.) -- arm -- ppc -- mips -- s390 -- parisc - -Configuring Kprobes -=================== - -When configuring the kernel using make menuconfig/xconfig/oldconfig, -ensure that CONFIG_KPROBES is set to "y". Under "General setup", look -for "Kprobes". - -So that you can load and unload Kprobes-based instrumentation modules, -make sure "Loadable module support" (CONFIG_MODULES) and "Module -unloading" (CONFIG_MODULE_UNLOAD) are set to "y". - -Also make sure that CONFIG_KALLSYMS and perhaps even CONFIG_KALLSYMS_ALL -are set to "y", since kallsyms_lookup_name() is used by the in-kernel -kprobe address resolution code. - -If you need to insert a probe in the middle of a function, you may find -it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO), -so you can use "objdump -d -l vmlinux" to see the source-to-object -code mapping. - -API Reference -============= - -The Kprobes API includes a "register" function and an "unregister" -function for each type of probe. The API also includes "register_*probes" -and "unregister_*probes" functions for (un)registering arrays of probes. -Here are terse, mini-man-page specifications for these functions and -the associated probe handlers that you'll write. See the files in the -samples/kprobes/ sub-directory for examples. - -register_kprobe ---------------- - -:: - - #include - int register_kprobe(struct kprobe *kp); - -Sets a breakpoint at the address kp->addr. When the breakpoint is -hit, Kprobes calls kp->pre_handler. After the probed instruction -is single-stepped, Kprobe calls kp->post_handler. If a fault -occurs during execution of kp->pre_handler or kp->post_handler, -or during single-stepping of the probed instruction, Kprobes calls -kp->fault_handler. Any or all handlers can be NULL. If kp->flags -is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, -so, its handlers aren't hit until calling enable_kprobe(kp). - -.. note:: - - 1. With the introduction of the "symbol_name" field to struct kprobe, - the probepoint address resolution will now be taken care of by the kernel. 
- The following will now work:: - - kp.symbol_name = "symbol_name"; - - (64-bit powerpc intricacies such as function descriptors are handled - transparently) - - 2. Use the "offset" field of struct kprobe if the offset into the symbol - to install a probepoint is known. This field is used to calculate the - probepoint. - - 3. Specify either the kprobe "symbol_name" OR the "addr". If both are - specified, kprobe registration will fail with -EINVAL. - - 4. With CISC architectures (such as i386 and x86_64), the kprobes code - does not validate if the kprobe.addr is at an instruction boundary. - Use "offset" with caution. - -register_kprobe() returns 0 on success, or a negative errno otherwise. - -User's pre-handler (kp->pre_handler):: - - #include - #include - int pre_handler(struct kprobe *p, struct pt_regs *regs); - -Called with p pointing to the kprobe associated with the breakpoint, -and regs pointing to the struct containing the registers saved when -the breakpoint was hit. Return 0 here unless you're a Kprobes geek. - -User's post-handler (kp->post_handler):: - - #include - #include - void post_handler(struct kprobe *p, struct pt_regs *regs, - unsigned long flags); - -p and regs are as described for the pre_handler. flags always seems -to be zero. - -User's fault-handler (kp->fault_handler):: - - #include - #include - int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr); - -p and regs are as described for the pre_handler. trapnr is the -architecture-specific trap number associated with the fault (e.g., -on i386, 13 for a general protection fault or 14 for a page fault). -Returns 1 if it successfully handled the exception. - -register_kretprobe ------------------- - -:: - - #include - int register_kretprobe(struct kretprobe *rp); - -Establishes a return probe for the function whose address is -rp->kp.addr. When that function returns, Kprobes calls rp->handler. -You must set rp->maxactive appropriately before you call -register_kretprobe(); see "How Does a Return Probe Work?" for details. - -register_kretprobe() returns 0 on success, or a negative errno -otherwise. - -User's return-probe handler (rp->handler):: - - #include - #include - int kretprobe_handler(struct kretprobe_instance *ri, - struct pt_regs *regs); - -regs is as described for kprobe.pre_handler. ri points to the -kretprobe_instance object, of which the following fields may be -of interest: - -- ret_addr: the return address -- rp: points to the corresponding kretprobe object -- task: points to the corresponding task struct -- data: points to per return-instance private data; see "Kretprobe - entry-handler" for details. - -The regs_return_value(regs) macro provides a simple abstraction to -extract the return value from the appropriate register as defined by -the architecture's ABI. - -The handler's return value is currently ignored. - -unregister_*probe ------------------- - -:: - - #include - void unregister_kprobe(struct kprobe *kp); - void unregister_kretprobe(struct kretprobe *rp); - -Removes the specified probe. The unregister function can be called -at any time after the probe has been registered. - -.. note:: - - If the functions find an incorrect probe (ex. an unregistered probe), - they clear the addr field of the probe. - -register_*probes ----------------- - -:: - - #include - int register_kprobes(struct kprobe **kps, int num); - int register_kretprobes(struct kretprobe **rps, int num); - -Registers each of the num probes in the specified array. 
If any -error occurs during registration, all probes in the array, up to -the bad probe, are safely unregistered before the register_*probes -function returns. - -- kps/rps: an array of pointers to ``*probe`` data structures -- num: the number of the array entries. - -.. note:: - - You have to allocate(or define) an array of pointers and set all - of the array entries before using these functions. - -unregister_*probes ------------------- - -:: - - #include - void unregister_kprobes(struct kprobe **kps, int num); - void unregister_kretprobes(struct kretprobe **rps, int num); - -Removes each of the num probes in the specified array at once. - -.. note:: - - If the functions find some incorrect probes (ex. unregistered - probes) in the specified array, they clear the addr field of those - incorrect probes. However, other probes in the array are - unregistered correctly. - -disable_*probe --------------- - -:: - - #include - int disable_kprobe(struct kprobe *kp); - int disable_kretprobe(struct kretprobe *rp); - -Temporarily disables the specified ``*probe``. You can enable it again by using -enable_*probe(). You must specify the probe which has been registered. - -enable_*probe -------------- - -:: - - #include - int enable_kprobe(struct kprobe *kp); - int enable_kretprobe(struct kretprobe *rp); - -Enables ``*probe`` which has been disabled by disable_*probe(). You must specify -the probe which has been registered. - -Kprobes Features and Limitations -================================ - -Kprobes allows multiple probes at the same address. Also, -a probepoint for which there is a post_handler cannot be optimized. -So if you install a kprobe with a post_handler, at an optimized -probepoint, the probepoint will be unoptimized automatically. - -In general, you can install a probe anywhere in the kernel. -In particular, you can probe interrupt handlers. Known exceptions -are discussed in this section. - -The register_*probe functions will return -EINVAL if you attempt -to install a probe in the code that implements Kprobes (mostly -kernel/kprobes.c and ``arch/*/kernel/kprobes.c``, but also functions such -as do_page_fault and notifier_call_chain). - -If you install a probe in an inline-able function, Kprobes makes -no attempt to chase down all inline instances of the function and -install probes there. gcc may inline a function without being asked, -so keep this in mind if you're not seeing the probe hits you expect. - -A probe handler can modify the environment of the probed function --- e.g., by modifying kernel data structures, or by modifying the -contents of the pt_regs struct (which are restored to the registers -upon return from the breakpoint). So Kprobes can be used, for example, -to install a bug fix or to inject faults for testing. Kprobes, of -course, has no way to distinguish the deliberately injected faults -from the accidental ones. Don't drink and probe. - -Kprobes makes no attempt to prevent probe handlers from stepping on -each other -- e.g., probing printk() and then calling printk() from a -probe handler. If a probe handler hits a probe, that second probe's -handlers won't be run in that instance, and the kprobe.nmissed member -of the second probe will be incremented. - -As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of -the same handler) may run concurrently on different CPUs. - -Kprobes does not use mutexes or allocate memory except during -registration and unregistration. 
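Before moving on to the execution context of handlers, here is a small sketch of the register_kprobes()/unregister_kprobes() array interface described a few paragraphs above (the probed symbols and names are illustrative only)::

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe kp_open  = { .symbol_name = "do_sys_open" };
    static struct kprobe kp_close = { .symbol_name = "__close_fd" };

    static struct kprobe *my_kps[] = { &kp_open, &kp_close };

    static int __init my_batch_init(void)
    {
        /* every array slot must be filled in before this call; on
         * error, the probes registered so far are unregistered for us */
        return register_kprobes(my_kps, ARRAY_SIZE(my_kps));
    }

    static void __exit my_batch_exit(void)
    {
        unregister_kprobes(my_kps, ARRAY_SIZE(my_kps));
    }

    module_init(my_batch_init);
    module_exit(my_batch_exit);
    MODULE_LICENSE("GPL");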
- -Probe handlers are run with preemption disabled or interrupt disabled, -which depends on the architecture and optimization state. (e.g., -kretprobe handlers and optimized kprobe handlers run without interrupt -disabled on x86/x86-64). In any case, your handler should not yield -the CPU (e.g., by attempting to acquire a semaphore, or waiting I/O). - -Since a return probe is implemented by replacing the return -address with the trampoline's address, stack backtraces and calls -to __builtin_return_address() will typically yield the trampoline's -address instead of the real return address for kretprobed functions. -(As far as we can tell, __builtin_return_address() is used only -for instrumentation and error reporting.) - -If the number of times a function is called does not match the number -of times it returns, registering a return probe on that function may -produce undesirable results. In such a case, a line: -kretprobe BUG!: Processing kretprobe d000000000041aa8 @ c00000000004f48c -gets printed. With this information, one will be able to correlate the -exact instance of the kretprobe that caused the problem. We have the -do_exit() case covered. do_execve() and do_fork() are not an issue. -We're unaware of other specific cases where this could be a problem. - -If, upon entry to or exit from a function, the CPU is running on -a stack other than that of the current task, registering a return -probe on that function may produce undesirable results. For this -reason, Kprobes doesn't support return probes (or kprobes) -on the x86_64 version of __switch_to(); the registration functions -return -EINVAL. - -On x86/x86-64, since the Jump Optimization of Kprobes modifies -instructions widely, there are some limitations to optimization. To -explain it, we introduce some terminology. Imagine a 3-instruction -sequence consisting of a two 2-byte instructions and one 3-byte -instruction. - -:: - - IA - | - [-2][-1][0][1][2][3][4][5][6][7] - [ins1][ins2][ ins3 ] - [<- DCR ->] - [<- JTPR ->] - - ins1: 1st Instruction - ins2: 2nd Instruction - ins3: 3rd Instruction - IA: Insertion Address - JTPR: Jump Target Prohibition Region - DCR: Detoured Code Region - -The instructions in DCR are copied to the out-of-line buffer -of the kprobe, because the bytes in DCR are replaced by -a 5-byte jump instruction. So there are several limitations. - -a) The instructions in DCR must be relocatable. -b) The instructions in DCR must not include a call instruction. -c) JTPR must not be targeted by any jump or call instruction. -d) DCR must not straddle the border between functions. - -Anyway, these limitations are checked by the in-kernel instruction -decoder, so you don't need to worry about that. - -Probe Overhead -============== - -On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0 -microseconds to process. Specifically, a benchmark that hits the same -probepoint repeatedly, firing a simple handler each time, reports 1-2 -million hits per second, depending on the architecture. A return-probe -hit typically takes 50-75% longer than a kprobe hit. -When you have a return probe set on a function, adding a kprobe at -the entry to that function adds essentially no overhead. 
- -Here are sample overhead figures (in usec) for different architectures:: - - k = kprobe; r = return probe; kr = kprobe + return probe - on same function - - i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips - k = 0.57 usec; r = 0.92; kr = 0.99 - - x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips - k = 0.49 usec; r = 0.80; kr = 0.82 - - ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU) - k = 0.77 usec; r = 1.26; kr = 1.45 - -Optimized Probe Overhead ------------------------- - -Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to -process. Here are sample overhead figures (in usec) for x86 architectures:: - - k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe, - r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe. - - i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips - k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33 - - x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips - k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30 - -TODO -==== - -a. SystemTap (http://sourceware.org/systemtap): Provides a simplified - programming interface for probe-based instrumentation. Try it out. -b. Kernel return probes for sparc64. -c. Support for other architectures. -d. User-space probes. -e. Watchpoint probes (which fire on data references). - -Kprobes Example -=============== - -See samples/kprobes/kprobe_example.c - -Kretprobes Example -================== - -See samples/kprobes/kretprobe_example.c - -For additional information on Kprobes, refer to the following URLs: - -- http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe -- http://www.redhat.com/magazine/005mar05/features/kprobes/ -- http://www-users.cs.umn.edu/~boutcher/kprobes/ -- http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115) - -Deprecated Features -=================== - -Jprobes is now a deprecated feature. People who are depending on it should -migrate to other tracing features or use older kernels. Please consider to -migrate your tool to one of the following options: - -- Use trace-event to trace target function with arguments. - - trace-event is a low-overhead (and almost no visible overhead if it - is off) statically defined event interface. You can define new events - and trace it via ftrace or any other tracing tools. - - See the following urls: - - - https://lwn.net/Articles/379903/ - - https://lwn.net/Articles/381064/ - - https://lwn.net/Articles/383362/ - -- Use ftrace dynamic events (kprobe event) with perf-probe. - - If you build your kernel with debug info (CONFIG_DEBUG_INFO=y), you can - find which register/stack is assigned to which local variable or arguments - by using perf-probe and set up new event to trace it. - - See following documents: - - - Documentation/trace/kprobetrace.rst - - Documentation/trace/events.rst - - tools/perf/Documentation/perf-probe.txt - - -The kprobes debugfs interface -============================= - - -With recent kernels (> 2.6.20) the list of registered kprobes is visible -under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at //sys/kernel/debug). - -/sys/kernel/debug/kprobes/list: Lists all registered probes on the system:: - - c015d71a k vfs_read+0x0 - c03dedc5 r tcp_v4_rcv+0x0 - -The first column provides the kernel address where the probe is inserted. 
-The second column identifies the type of probe (k - kprobe and r - kretprobe) -while the third column specifies the symbol+offset of the probe. -If the probed function belongs to a module, the module name is also -specified. Following columns show probe status. If the probe is on -a virtual address that is no longer valid (module init sections, module -virtual addresses that correspond to modules that've been unloaded), -such probes are marked with [GONE]. If the probe is temporarily disabled, -such probes are marked with [DISABLED]. If the probe is optimized, it is -marked with [OPTIMIZED]. If the probe is ftrace-based, it is marked with -[FTRACE]. - -/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. - -Provides a knob to globally and forcibly turn registered kprobes ON or OFF. -By default, all kprobes are enabled. By echoing "0" to this file, all -registered probes will be disarmed, till such time a "1" is echoed to this -file. Note that this knob just disarms and arms all kprobes and doesn't -change each probe's disabling state. This means that disabled kprobes (marked -[DISABLED]) will be not enabled if you turn ON all kprobes by this knob. - - -The kprobes sysctl interface -============================ - -/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF. - -When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides -a knob to globally and forcibly turn jump optimization (see section -:ref:`kprobes_jump_optimization`) ON or OFF. By default, jump optimization -is allowed (ON). If you echo "0" to this file or set -"debug.kprobes_optimization" to 0 via sysctl, all optimized probes will be -unoptimized, and any new probes registered after that will not be optimized. - -Note that this knob *changes* the optimized state. This means that optimized -probes (marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be -removed). If the knob is turned on, they will be optimized again. - diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt deleted file mode 100644 index f65b51523014..000000000000 --- a/Documentation/lzo.txt +++ /dev/null @@ -1,202 +0,0 @@ -=========================================================== -LZO stream format as understood by Linux's LZO decompressor -=========================================================== - -Introduction -============ - - This is not a specification. No specification seems to be publicly available - for the LZO stream format. This document describes what input format the LZO - decompressor as implemented in the Linux kernel understands. The file subject - of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on - the compressor nor on any other implementations though it seems likely that - the format matches the standard one. The purpose of this document is to - better understand what the code does in order to propose more efficient fixes - for future bug reports. - -Description -=========== - - The stream is composed of a series of instructions, operands, and data. The - instructions consist in a few bits representing an opcode, and bits forming - the operands for the instruction, whose size and position depend on the - opcode and on the number of literals copied by previous instruction. 
The - operands are used to indicate: - - - a distance when copying data from the dictionary (past output buffer) - - a length (number of bytes to copy from dictionary) - - the number of literals to copy, which is retained in variable "state" - as a piece of information for next instructions. - - Optionally depending on the opcode and operands, extra data may follow. These - extra data can be a complement for the operand (eg: a length or a distance - encoded on larger values), or a literal to be copied to the output buffer. - - The first byte of the block follows a different encoding from other bytes, it - seems to be optimized for literal use only, since there is no dictionary yet - prior to that byte. - - Lengths are always encoded on a variable size starting with a small number - of bits in the operand. If the number of bits isn't enough to represent the - length, up to 255 may be added in increments by consuming more bytes with a - rate of at most 255 per extra byte (thus the compression ratio cannot exceed - around 255:1). The variable length encoding using #bits is always the same:: - - length = byte & ((1 << #bits) - 1) - if (!length) { - length = ((1 << #bits) - 1) - length += 255*(number of zero bytes) - length += first-non-zero-byte - } - length += constant (generally 2 or 3) - - For references to the dictionary, distances are relative to the output - pointer. Distances are encoded using very few bits belonging to certain - ranges, resulting in multiple copy instructions using different encodings. - Certain encodings involve one extra byte, others involve two extra bytes - forming a little-endian 16-bit quantity (marked LE16 below). - - After any instruction except the large literal copy, 0, 1, 2 or 3 literals - are copied before starting the next instruction. The number of literals that - were copied may change the meaning and behaviour of the next instruction. In - practice, only one instruction needs to know whether 0, less than 4, or more - literals were copied. This is the information stored in the variable - in this implementation. This number of immediate literals to be copied is - generally encoded in the last two bits of the instruction but may also be - taken from the last two bits of an extra operand (eg: distance). - - End of stream is declared when a block copy of distance 0 is seen. Only one - instruction may encode this distance (0001HLLL), it takes one LE16 operand - for the distance, thus requiring 3 bytes. - - .. important:: - - In the code some length checks are missing because certain instructions - are called under the assumption that a certain number of bytes follow - because it has already been guaranteed before parsing the instructions. - They just have to "refill" this credit if they consume extra bytes. This - is an implementation design choice independent on the algorithm or - encoding. - -Versions - -0: Original version -1: LZO-RLE - -Version 1 of LZO implements an extension to encode runs of zeros using run -length encoding. This improves speed for data with many zeros, which is a -common case for zram. This modifies the bitstream in a backwards compatible way -(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data). - -For maximum compatibility, both versions are available under different names -(lzo and lzo-rle). Differences in the encoding are noted in this document with -e.g.: version 1 only. - -Byte sequences -============== - - First byte encoding:: - - 0..16 : follow regular instruction encoding, see below. 
It is worth - noting that code 16 will represent a block copy from the - dictionary which is empty, and that it will always be - invalid at this place. - - 17 : bitstream version. If the first byte is 17, and compressed - stream length is at least 5 bytes (length of shortest possible - versioned bitstream), the next byte gives the bitstream version - (version 1 only). - Otherwise, the bitstream version is 0. - - 18..21 : copy 0..3 literals - state = (byte - 17) = 0..3 [ copy literals ] - skip byte - - 22..255 : copy literal string - length = (byte - 17) = 4..238 - state = 4 [ don't copy extra literals ] - skip byte - - Instruction encoding:: - - 0 0 0 0 X X X X (0..15) - Depends on the number of literals copied by the last instruction. - If last instruction did not copy any literal (state == 0), this - encoding will be a copy of 4 or more literal, and must be interpreted - like this : - - 0 0 0 0 L L L L (0..15) : copy long literal string - length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte) - state = 4 (no extra literals are copied) - - If last instruction used to copy between 1 to 3 literals (encoded in - the instruction's opcode or distance), the instruction is a copy of a - 2-byte block from the dictionary within a 1kB distance. It is worth - noting that this instruction provides little savings since it uses 2 - bytes to encode a copy of 2 other bytes but it encodes the number of - following literals for free. It must be interpreted like this : - - 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance - length = 2 - state = S (copy S literals after this block) - Always followed by exactly one byte : H H H H H H H H - distance = (H << 2) + D + 1 - - If last instruction used to copy 4 or more literals (as detected by - state == 4), the instruction becomes a copy of a 3-byte block from the - dictionary from a 2..3kB distance, and must be interpreted like this : - - 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance - length = 3 - state = S (copy S literals after this block) - Always followed by exactly one byte : H H H H H H H H - distance = (H << 2) + D + 2049 - - 0 0 0 1 H L L L (16..31) - Copy of a block within 16..48kB distance (preferably less than 10B) - length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte) - Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S - distance = 16384 + (H << 14) + D - state = S (copy S literals after this block) - End of stream is reached if distance == 16384 - In version 1 only, to prevent ambiguity with the RLE case when - ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the - compressor must not emit block copies where distance and length - meet these conditions. - - In version 1 only, this instruction is also used to encode a run of - zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. - In this case, it is followed by a fourth byte, X. 
- run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 - - 0 0 1 L L L L L (32..63) - Copy of small block within 16kB distance (preferably less than 34B) - length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte) - Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S - distance = D + 1 - state = S (copy S literals after this block) - - 0 1 L D D D S S (64..127) - Copy 3-4 bytes from block within 2kB distance - state = S (copy S literals after this block) - length = 3 + L - Always followed by exactly one byte : H H H H H H H H - distance = (H << 3) + D + 1 - - 1 L L D D D S S (128..255) - Copy 5-8 bytes from block within 2kB distance - state = S (copy S literals after this block) - length = 5 + L - Always followed by exactly one byte : H H H H H H H H - distance = (H << 3) + D + 1 - -Authors -======= - - This document was written by Willy Tarreau on 2014/07/19 during an - analysis of the decompression code available in Linux 3.16-rc5, and updated - by Dave Rodgman on 2018/10/30 to introduce run-length - encoding. The code is tricky, it is possible that this document contains - mistakes or that a few corner cases were overlooked. In any case, please - report any doubt, fix, or proposed updates to the author(s) so that the - document can be updated. diff --git a/Documentation/remoteproc.txt b/Documentation/remoteproc.txt deleted file mode 100644 index 2be1147256e0..000000000000 --- a/Documentation/remoteproc.txt +++ /dev/null @@ -1,359 +0,0 @@ -========================== -Remote Processor Framework -========================== - -Introduction -============ - -Modern SoCs typically have heterogeneous remote processor devices in asymmetric -multiprocessing (AMP) configurations, which may be running different instances -of operating system, whether it's Linux or any other flavor of real-time OS. - -OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. -In a typical configuration, the dual cortex-A9 is running Linux in a SMP -configuration, and each of the other three cores (two M3 cores and a DSP) -is running its own instance of RTOS in an AMP configuration. - -The remoteproc framework allows different platforms/architectures to -control (power on, load firmware, power off) those remote processors while -abstracting the hardware differences, so the entire driver doesn't need to be -duplicated. In addition, this framework also adds rpmsg virtio devices -for remote processors that supports this kind of communication. This way, -platform-specific remoteproc drivers only need to provide a few low-level -handlers, and then all rpmsg drivers will then just work -(for more information about the virtio-based rpmsg bus and its drivers, -please read Documentation/rpmsg.txt). -Registration of other types of virtio devices is now also possible. Firmwares -just need to publish what kind of virtio devices do they support, and then -remoteproc will add those devices. This makes it possible to reuse the -existing virtio drivers with remote processor backends at a minimal development -cost. - -User API -======== - -:: - - int rproc_boot(struct rproc *rproc) - -Boot a remote processor (i.e. load its firmware, power it on, ...). - -If the remote processor is already powered on, this function immediately -returns (successfully). - -Returns 0 on success, and an appropriate error value otherwise. -Note: to use this function you should already have a valid rproc -handle. 
There are several ways to achieve that cleanly (devres, pdata, -the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we -might also consider using dev_archdata for this). - -:: - - void rproc_shutdown(struct rproc *rproc) - -Power off a remote processor (previously booted with rproc_boot()). -In case @rproc is still being used by an additional user(s), then -this function will just decrement the power refcount and exit, -without really powering off the device. - -Every call to rproc_boot() must (eventually) be accompanied by a call -to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. - -.. note:: - - we're not decrementing the rproc's refcount, only the power refcount. - which means that the @rproc handle stays valid even after - rproc_shutdown() returns, and users can still use it with a subsequent - rproc_boot(), if needed. - -:: - - struct rproc *rproc_get_by_phandle(phandle phandle) - -Find an rproc handle using a device tree phandle. Returns the rproc -handle on success, and NULL on failure. This function increments -the remote processor's refcount, so always use rproc_put() to -decrement it back once rproc isn't needed anymore. - -Typical usage -============= - -:: - - #include - - /* in case we were given a valid 'rproc' handle */ - int dummy_rproc_example(struct rproc *my_rproc) - { - int ret; - - /* let's power on and boot our remote processor */ - ret = rproc_boot(my_rproc); - if (ret) { - /* - * something went wrong. handle it and leave. - */ - } - - /* - * our remote processor is now powered on... give it some work - */ - - /* let's shut it down now */ - rproc_shutdown(my_rproc); - } - -API for implementors -==================== - -:: - - struct rproc *rproc_alloc(struct device *dev, const char *name, - const struct rproc_ops *ops, - const char *firmware, int len) - -Allocate a new remote processor handle, but don't register -it yet. Required parameters are the underlying device, the -name of this remote processor, platform-specific ops handlers, -the name of the firmware to boot this rproc with, and the -length of private data needed by the allocating rproc driver (in bytes). - -This function should be used by rproc implementations during -initialization of the remote processor. - -After creating an rproc handle using this function, and when ready, -implementations should then call rproc_add() to complete -the registration of the remote processor. - -On success, the new rproc is returned, and on failure, NULL. - -.. note:: - - **never** directly deallocate @rproc, even if it was not registered - yet. Instead, when you need to unroll rproc_alloc(), use rproc_free(). - -:: - - void rproc_free(struct rproc *rproc) - -Free an rproc handle that was allocated by rproc_alloc. - -This function essentially unrolls rproc_alloc(), by decrementing the -rproc's refcount. It doesn't directly free rproc; that would happen -only if there are no other references to rproc and its refcount now -dropped to zero. - -:: - - int rproc_add(struct rproc *rproc) - -Register @rproc with the remoteproc framework, after it has been -allocated with rproc_alloc(). - -This is called by the platform-specific rproc implementation, whenever -a new remote processor device is probed. - -Returns 0 on success and an appropriate error code otherwise. -Note: this function initiates an asynchronous firmware loading -context, which will look for virtio devices supported by the rproc's -firmware. 
- -If found, those virtio devices will be created and added, so as a result -of registering this remote processor, additional virtio drivers might get -probed. - -:: - - int rproc_del(struct rproc *rproc) - -Unroll rproc_add(). - -This function should be called when the platform specific rproc -implementation decides to remove the rproc device. it should -_only_ be called if a previous invocation of rproc_add() -has completed successfully. - -After rproc_del() returns, @rproc is still valid, and its -last refcount should be decremented by calling rproc_free(). - -Returns 0 on success and -EINVAL if @rproc isn't valid. - -:: - - void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) - -Report a crash in a remoteproc - -This function must be called every time a crash is detected by the -platform specific rproc implementation. This should not be called from a -non-remoteproc driver. This function can be called from atomic/interrupt -context. - -Implementation callbacks -======================== - -These callbacks should be provided by platform-specific remoteproc -drivers:: - - /** - * struct rproc_ops - platform-specific device handlers - * @start: power on the device and boot it - * @stop: power off the device - * @kick: kick a virtqueue (virtqueue id given as a parameter) - */ - struct rproc_ops { - int (*start)(struct rproc *rproc); - int (*stop)(struct rproc *rproc); - void (*kick)(struct rproc *rproc, int vqid); - }; - -Every remoteproc implementation should at least provide the ->start and ->stop -handlers. If rpmsg/virtio functionality is also desired, then the ->kick handler -should be provided as well. - -The ->start() handler takes an rproc handle and should then power on the -device and boot it (use rproc->priv to access platform-specific private data). -The boot address, in case needed, can be found in rproc->bootaddr (remoteproc -core puts there the ELF entry point). -On success, 0 should be returned, and on failure, an appropriate error code. - -The ->stop() handler takes an rproc handle and powers the device down. -On success, 0 is returned, and on failure, an appropriate error code. - -The ->kick() handler takes an rproc handle, and an index of a virtqueue -where new message was placed in. Implementations should interrupt the remote -processor and let it know it has pending messages. Notifying remote processors -the exact virtqueue index to look in is optional: it is easy (and not -too expensive) to go through the existing virtqueues and look for new buffers -in the used rings. - -Binary Firmware Structure -========================= - -At this point remoteproc supports ELF32 and ELF64 firmware binaries. However, -it is quite expected that other platforms/devices which we'd want to -support with this framework will be based on different binary formats. - -When those use cases show up, we will have to decouple the binary format -from the framework core, so we can support several binary formats without -duplicating common code. - -When the firmware is parsed, its various segments are loaded to memory -according to the specified device address (might be a physical address -if the remote processor is accessing memory directly). - -In addition to the standard ELF segments, most remote processors would -also include a special section which we call "the resource table". 
- -The resource table contains system resources that the remote processor -requires before it should be powered on, such as allocation of physically -contiguous memory, or iommu mapping of certain on-chip peripherals. -Remotecore will only power up the device after all the resource table's -requirement are met. - -In addition to system resources, the resource table may also contain -resource entries that publish the existence of supported features -or configurations by the remote processor, such as trace buffers and -supported virtio devices (and their configurations). - -The resource table begins with this header:: - - /** - * struct resource_table - firmware resource table header - * @ver: version number - * @num: number of resource entries - * @reserved: reserved (must be zero) - * @offset: array of offsets pointing at the various resource entries - * - * The header of the resource table, as expressed by this structure, - * contains a version number (should we need to change this format in the - * future), the number of available resource entries, and their offsets - * in the table. - */ - struct resource_table { - u32 ver; - u32 num; - u32 reserved[2]; - u32 offset[0]; - } __packed; - -Immediately following this header are the resource entries themselves, -each of which begins with the following resource entry header:: - - /** - * struct fw_rsc_hdr - firmware resource entry header - * @type: resource type - * @data: resource data - * - * Every resource entry begins with a 'struct fw_rsc_hdr' header providing - * its @type. The content of the entry itself will immediately follow - * this header, and it should be parsed according to the resource type. - */ - struct fw_rsc_hdr { - u32 type; - u8 data[0]; - } __packed; - -Some resources entries are mere announcements, where the host is informed -of specific remoteproc configuration. Other entries require the host to -do something (e.g. allocate a system resource). Sometimes a negotiation -is expected, where the firmware requests a resource, and once allocated, -the host should provide back its details (e.g. address of an allocated -memory region). - -Here are the various resource types that are currently supported:: - - /** - * enum fw_resource_type - types of resource entries - * - * @RSC_CARVEOUT: request for allocation of a physically contiguous - * memory region. - * @RSC_DEVMEM: request to iommu_map a memory-based peripheral. - * @RSC_TRACE: announces the availability of a trace buffer into which - * the remote processor will be writing logs. - * @RSC_VDEV: declare support for a virtio device, and serve as its - * virtio header. - * @RSC_LAST: just keep this one at the end - * @RSC_VENDOR_START: start of the vendor specific resource types range - * @RSC_VENDOR_END: end of the vendor specific resource types range - * - * Please note that these values are used as indices to the rproc_handle_rsc - * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to - * check the validity of an index before the lookup table is accessed, so - * please update it as needed. - */ - enum fw_resource_type { - RSC_CARVEOUT = 0, - RSC_DEVMEM = 1, - RSC_TRACE = 2, - RSC_VDEV = 3, - RSC_LAST = 4, - RSC_VENDOR_START = 128, - RSC_VENDOR_END = 512, - }; - -For more details regarding a specific resource type, please see its -dedicated structure in include/linux/remoteproc.h. - -We also expect that platform-specific resource entries will show up -at some point. 
When that happens, we could easily add a new RSC_PLATFORM -type, and hand those resources to the platform-specific rproc driver to handle. - -Virtio and remoteproc -===================== - -The firmware should provide remoteproc information about virtio devices -that it supports, and their configurations: a RSC_VDEV resource entry -should specify the virtio device id (as in virtio_ids.h), virtio features, -virtio config space, vrings information, etc. - -When a new remote processor is registered, the remoteproc framework -will look for its resource table and will register the virtio devices -it supports. A firmware may support any number of virtio devices, and -of any type (a single remote processor can also easily support several -rpmsg virtio devices this way, if desired). - -Of course, RSC_VDEV resource entries are only good enough for static -allocation of virtio devices. Dynamic allocations will also be made possible -using the rpmsg bus (similar to how we already do dynamic allocations of -rpmsg channels; read more about it in rpmsg.txt). diff --git a/Documentation/rpmsg.txt b/Documentation/rpmsg.txt deleted file mode 100644 index 24b7a9e1a5f9..000000000000 --- a/Documentation/rpmsg.txt +++ /dev/null @@ -1,341 +0,0 @@ -============================================ -Remote Processor Messaging (rpmsg) Framework -============================================ - -.. note:: - - This document describes the rpmsg bus and how to write rpmsg drivers. - To learn how to add rpmsg support for new platforms, check out remoteproc.txt - (also a resident of Documentation/). - -Introduction -============ - -Modern SoCs typically employ heterogeneous remote processor devices in -asymmetric multiprocessing (AMP) configurations, which may be running -different instances of operating system, whether it's Linux or any other -flavor of real-time OS. - -OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. -Typically, the dual cortex-A9 is running Linux in a SMP configuration, -and each of the other three cores (two M3 cores and a DSP) is running -its own instance of RTOS in an AMP configuration. - -Typically AMP remote processors employ dedicated DSP codecs and multimedia -hardware accelerators, and therefore are often used to offload CPU-intensive -multimedia tasks from the main application processor. - -These remote processors could also be used to control latency-sensitive -sensors, drive random hardware blocks, or just perform background tasks -while the main CPU is idling. - -Users of those remote processors can either be userland apps (e.g. multimedia -frameworks talking with remote OMX components) or kernel drivers (controlling -hardware accessible only by the remote processor, reserving kernel-controlled -resources on behalf of the remote processor, etc..). - -Rpmsg is a virtio-based messaging bus that allows kernel drivers to communicate -with remote processors available on the system. In turn, drivers could then -expose appropriate user space interfaces, if needed. - -When writing a driver that exposes rpmsg communication to userland, please -keep in mind that remote processors might have direct access to the -system's physical memory and other sensitive hardware resources (e.g. on -OMAP4, remote cores and hardware accelerators may have direct access to the -physical memory, gpio banks, dma controllers, i2c bus, gptimers, mailbox -devices, hwspinlocks, etc..). 
Moreover, those remote processors might be -running RTOS where every task can access the entire memory/devices exposed -to the processor. To minimize the risks of rogue (or buggy) userland code -exploiting remote bugs, and by that taking over the system, it is often -desired to limit userland to specific rpmsg channels (see definition below) -it can send messages on, and if possible, minimize how much control -it has over the content of the messages. - -Every rpmsg device is a communication channel with a remote processor (thus -rpmsg devices are called channels). Channels are identified by a textual name -and have a local ("source") rpmsg address, and remote ("destination") rpmsg -address. - -When a driver starts listening on a channel, its rx callback is bound with -a unique rpmsg local address (a 32-bit integer). This way when inbound messages -arrive, the rpmsg core dispatches them to the appropriate driver according -to their destination address (this is done by invoking the driver's rx handler -with the payload of the inbound message). - - -User API -======== - -:: - - int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len); - -sends a message across to the remote processor on a given channel. -The caller should specify the channel, the data it wants to send, -and its length (in bytes). The message will be sent on the specified -channel, i.e. its source and destination address fields will be -set to the channel's src and dst addresses. - -In case there are no TX buffers available, the function will block until -one becomes available (i.e. until the remote processor consumes -a tx buffer and puts it back on virtio's used descriptor ring), -or a timeout of 15 seconds elapses. When the latter happens, --ERESTARTSYS is returned. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst); - -sends a message across to the remote processor on a given channel, -to a destination address provided by the caller. - -The caller should specify the channel, the data it wants to send, -its length (in bytes), and an explicit destination address. - -The message will then be sent to the remote processor to which the -channel belongs, using the channel's src address, and the user-provided -dst address (thus the channel's dst address will be ignored). - -In case there are no TX buffers available, the function will block until -one becomes available (i.e. until the remote processor consumes -a tx buffer and puts it back on virtio's used descriptor ring), -or a timeout of 15 seconds elapses. When the latter happens, --ERESTARTSYS is returned. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, - void *data, int len); - - -sends a message across to the remote processor, using the src and dst -addresses provided by the user. - -The caller should specify the channel, the data it wants to send, -its length (in bytes), and explicit source and destination addresses. -The message will then be sent to the remote processor to which the -channel belongs, but the channel's src and dst addresses will be -ignored (and the user-provided addresses will be used instead). - -In case there are no TX buffers available, the function will block until -one becomes available (i.e. 
until the remote processor consumes -a tx buffer and puts it back on virtio's used descriptor ring), -or a timeout of 15 seconds elapses. When the latter happens, --ERESTARTSYS is returned. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len); - -sends a message across to the remote processor on a given channel. -The caller should specify the channel, the data it wants to send, -and its length (in bytes). The message will be sent on the specified -channel, i.e. its source and destination address fields will be -set to the channel's src and dst addresses. - -In case there are no TX buffers available, the function will immediately -return -ENOMEM without waiting until one becomes available. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst) - - -sends a message across to the remote processor on a given channel, -to a destination address provided by the user. - -The user should specify the channel, the data it wants to send, -its length (in bytes), and an explicit destination address. - -The message will then be sent to the remote processor to which the -channel belongs, using the channel's src address, and the user-provided -dst address (thus the channel's dst address will be ignored). - -In case there are no TX buffers available, the function will immediately -return -ENOMEM without waiting until one becomes available. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, - void *data, int len); - - -sends a message across to the remote processor, using source and -destination addresses provided by the user. - -The user should specify the channel, the data it wants to send, -its length (in bytes), and explicit source and destination addresses. -The message will then be sent to the remote processor to which the -channel belongs, but the channel's src and dst addresses will be -ignored (and the user-provided addresses will be used instead). - -In case there are no TX buffers available, the function will immediately -return -ENOMEM without waiting until one becomes available. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev, - void (*cb)(struct rpmsg_channel *, void *, int, void *, u32), - void *priv, u32 addr); - -every rpmsg address in the system is bound to an rx callback (so when -inbound messages arrive, they are dispatched by the rpmsg bus using the -appropriate callback handler) by means of an rpmsg_endpoint struct. - -This function allows drivers to create such an endpoint, and by that, -bind a callback, and possibly some private data too, to an rpmsg address -(either one that is known in advance, or one that will be dynamically -assigned for them). - -Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint -is already created for them when they are probed by the rpmsg bus -(using the rx callback they provide when they registered to the rpmsg bus). 
- -So things should just work for simple drivers: they already have an -endpoint, their rx callback is bound to their rpmsg address, and when -relevant inbound messages arrive (i.e. messages which their dst address -equals to the src address of their rpmsg channel), the driver's handler -is invoked to process it. - -That said, more complicated drivers might do need to allocate -additional rpmsg addresses, and bind them to different rx callbacks. -To accomplish that, those drivers need to call this function. -Drivers should provide their channel (so the new endpoint would bind -to the same remote processor their channel belongs to), an rx callback -function, an optional private data (which is provided back when the -rx callback is invoked), and an address they want to bind with the -callback. If addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will -dynamically assign them an available rpmsg address (drivers should have -a very good reason why not to always use RPMSG_ADDR_ANY here). - -Returns a pointer to the endpoint on success, or NULL on error. - -:: - - void rpmsg_destroy_ept(struct rpmsg_endpoint *ept); - - -destroys an existing rpmsg endpoint. user should provide a pointer -to an rpmsg endpoint that was previously created with rpmsg_create_ept(). - -:: - - int register_rpmsg_driver(struct rpmsg_driver *rpdrv); - - -registers an rpmsg driver with the rpmsg bus. user should provide -a pointer to an rpmsg_driver struct, which contains the driver's -->probe() and ->remove() functions, an rx callback, and an id_table -specifying the names of the channels this driver is interested to -be probed with. - -:: - - void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv); - - -unregisters an rpmsg driver from the rpmsg bus. user should provide -a pointer to a previously-registered rpmsg_driver struct. -Returns 0 on success, and an appropriate error value on failure. - - -Typical usage -============= - -The following is a simple rpmsg driver, that sends an "hello!" message -on probe(), and whenever it receives an incoming message, it dumps its -content to the console. - -:: - - #include - #include - #include - - static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len, - void *priv, u32 src) - { - print_hex_dump(KERN_INFO, "incoming message:", DUMP_PREFIX_NONE, - 16, 1, data, len, true); - } - - static int rpmsg_sample_probe(struct rpmsg_channel *rpdev) - { - int err; - - dev_info(&rpdev->dev, "chnl: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst); - - /* send a message on our channel */ - err = rpmsg_send(rpdev, "hello!", 6); - if (err) { - pr_err("rpmsg_send failed: %d\n", err); - return err; - } - - return 0; - } - - static void rpmsg_sample_remove(struct rpmsg_channel *rpdev) - { - dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n"); - } - - static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = { - { .name = "rpmsg-client-sample" }, - { }, - }; - MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table); - - static struct rpmsg_driver rpmsg_sample_client = { - .drv.name = KBUILD_MODNAME, - .id_table = rpmsg_driver_sample_id_table, - .probe = rpmsg_sample_probe, - .callback = rpmsg_sample_cb, - .remove = rpmsg_sample_remove, - }; - module_rpmsg_driver(rpmsg_sample_client); - -.. note:: - - a similar sample which can be built and loaded can be found - in samples/rpmsg/. - -Allocations of rpmsg channels -============================= - -At this point we only support dynamic allocations of rpmsg channels. 
- -This is possible only with remote processors that have the VIRTIO_RPMSG_F_NS -virtio device feature set. This feature bit means that the remote -processor supports dynamic name service announcement messages. - -When this feature is enabled, creation of rpmsg devices (i.e. channels) -is completely dynamic: the remote processor announces the existence of a -remote rpmsg service by sending a name service message (which contains -the name and rpmsg addr of the remote service, see struct rpmsg_ns_msg). - -This message is then handled by the rpmsg bus, which in turn dynamically -creates and registers an rpmsg channel (which represents the remote service). -If/when a relevant rpmsg driver is registered, it will be immediately probed -by the bus, and can then start sending messages to the remote service. - -The plan is also to add static creation of rpmsg channels via the virtio -config space, but it's not implemented yet. diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt deleted file mode 100644 index 50d7ea857cff..000000000000 --- a/Documentation/speculation.txt +++ /dev/null @@ -1,90 +0,0 @@ -This document explains potential effects of speculation, and how undesirable -effects can be mitigated portably using common APIs. - -=========== -Speculation -=========== - -To improve performance and minimize average latencies, many contemporary CPUs -employ speculative execution techniques such as branch prediction, performing -work which may be discarded at a later stage. - -Typically speculative execution cannot be observed from architectural state, -such as the contents of registers. However, in some cases it is possible to -observe its impact on microarchitectural state, such as the presence or -absence of data in caches. Such state may form side-channels which can be -observed to extract secret information. - -For example, in the presence of branch prediction, it is possible for bounds -checks to be ignored by code which is speculatively executed. Consider the -following code:: - - int load_array(int *array, unsigned int index) - { - if (index >= MAX_ARRAY_ELEMS) - return 0; - else - return array[index]; - } - -Which, on arm64, may be compiled to an assembly sequence such as:: - - CMP , #MAX_ARRAY_ELEMS - B.LT less - MOV , #0 - RET - less: - LDR , [, ] - RET - -It is possible that a CPU mis-predicts the conditional branch, and -speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This -value will subsequently be discarded, but the speculated load may affect -microarchitectural state which can be subsequently measured. - -More complex sequences involving multiple dependent memory accesses may -result in sensitive information being leaked. Consider the following -code, building on the prior example:: - - int load_dependent_arrays(int *arr1, int *arr2, int index) - { - int val1, val2, - - val1 = load_array(arr1, index); - val2 = load_array(arr2, val1); - - return val2; - } - -Under speculation, the first call to load_array() may return the value -of an out-of-bounds address, while the second call will influence -microarchitectural state dependent on this value. This may provide an -arbitrary read primitive. - -==================================== -Mitigating speculation side-channels -==================================== - -The kernel provides a generic API to ensure that bounds checks are -respected even under speculation. Architectures which are affected by -speculation-based side-channels are expected to implement these -primitives. 
- -The array_index_nospec() helper in can be used to -prevent information from being leaked via side-channels. - -A call to array_index_nospec(index, size) returns a sanitized index -value that is bounded to [0, size) even under cpu speculation -conditions. - -This can be used to protect the earlier load_array() example:: - - int load_array(int *array, unsigned int index) - { - if (index >= MAX_ARRAY_ELEMS) - return 0; - else { - index = array_index_nospec(index, MAX_ARRAY_ELEMS); - return array[index]; - } - } diff --git a/Documentation/staging/crc32.rst b/Documentation/staging/crc32.rst new file mode 100644 index 000000000000..8a6860f33b4e --- /dev/null +++ b/Documentation/staging/crc32.rst @@ -0,0 +1,189 @@ +================================= +brief tutorial on CRC computation +================================= + +A CRC is a long-division remainder. You add the CRC to the message, +and the whole thing (message+CRC) is a multiple of the given +CRC polynomial. To check the CRC, you can either check that the +CRC matches the recomputed value, *or* you can check that the +remainder computed on the message+CRC is 0. This latter approach +is used by a lot of hardware implementations, and is why so many +protocols put the end-of-frame flag after the CRC. + +It's actually the same long division you learned in school, except that: + +- We're working in binary, so the digits are only 0 and 1, and +- When dividing polynomials, there are no carries. Rather than add and + subtract, we just xor. Thus, we tend to get a bit sloppy about + the difference between adding and subtracting. + +Like all division, the remainder is always smaller than the divisor. +To produce a 32-bit CRC, the divisor is actually a 33-bit CRC polynomial. +Since it's 33 bits long, bit 32 is always going to be set, so usually the +CRC is written in hex with the most significant bit omitted. (If you're +familiar with the IEEE 754 floating-point format, it's the same idea.) + +Note that a CRC is computed over a string of *bits*, so you have +to decide on the endianness of the bits within each byte. To get +the best error-detecting properties, this should correspond to the +order they're actually sent. For example, standard RS-232 serial is +little-endian; the most significant bit (sometimes used for parity) +is sent last. And when appending a CRC word to a message, you should +do it in the right order, matching the endianness. + +Just like with ordinary division, you proceed one digit (bit) at a time. +Each step of the division you take one more digit (bit) of the dividend +and append it to the current remainder. Then you figure out the +appropriate multiple of the divisor to subtract to being the remainder +back into range. In binary, this is easy - it has to be either 0 or 1, +and to make the XOR cancel, it's just a copy of bit 32 of the remainder. + +When computing a CRC, we don't care about the quotient, so we can +throw the quotient bit away, but subtract the appropriate multiple of +the polynomial from the remainder and we're back to where we started, +ready to process the next bit. + +A big-endian CRC written this way would be coded like:: + + for (i = 0; i < input_bits; i++) { + multiple = remainder & 0x80000000 ? CRCPOLY : 0; + remainder = (remainder << 1 | next_input_bit()) ^ multiple; + } + +Notice how, to get at bit 32 of the shifted remainder, we look +at bit 31 of the remainder *before* shifting it. 
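+
+For illustration only, the loop above can be wrapped into a
+self-contained function along the following lines. This is a sketch
+rather than kernel code: the usual CRC-32 polynomial stands in for
+CRCPOLY, the message bytes are fed in most-significant-bit first, the
+function name is arbitrary, and the initial/final inversions discussed
+at the end of this document are left out::
+
+	#include <stddef.h>
+	#include <stdint.h>
+
+	#define CRCPOLY 0x04c11db7	/* CRC-32 polynomial, implied bit 32 dropped */
+
+	static uint32_t crc32_be_bitwise(const uint8_t *buf, size_t len)
+	{
+		uint32_t remainder = 0;
+		uint32_t multiple;
+		size_t i;
+		int j;
+
+		for (i = 0; i < len; i++) {
+			/* feed the message bits in, most significant bit first */
+			for (j = 7; j >= 0; j--) {
+				uint32_t bit = (buf[i] >> j) & 1;
+
+				multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+				remainder = (remainder << 1 | bit) ^ multiple;
+			}
+		}
+
+		/* 32 trailing zero bits: the hole the CRC itself will fill */
+		for (j = 0; j < 32; j++) {
+			multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+			remainder = (remainder << 1) ^ multiple;
+		}
+
+		return remainder;
+	}
+
+Note the 32 extra zero bits shifted in at the end; the following
+paragraphs explain why they are needed and how the standard trick gets
+rid of them.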
+ +But also notice how the next_input_bit() bits we're shifting into +the remainder don't actually affect any decision-making until +32 bits later. Thus, the first 32 cycles of this are pretty boring. +Also, to add the CRC to a message, we need a 32-bit-long hole for it at +the end, so we have to add 32 extra cycles shifting in zeros at the +end of every message. + +These details lead to a standard trick: rearrange merging in the +next_input_bit() until the moment it's needed. Then the first 32 cycles +can be precomputed, and merging in the final 32 zero bits to make room +for the CRC can be skipped entirely. This changes the code to:: + + for (i = 0; i < input_bits; i++) { + remainder ^= next_input_bit() << 31; + multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + remainder = (remainder << 1) ^ multiple; + } + +With this optimization, the little-endian code is particularly simple:: + + for (i = 0; i < input_bits; i++) { + remainder ^= next_input_bit(); + multiple = (remainder & 1) ? CRCPOLY : 0; + remainder = (remainder >> 1) ^ multiple; + } + +The most significant coefficient of the remainder polynomial is stored +in the least significant bit of the binary "remainder" variable. +The other details of endianness have been hidden in CRCPOLY (which must +be bit-reversed) and next_input_bit(). + +As long as next_input_bit is returning the bits in a sensible order, we don't +*have* to wait until the last possible moment to merge in additional bits. +We can do it 8 bits at a time rather than 1 bit at a time:: + + for (i = 0; i < input_bytes; i++) { + remainder ^= next_input_byte() << 24; + for (j = 0; j < 8; j++) { + multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + remainder = (remainder << 1) ^ multiple; + } + } + +Or in little-endian:: + + for (i = 0; i < input_bytes; i++) { + remainder ^= next_input_byte(); + for (j = 0; j < 8; j++) { + multiple = (remainder & 1) ? CRCPOLY : 0; + remainder = (remainder >> 1) ^ multiple; + } + } + +If the input is a multiple of 32 bits, you can even XOR in a 32-bit +word at a time and increase the inner loop count to 32. + +You can also mix and match the two loop styles, for example doing the +bulk of a message byte-at-a-time and adding bit-at-a-time processing +for any fractional bytes at the end. + +To reduce the number of conditional branches, software commonly uses +the byte-at-a-time table method, popularized by Dilip V. Sarwate, +"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM +v.31 no.8 (August 1998) p. 1008-1013. + +Here, rather than just shifting one bit of the remainder to decide +in the correct multiple to subtract, we can shift a byte at a time. +This produces a 40-bit (rather than a 33-bit) intermediate remainder, +and the correct multiple of the polynomial to subtract is found using +a 256-entry lookup table indexed by the high 8 bits. + +(The table entries are simply the CRC-32 of the given one-byte messages.) + +When space is more constrained, smaller tables can be used, e.g. two +4-bit shifts followed by a lookup in a 16-entry table. + +It is not practical to process much more than 8 bits at a time using this +technique, because tables larger than 256 entries use too much memory and, +more importantly, too much of the L1 cache. + +To get higher software performance, a "slicing" technique can be used. 
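+
+Before moving on to slicing, here is a sketch of the single-table
+(Sarwate) byte-at-a-time method just described, written in the
+little-endian convention used earlier. The names are arbitrary, the
+table simply holds the CRC of each possible one-byte message, and the
+initial/final inversions discussed at the end of this document are
+again left to the caller::
+
+	#include <stddef.h>
+	#include <stdint.h>
+
+	#define CRCPOLY_LE 0xedb88320	/* bit-reversed CRC-32 polynomial */
+
+	static uint32_t crc_table[256];
+
+	static void crc32_table_init(void)
+	{
+		uint32_t rem;
+		int i, j;
+
+		for (i = 0; i < 256; i++) {
+			rem = i;
+			for (j = 0; j < 8; j++)
+				rem = (rem >> 1) ^ ((rem & 1) ? CRCPOLY_LE : 0);
+			crc_table[i] = rem;	/* CRC of the one-byte message i */
+		}
+	}
+
+	static uint32_t crc32_le_bytewise(uint32_t crc, const uint8_t *buf,
+					  size_t len)
+	{
+		size_t i;
+
+		/* one shift and one table lookup per input byte */
+		for (i = 0; i < len; i++)
+			crc = (crc >> 8) ^ crc_table[(crc ^ buf[i]) & 0xff];
+
+		return crc;
+	}
+
+Slicing, described next, extends this idea by consulting several such
+256-entry tables in parallel.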
+See "High Octane CRC Generation with the Intel Slicing-by-8 Algorithm", +ftp://download.intel.com/technology/comms/perfnet/download/slicing-by-8.pdf + +This does not change the number of table lookups, but does increase +the parallelism. With the classic Sarwate algorithm, each table lookup +must be completed before the index of the next can be computed. + +A "slicing by 2" technique would shift the remainder 16 bits at a time, +producing a 48-bit intermediate remainder. Rather than doing a single +lookup in a 65536-entry table, the two high bytes are looked up in +two different 256-entry tables. Each contains the remainder required +to cancel out the corresponding byte. The tables are different because the +polynomials to cancel are different. One has non-zero coefficients from +x^32 to x^39, while the other goes from x^40 to x^47. + +Since modern processors can handle many parallel memory operations, this +takes barely longer than a single table look-up and thus performs almost +twice as fast as the basic Sarwate algorithm. + +This can be extended to "slicing by 4" using 4 256-entry tables. +Each step, 32 bits of data is fetched, XORed with the CRC, and the result +broken into bytes and looked up in the tables. Because the 32-bit shift +leaves the low-order bits of the intermediate remainder zero, the +final CRC is simply the XOR of the 4 table look-ups. + +But this still enforces sequential execution: a second group of table +look-ups cannot begin until the previous groups 4 table look-ups have all +been completed. Thus, the processor's load/store unit is sometimes idle. + +To make maximum use of the processor, "slicing by 8" performs 8 look-ups +in parallel. Each step, the 32-bit CRC is shifted 64 bits and XORed +with 64 bits of input data. What is important to note is that 4 of +those 8 bytes are simply copies of the input data; they do not depend +on the previous CRC at all. Thus, those 4 table look-ups may commence +immediately, without waiting for the previous loop iteration. + +By always having 4 loads in flight, a modern superscalar processor can +be kept busy and make full use of its L1 cache. + +Two more details about CRC implementation in the real world: + +Normally, appending zero bits to a message which is already a multiple +of a polynomial produces a larger multiple of that polynomial. Thus, +a basic CRC will not detect appended zero bits (or bytes). To enable +a CRC to detect this condition, it's common to invert the CRC before +appending it. This makes the remainder of the message+crc come out not +as zero, but some fixed non-zero value. (The CRC of the inversion +pattern, 0xffffffff.) + +The same problem applies to zero bits prepended to the message, and a +similar solution is used. Instead of starting the CRC computation with +a remainder of 0, an initial remainder of all ones is used. As long as +you start the same way on decoding, it doesn't make a difference. diff --git a/Documentation/staging/index.rst b/Documentation/staging/index.rst new file mode 100644 index 000000000000..8e98517675ca --- /dev/null +++ b/Documentation/staging/index.rst @@ -0,0 +1,32 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Unsorted Documentation +====================== + +.. toctree:: + :maxdepth: 2 + + crc32 + kprobes + lzo + remoteproc + rpmsg + speculation + static-keys + tee + xz + +Atomic Types +============ + +.. literalinclude:: ../atomic_t.txt + +Atomic bitops +============= + +.. literalinclude:: ../atomic_bitops.txt + +Memory Barriers +=============== + +.. 
literalinclude:: ../memory-barriers.txt diff --git a/Documentation/staging/kprobes.rst b/Documentation/staging/kprobes.rst new file mode 100644 index 000000000000..8baab8832c5b --- /dev/null +++ b/Documentation/staging/kprobes.rst @@ -0,0 +1,801 @@ +======================= +Kernel Probes (Kprobes) +======================= + +:Author: Jim Keniston +:Author: Prasanna S Panchamukhi +:Author: Masami Hiramatsu + +.. CONTENTS + + 1. Concepts: Kprobes, and Return Probes + 2. Architectures Supported + 3. Configuring Kprobes + 4. API Reference + 5. Kprobes Features and Limitations + 6. Probe Overhead + 7. TODO + 8. Kprobes Example + 9. Kretprobes Example + 10. Deprecated Features + Appendix A: The kprobes debugfs interface + Appendix B: The kprobes sysctl interface + +Concepts: Kprobes and Return Probes +========================================= + +Kprobes enables you to dynamically break into any kernel routine and +collect debugging and performance information non-disruptively. You +can trap at almost any kernel code address [1]_, specifying a handler +routine to be invoked when the breakpoint is hit. + +.. [1] some parts of the kernel code can not be trapped, see + :ref:`kprobes_blacklist`) + +There are currently two types of probes: kprobes, and kretprobes +(also called return probes). A kprobe can be inserted on virtually +any instruction in the kernel. A return probe fires when a specified +function returns. + +In the typical case, Kprobes-based instrumentation is packaged as +a kernel module. The module's init function installs ("registers") +one or more probes, and the exit function unregisters them. A +registration function such as register_kprobe() specifies where +the probe is to be inserted and what handler is to be called when +the probe is hit. + +There are also ``register_/unregister_*probes()`` functions for batch +registration/unregistration of a group of ``*probes``. These functions +can speed up unregistration process when you have to unregister +a lot of probes at once. + +The next four subsections explain how the different types of +probes work and how jump optimization works. They explain certain +things that you'll need to know in order to make the best use of +Kprobes -- e.g., the difference between a pre_handler and +a post_handler, and how to use the maxactive and nmissed fields of +a kretprobe. But if you're in a hurry to start using Kprobes, you +can skip ahead to :ref:`kprobes_archs_supported`. + +How Does a Kprobe Work? +----------------------- + +When a kprobe is registered, Kprobes makes a copy of the probed +instruction and replaces the first byte(s) of the probed instruction +with a breakpoint instruction (e.g., int3 on i386 and x86_64). + +When a CPU hits the breakpoint instruction, a trap occurs, the CPU's +registers are saved, and control passes to Kprobes via the +notifier_call_chain mechanism. Kprobes executes the "pre_handler" +associated with the kprobe, passing the handler the addresses of the +kprobe struct and the saved registers. + +Next, Kprobes single-steps its copy of the probed instruction. +(It would be simpler to single-step the actual instruction in place, +but then Kprobes would have to temporarily remove the breakpoint +instruction. This would open a small time window when another CPU +could sail right past the probepoint.) + +After the instruction is single-stepped, Kprobes executes the +"post_handler," if any, that is associated with the kprobe. +Execution then continues with the instruction following the probepoint. 
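+
+Putting the pieces described so far together, the typical packaging
+looks roughly like the sketch below: the module's init function
+registers a probe, the pre_handler and post_handler just log that they
+ran, and the exit function unregisters the probe. The probed symbol
+(vfs_read) is only an example, the exact handler prototypes are given
+in the API reference below, and complete, buildable examples live in
+samples/kprobes/::
+
+	#include <linux/kernel.h>
+	#include <linux/module.h>
+	#include <linux/kprobes.h>
+
+	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
+	{
+		pr_info("pre_handler: probe on %s hit\n", p->symbol_name);
+		return 0;	/* let the probed instruction be single-stepped */
+	}
+
+	static void demo_post(struct kprobe *p, struct pt_regs *regs,
+			      unsigned long flags)
+	{
+		pr_info("post_handler: single-step of %s done\n", p->symbol_name);
+	}
+
+	static struct kprobe demo_kp = {
+		.symbol_name	= "vfs_read",	/* example symbol only */
+		.pre_handler	= demo_pre,
+		.post_handler	= demo_post,
+	};
+
+	static int __init demo_init(void)
+	{
+		return register_kprobe(&demo_kp);
+	}
+
+	static void __exit demo_exit(void)
+	{
+		unregister_kprobe(&demo_kp);
+	}
+
+	module_init(demo_init);
+	module_exit(demo_exit);
+	MODULE_LICENSE("GPL");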
+ +Changing Execution Path +----------------------- + +Since kprobes can probe into a running kernel code, it can change the +register set, including instruction pointer. This operation requires +maximum care, such as keeping the stack frame, recovering the execution +path etc. Since it operates on a running kernel and needs deep knowledge +of computer architecture and concurrent computing, you can easily shoot +your foot. + +If you change the instruction pointer (and set up other related +registers) in pre_handler, you must return !0 so that kprobes stops +single stepping and just returns to the given address. +This also means post_handler should not be called anymore. + +Note that this operation may be harder on some architectures which use +TOC (Table of Contents) for function call, since you have to setup a new +TOC for your function in your module, and recover the old one after +returning from it. + +Return Probes +------------- + +How Does a Return Probe Work? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When you call register_kretprobe(), Kprobes establishes a kprobe at +the entry to the function. When the probed function is called and this +probe is hit, Kprobes saves a copy of the return address, and replaces +the return address with the address of a "trampoline." The trampoline +is an arbitrary piece of code -- typically just a nop instruction. +At boot time, Kprobes registers a kprobe at the trampoline. + +When the probed function executes its return instruction, control +passes to the trampoline and that probe is hit. Kprobes' trampoline +handler calls the user-specified return handler associated with the +kretprobe, then sets the saved instruction pointer to the saved return +address, and that's where execution resumes upon return from the trap. + +While the probed function is executing, its return address is +stored in an object of type kretprobe_instance. Before calling +register_kretprobe(), the user sets the maxactive field of the +kretprobe struct to specify how many instances of the specified +function can be probed simultaneously. register_kretprobe() +pre-allocates the indicated number of kretprobe_instance objects. + +For example, if the function is non-recursive and is called with a +spinlock held, maxactive = 1 should be enough. If the function is +non-recursive and can never relinquish the CPU (e.g., via a semaphore +or preemption), NR_CPUS should be enough. If maxactive <= 0, it is +set to a default value. If CONFIG_PREEMPT is enabled, the default +is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS. + +It's not a disaster if you set maxactive too low; you'll just miss +some probes. In the kretprobe struct, the nmissed field is set to +zero when the return probe is registered, and is incremented every +time the probed function is entered but there is no kretprobe_instance +object available for establishing the return probe. + +Kretprobe entry-handler +^^^^^^^^^^^^^^^^^^^^^^^ + +Kretprobes also provides an optional user-specified handler which runs +on function entry. This handler is specified by setting the entry_handler +field of the kretprobe struct. Whenever the kprobe placed by kretprobe at the +function entry is hit, the user-defined entry_handler, if any, is invoked. +If the entry_handler returns 0 (success) then a corresponding return handler +is guaranteed to be called upon function return. If the entry_handler +returns a non-zero error then Kprobes leaves the return address as is, and +the kretprobe has no further effect for that particular function instance. 
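+
+As an illustration of these rules, the sketch below uses an entry
+handler to skip calls it does not care about: returning a non-zero
+value from the entry handler means no return handler runs for that
+particular call. The probed symbol and the task-name filter are
+placeholders, and regs_return_value() is described later, under
+register_kretprobe()::
+
+	#include <linux/kernel.h>
+	#include <linux/kprobes.h>
+	#include <linux/sched.h>
+	#include <linux/string.h>
+
+	/* runs on function entry; non-zero return = don't track this call */
+	static int demo_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
+	{
+		if (strcmp(current->comm, "bash"))
+			return 1;	/* not interesting, skip the return handler */
+		return 0;		/* track it; the return handler will run */
+	}
+
+	/* runs when a tracked call returns */
+	static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
+	{
+		pr_info("vfs_read returned %ld\n", (long)regs_return_value(regs));
+		return 0;
+	}
+
+	static struct kretprobe demo_rp = {
+		.kp.symbol_name	= "vfs_read",	/* example symbol only */
+		.entry_handler	= demo_entry,
+		.handler	= demo_ret,
+		.maxactive	= 20,	/* see the discussion of maxactive above */
+	};
+
+	/* register_kretprobe(&demo_rp) / unregister_kretprobe(&demo_rp) as usual */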
+ +Multiple entry and return handler invocations are matched using the unique +kretprobe_instance object associated with them. Additionally, a user +may also specify per return-instance private data to be part of each +kretprobe_instance object. This is especially useful when sharing private +data between corresponding user entry and return handlers. The size of each +private data object can be specified at kretprobe registration time by +setting the data_size field of the kretprobe struct. This data can be +accessed through the data field of each kretprobe_instance object. + +In case probed function is entered but there is no kretprobe_instance +object available, then in addition to incrementing the nmissed count, +the user entry_handler invocation is also skipped. + +.. _kprobes_jump_optimization: + +How Does Jump Optimization Work? +-------------------------------- + +If your kernel is built with CONFIG_OPTPROBES=y (currently this flag +is automatically set 'y' on x86/x86-64, non-preemptive kernel) and +the "debug.kprobes_optimization" kernel parameter is set to 1 (see +sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump +instruction instead of a breakpoint instruction at each probepoint. + +Init a Kprobe +^^^^^^^^^^^^^ + +When a probe is registered, before attempting this optimization, +Kprobes inserts an ordinary, breakpoint-based kprobe at the specified +address. So, even if it's not possible to optimize this particular +probepoint, there'll be a probe there. + +Safety Check +^^^^^^^^^^^^ + +Before optimizing a probe, Kprobes performs the following safety checks: + +- Kprobes verifies that the region that will be replaced by the jump + instruction (the "optimized region") lies entirely within one function. + (A jump instruction is multiple bytes, and so may overlay multiple + instructions.) + +- Kprobes analyzes the entire function and verifies that there is no + jump into the optimized region. Specifically: + + - the function contains no indirect jump; + - the function contains no instruction that causes an exception (since + the fixup code triggered by the exception could jump back into the + optimized region -- Kprobes checks the exception tables to verify this); + - there is no near jump to the optimized region (other than to the first + byte). + +- For each instruction in the optimized region, Kprobes verifies that + the instruction can be executed out of line. + +Preparing Detour Buffer +^^^^^^^^^^^^^^^^^^^^^^^ + +Next, Kprobes prepares a "detour" buffer, which contains the following +instruction sequence: + +- code to push the CPU's registers (emulating a breakpoint trap) +- a call to the trampoline code which calls user's probe handlers. +- code to restore registers +- the instructions from the optimized region +- a jump back to the original execution path. + +Pre-optimization +^^^^^^^^^^^^^^^^ + +After preparing the detour buffer, Kprobes verifies that none of the +following situations exist: + +- The probe has a post_handler. +- Other instructions in the optimized region are probed. +- The probe is disabled. + +In any of the above cases, Kprobes won't start optimizing the probe. +Since these are temporary situations, Kprobes tries to start +optimizing it again if the situation is changed. + +If the kprobe can be optimized, Kprobes enqueues the kprobe to an +optimizing list, and kicks the kprobe-optimizer workqueue to optimize +it. 
If the to-be-optimized probepoint is hit before being optimized, +Kprobes returns control to the original instruction path by setting +the CPU's instruction pointer to the copied code in the detour buffer +-- thus at least avoiding the single-step. + +Optimization +^^^^^^^^^^^^ + +The Kprobe-optimizer doesn't insert the jump instruction immediately; +rather, it calls synchronize_rcu() for safety first, because it's +possible for a CPU to be interrupted in the middle of executing the +optimized region [3]_. As you know, synchronize_rcu() can ensure +that all interruptions that were active when synchronize_rcu() +was called are done, but only if CONFIG_PREEMPT=n. So, this version +of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_. + +After that, the Kprobe-optimizer calls stop_machine() to replace +the optimized region with a jump instruction to the detour buffer, +using text_poke_smp(). + +Unoptimization +^^^^^^^^^^^^^^ + +When an optimized kprobe is unregistered, disabled, or blocked by +another kprobe, it will be unoptimized. If this happens before +the optimization is complete, the kprobe is just dequeued from the +optimized list. If the optimization has been done, the jump is +replaced with the original code (except for an int3 breakpoint in +the first byte) by using text_poke_smp(). + +.. [3] Please imagine that the 2nd instruction is interrupted and then + the optimizer replaces the 2nd instruction with the jump *address* + while the interrupt handler is running. When the interrupt + returns to original address, there is no valid instruction, + and it causes an unexpected result. + +.. [4] This optimization-safety checking may be replaced with the + stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y + kernel. + +NOTE for geeks: +The jump optimization changes the kprobe's pre_handler behavior. +Without optimization, the pre_handler can change the kernel's execution +path by changing regs->ip and returning 1. However, when the probe +is optimized, that modification is ignored. Thus, if you want to +tweak the kernel's execution path, you need to suppress optimization, +using one of the following techniques: + +- Specify an empty function for the kprobe's post_handler. + +or + +- Execute 'sysctl -w debug.kprobes_optimization=n' + +.. _kprobes_blacklist: + +Blacklist +--------- + +Kprobes can probe most of the kernel except itself. This means +that there are some functions where kprobes cannot probe. Probing +(trapping) such functions can cause a recursive trap (e.g. double +fault) or the nested probe handler may never be called. +Kprobes manages such functions as a blacklist. +If you want to add a function into the blacklist, you just need +to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro +to specify a blacklisted function. +Kprobes checks the given probe address against the blacklist and +rejects registering it, if the given address is in the blacklist. + +.. _kprobes_archs_supported: + +Architectures Supported +======================= + +Kprobes and return probes are implemented on the following +architectures: + +- i386 (Supports jump optimization) +- x86_64 (AMD-64, EM64T) (Supports jump optimization) +- ppc64 +- ia64 (Does not support probes on instruction slot1.) +- sparc64 (Return probes not yet implemented.) +- arm +- ppc +- mips +- s390 +- parisc + +Configuring Kprobes +=================== + +When configuring the kernel using make menuconfig/xconfig/oldconfig, +ensure that CONFIG_KPROBES is set to "y". 
Under "General setup", look +for "Kprobes". + +So that you can load and unload Kprobes-based instrumentation modules, +make sure "Loadable module support" (CONFIG_MODULES) and "Module +unloading" (CONFIG_MODULE_UNLOAD) are set to "y". + +Also make sure that CONFIG_KALLSYMS and perhaps even CONFIG_KALLSYMS_ALL +are set to "y", since kallsyms_lookup_name() is used by the in-kernel +kprobe address resolution code. + +If you need to insert a probe in the middle of a function, you may find +it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO), +so you can use "objdump -d -l vmlinux" to see the source-to-object +code mapping. + +API Reference +============= + +The Kprobes API includes a "register" function and an "unregister" +function for each type of probe. The API also includes "register_*probes" +and "unregister_*probes" functions for (un)registering arrays of probes. +Here are terse, mini-man-page specifications for these functions and +the associated probe handlers that you'll write. See the files in the +samples/kprobes/ sub-directory for examples. + +register_kprobe +--------------- + +:: + + #include + int register_kprobe(struct kprobe *kp); + +Sets a breakpoint at the address kp->addr. When the breakpoint is +hit, Kprobes calls kp->pre_handler. After the probed instruction +is single-stepped, Kprobe calls kp->post_handler. If a fault +occurs during execution of kp->pre_handler or kp->post_handler, +or during single-stepping of the probed instruction, Kprobes calls +kp->fault_handler. Any or all handlers can be NULL. If kp->flags +is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, +so, its handlers aren't hit until calling enable_kprobe(kp). + +.. note:: + + 1. With the introduction of the "symbol_name" field to struct kprobe, + the probepoint address resolution will now be taken care of by the kernel. + The following will now work:: + + kp.symbol_name = "symbol_name"; + + (64-bit powerpc intricacies such as function descriptors are handled + transparently) + + 2. Use the "offset" field of struct kprobe if the offset into the symbol + to install a probepoint is known. This field is used to calculate the + probepoint. + + 3. Specify either the kprobe "symbol_name" OR the "addr". If both are + specified, kprobe registration will fail with -EINVAL. + + 4. With CISC architectures (such as i386 and x86_64), the kprobes code + does not validate if the kprobe.addr is at an instruction boundary. + Use "offset" with caution. + +register_kprobe() returns 0 on success, or a negative errno otherwise. + +User's pre-handler (kp->pre_handler):: + + #include + #include + int pre_handler(struct kprobe *p, struct pt_regs *regs); + +Called with p pointing to the kprobe associated with the breakpoint, +and regs pointing to the struct containing the registers saved when +the breakpoint was hit. Return 0 here unless you're a Kprobes geek. + +User's post-handler (kp->post_handler):: + + #include + #include + void post_handler(struct kprobe *p, struct pt_regs *regs, + unsigned long flags); + +p and regs are as described for the pre_handler. flags always seems +to be zero. + +User's fault-handler (kp->fault_handler):: + + #include + #include + int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr); + +p and regs are as described for the pre_handler. trapnr is the +architecture-specific trap number associated with the fault (e.g., +on i386, 13 for a general protection fault or 14 for a page fault). +Returns 1 if it successfully handled the exception. 
+ +register_kretprobe +------------------ + +:: + + #include + int register_kretprobe(struct kretprobe *rp); + +Establishes a return probe for the function whose address is +rp->kp.addr. When that function returns, Kprobes calls rp->handler. +You must set rp->maxactive appropriately before you call +register_kretprobe(); see "How Does a Return Probe Work?" for details. + +register_kretprobe() returns 0 on success, or a negative errno +otherwise. + +User's return-probe handler (rp->handler):: + + #include + #include + int kretprobe_handler(struct kretprobe_instance *ri, + struct pt_regs *regs); + +regs is as described for kprobe.pre_handler. ri points to the +kretprobe_instance object, of which the following fields may be +of interest: + +- ret_addr: the return address +- rp: points to the corresponding kretprobe object +- task: points to the corresponding task struct +- data: points to per return-instance private data; see "Kretprobe + entry-handler" for details. + +The regs_return_value(regs) macro provides a simple abstraction to +extract the return value from the appropriate register as defined by +the architecture's ABI. + +The handler's return value is currently ignored. + +unregister_*probe +------------------ + +:: + + #include + void unregister_kprobe(struct kprobe *kp); + void unregister_kretprobe(struct kretprobe *rp); + +Removes the specified probe. The unregister function can be called +at any time after the probe has been registered. + +.. note:: + + If the functions find an incorrect probe (ex. an unregistered probe), + they clear the addr field of the probe. + +register_*probes +---------------- + +:: + + #include + int register_kprobes(struct kprobe **kps, int num); + int register_kretprobes(struct kretprobe **rps, int num); + +Registers each of the num probes in the specified array. If any +error occurs during registration, all probes in the array, up to +the bad probe, are safely unregistered before the register_*probes +function returns. + +- kps/rps: an array of pointers to ``*probe`` data structures +- num: the number of the array entries. + +.. note:: + + You have to allocate(or define) an array of pointers and set all + of the array entries before using these functions. + +unregister_*probes +------------------ + +:: + + #include + void unregister_kprobes(struct kprobe **kps, int num); + void unregister_kretprobes(struct kretprobe **rps, int num); + +Removes each of the num probes in the specified array at once. + +.. note:: + + If the functions find some incorrect probes (ex. unregistered + probes) in the specified array, they clear the addr field of those + incorrect probes. However, other probes in the array are + unregistered correctly. + +disable_*probe +-------------- + +:: + + #include + int disable_kprobe(struct kprobe *kp); + int disable_kretprobe(struct kretprobe *rp); + +Temporarily disables the specified ``*probe``. You can enable it again by using +enable_*probe(). You must specify the probe which has been registered. + +enable_*probe +------------- + +:: + + #include + int enable_kprobe(struct kprobe *kp); + int enable_kretprobe(struct kretprobe *rp); + +Enables ``*probe`` which has been disabled by disable_*probe(). You must specify +the probe which has been registered. + +Kprobes Features and Limitations +================================ + +Kprobes allows multiple probes at the same address. Also, +a probepoint for which there is a post_handler cannot be optimized. 
+So if you install a kprobe with a post_handler, at an optimized +probepoint, the probepoint will be unoptimized automatically. + +In general, you can install a probe anywhere in the kernel. +In particular, you can probe interrupt handlers. Known exceptions +are discussed in this section. + +The register_*probe functions will return -EINVAL if you attempt +to install a probe in the code that implements Kprobes (mostly +kernel/kprobes.c and ``arch/*/kernel/kprobes.c``, but also functions such +as do_page_fault and notifier_call_chain). + +If you install a probe in an inline-able function, Kprobes makes +no attempt to chase down all inline instances of the function and +install probes there. gcc may inline a function without being asked, +so keep this in mind if you're not seeing the probe hits you expect. + +A probe handler can modify the environment of the probed function +-- e.g., by modifying kernel data structures, or by modifying the +contents of the pt_regs struct (which are restored to the registers +upon return from the breakpoint). So Kprobes can be used, for example, +to install a bug fix or to inject faults for testing. Kprobes, of +course, has no way to distinguish the deliberately injected faults +from the accidental ones. Don't drink and probe. + +Kprobes makes no attempt to prevent probe handlers from stepping on +each other -- e.g., probing printk() and then calling printk() from a +probe handler. If a probe handler hits a probe, that second probe's +handlers won't be run in that instance, and the kprobe.nmissed member +of the second probe will be incremented. + +As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of +the same handler) may run concurrently on different CPUs. + +Kprobes does not use mutexes or allocate memory except during +registration and unregistration. + +Probe handlers are run with preemption disabled or interrupt disabled, +which depends on the architecture and optimization state. (e.g., +kretprobe handlers and optimized kprobe handlers run without interrupt +disabled on x86/x86-64). In any case, your handler should not yield +the CPU (e.g., by attempting to acquire a semaphore, or waiting I/O). + +Since a return probe is implemented by replacing the return +address with the trampoline's address, stack backtraces and calls +to __builtin_return_address() will typically yield the trampoline's +address instead of the real return address for kretprobed functions. +(As far as we can tell, __builtin_return_address() is used only +for instrumentation and error reporting.) + +If the number of times a function is called does not match the number +of times it returns, registering a return probe on that function may +produce undesirable results. In such a case, a line: +kretprobe BUG!: Processing kretprobe d000000000041aa8 @ c00000000004f48c +gets printed. With this information, one will be able to correlate the +exact instance of the kretprobe that caused the problem. We have the +do_exit() case covered. do_execve() and do_fork() are not an issue. +We're unaware of other specific cases where this could be a problem. + +If, upon entry to or exit from a function, the CPU is running on +a stack other than that of the current task, registering a return +probe on that function may produce undesirable results. For this +reason, Kprobes doesn't support return probes (or kprobes) +on the x86_64 version of __switch_to(); the registration functions +return -EINVAL. 
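+
+One practical consequence of the nmissed counters mentioned above is
+that instrumentation can report, at unregistration time, how many hits
+it failed to observe. A sketch, assuming kp and rp are a struct kprobe
+and a struct kretprobe registered earlier::
+
+	unregister_kprobe(&kp);
+	if (kp.nmissed)
+		pr_info("kprobe on %s missed %lu hits\n",
+			kp.symbol_name, (unsigned long)kp.nmissed);
+
+	unregister_kretprobe(&rp);
+	if (rp.nmissed)
+		pr_info("kretprobe missed %d instances; consider raising maxactive\n",
+			rp.nmissed);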
+ +On x86/x86-64, since the Jump Optimization of Kprobes modifies +instructions widely, there are some limitations to optimization. To +explain it, we introduce some terminology. Imagine a 3-instruction +sequence consisting of a two 2-byte instructions and one 3-byte +instruction. + +:: + + IA + | + [-2][-1][0][1][2][3][4][5][6][7] + [ins1][ins2][ ins3 ] + [<- DCR ->] + [<- JTPR ->] + + ins1: 1st Instruction + ins2: 2nd Instruction + ins3: 3rd Instruction + IA: Insertion Address + JTPR: Jump Target Prohibition Region + DCR: Detoured Code Region + +The instructions in DCR are copied to the out-of-line buffer +of the kprobe, because the bytes in DCR are replaced by +a 5-byte jump instruction. So there are several limitations. + +a) The instructions in DCR must be relocatable. +b) The instructions in DCR must not include a call instruction. +c) JTPR must not be targeted by any jump or call instruction. +d) DCR must not straddle the border between functions. + +Anyway, these limitations are checked by the in-kernel instruction +decoder, so you don't need to worry about that. + +Probe Overhead +============== + +On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0 +microseconds to process. Specifically, a benchmark that hits the same +probepoint repeatedly, firing a simple handler each time, reports 1-2 +million hits per second, depending on the architecture. A return-probe +hit typically takes 50-75% longer than a kprobe hit. +When you have a return probe set on a function, adding a kprobe at +the entry to that function adds essentially no overhead. + +Here are sample overhead figures (in usec) for different architectures:: + + k = kprobe; r = return probe; kr = kprobe + return probe + on same function + + i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips + k = 0.57 usec; r = 0.92; kr = 0.99 + + x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips + k = 0.49 usec; r = 0.80; kr = 0.82 + + ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU) + k = 0.77 usec; r = 1.26; kr = 1.45 + +Optimized Probe Overhead +------------------------ + +Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to +process. Here are sample overhead figures (in usec) for x86 architectures:: + + k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe, + r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe. + + i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips + k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33 + + x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips + k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30 + +TODO +==== + +a. SystemTap (http://sourceware.org/systemtap): Provides a simplified + programming interface for probe-based instrumentation. Try it out. +b. Kernel return probes for sparc64. +c. Support for other architectures. +d. User-space probes. +e. Watchpoint probes (which fire on data references). 
+ +Kprobes Example +=============== + +See samples/kprobes/kprobe_example.c + +Kretprobes Example +================== + +See samples/kprobes/kretprobe_example.c + +For additional information on Kprobes, refer to the following URLs: + +- http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe +- http://www.redhat.com/magazine/005mar05/features/kprobes/ +- http://www-users.cs.umn.edu/~boutcher/kprobes/ +- http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115) + +Deprecated Features +=================== + +Jprobes is now a deprecated feature. People who are depending on it should +migrate to other tracing features or use older kernels. Please consider to +migrate your tool to one of the following options: + +- Use trace-event to trace target function with arguments. + + trace-event is a low-overhead (and almost no visible overhead if it + is off) statically defined event interface. You can define new events + and trace it via ftrace or any other tracing tools. + + See the following urls: + + - https://lwn.net/Articles/379903/ + - https://lwn.net/Articles/381064/ + - https://lwn.net/Articles/383362/ + +- Use ftrace dynamic events (kprobe event) with perf-probe. + + If you build your kernel with debug info (CONFIG_DEBUG_INFO=y), you can + find which register/stack is assigned to which local variable or arguments + by using perf-probe and set up new event to trace it. + + See following documents: + + - Documentation/trace/kprobetrace.rst + - Documentation/trace/events.rst + - tools/perf/Documentation/perf-probe.txt + + +The kprobes debugfs interface +============================= + + +With recent kernels (> 2.6.20) the list of registered kprobes is visible +under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at //sys/kernel/debug). + +/sys/kernel/debug/kprobes/list: Lists all registered probes on the system:: + + c015d71a k vfs_read+0x0 + c03dedc5 r tcp_v4_rcv+0x0 + +The first column provides the kernel address where the probe is inserted. +The second column identifies the type of probe (k - kprobe and r - kretprobe) +while the third column specifies the symbol+offset of the probe. +If the probed function belongs to a module, the module name is also +specified. Following columns show probe status. If the probe is on +a virtual address that is no longer valid (module init sections, module +virtual addresses that correspond to modules that've been unloaded), +such probes are marked with [GONE]. If the probe is temporarily disabled, +such probes are marked with [DISABLED]. If the probe is optimized, it is +marked with [OPTIMIZED]. If the probe is ftrace-based, it is marked with +[FTRACE]. + +/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. + +Provides a knob to globally and forcibly turn registered kprobes ON or OFF. +By default, all kprobes are enabled. By echoing "0" to this file, all +registered probes will be disarmed, till such time a "1" is echoed to this +file. Note that this knob just disarms and arms all kprobes and doesn't +change each probe's disabling state. This means that disabled kprobes (marked +[DISABLED]) will be not enabled if you turn ON all kprobes by this knob. + + +The kprobes sysctl interface +============================ + +/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF. + +When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides +a knob to globally and forcibly turn jump optimization (see section +:ref:`kprobes_jump_optimization`) ON or OFF. 
By default, jump optimization +is allowed (ON). If you echo "0" to this file or set +"debug.kprobes_optimization" to 0 via sysctl, all optimized probes will be +unoptimized, and any new probes registered after that will not be optimized. + +Note that this knob *changes* the optimized state. This means that optimized +probes (marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be +removed). If the knob is turned on, they will be optimized again. + diff --git a/Documentation/staging/lzo.rst b/Documentation/staging/lzo.rst new file mode 100644 index 000000000000..f65b51523014 --- /dev/null +++ b/Documentation/staging/lzo.rst @@ -0,0 +1,202 @@ +=========================================================== +LZO stream format as understood by Linux's LZO decompressor +=========================================================== + +Introduction +============ + + This is not a specification. No specification seems to be publicly available + for the LZO stream format. This document describes what input format the LZO + decompressor as implemented in the Linux kernel understands. The file subject + of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on + the compressor nor on any other implementations though it seems likely that + the format matches the standard one. The purpose of this document is to + better understand what the code does in order to propose more efficient fixes + for future bug reports. + +Description +=========== + + The stream is composed of a series of instructions, operands, and data. The + instructions consist in a few bits representing an opcode, and bits forming + the operands for the instruction, whose size and position depend on the + opcode and on the number of literals copied by previous instruction. The + operands are used to indicate: + + - a distance when copying data from the dictionary (past output buffer) + - a length (number of bytes to copy from dictionary) + - the number of literals to copy, which is retained in variable "state" + as a piece of information for next instructions. + + Optionally depending on the opcode and operands, extra data may follow. These + extra data can be a complement for the operand (eg: a length or a distance + encoded on larger values), or a literal to be copied to the output buffer. + + The first byte of the block follows a different encoding from other bytes, it + seems to be optimized for literal use only, since there is no dictionary yet + prior to that byte. + + Lengths are always encoded on a variable size starting with a small number + of bits in the operand. If the number of bits isn't enough to represent the + length, up to 255 may be added in increments by consuming more bytes with a + rate of at most 255 per extra byte (thus the compression ratio cannot exceed + around 255:1). The variable length encoding using #bits is always the same:: + + length = byte & ((1 << #bits) - 1) + if (!length) { + length = ((1 << #bits) - 1) + length += 255*(number of zero bytes) + length += first-non-zero-byte + } + length += constant (generally 2 or 3) + + For references to the dictionary, distances are relative to the output + pointer. Distances are encoded using very few bits belonging to certain + ranges, resulting in multiple copy instructions using different encodings. + Certain encodings involve one extra byte, others involve two extra bytes + forming a little-endian 16-bit quantity (marked LE16 below). 
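+
+ As an illustration only (the helper below is not the kernel implementation;
+ its name and calling convention are invented for this sketch), the variable
+ length encoding described above can be decoded in C roughly like this::
+
+        /*
+         * Decode a length whose low 'bits' bits are taken from the opcode
+         * 'byte'; *in points to the bytes following the opcode and is
+         * advanced as extra bytes are consumed (bounds checks omitted).
+         */
+        static size_t decode_length(const unsigned char **in, unsigned char byte,
+                                    unsigned int bits, size_t constant)
+        {
+                size_t mask = ((size_t)1 << bits) - 1;
+                size_t length = byte & mask;
+
+                if (!length) {
+                        length = mask;
+                        while (**in == 0) {     /* each zero byte adds 255 */
+                                length += 255;
+                                (*in)++;
+                        }
+                        length += *(*in)++;     /* first non-zero byte */
+                }
+                return length + constant;       /* constant is generally 2 or 3 */
+        }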
+
+ After any instruction except the large literal copy, 0, 1, 2 or 3 literals
+ are copied before starting the next instruction. The number of literals that
+ were copied may change the meaning and behaviour of the next instruction. In
+ practice, only one instruction needs to know whether 0, less than 4, or more
+ literals were copied. This is the information stored in the <state> variable
+ in this implementation. This number of immediate literals to be copied is
+ generally encoded in the last two bits of the instruction but may also be
+ taken from the last two bits of an extra operand (eg: distance).
+
+ End of stream is declared when a block copy of distance 0 is seen. Only one
+ instruction may encode this distance (0001HLLL), it takes one LE16 operand
+ for the distance, thus requiring 3 bytes.
+
+ .. important::
+
+    In the code some length checks are missing because certain instructions
+    are called under the assumption that a certain number of bytes follow
+    because it has already been guaranteed before parsing the instructions.
+    They just have to "refill" this credit if they consume extra bytes. This
+    is an implementation design choice independent of the algorithm or
+    encoding.
+
+Versions
+
+0: Original version
+1: LZO-RLE
+
+Version 1 of LZO implements an extension to encode runs of zeros using run
+length encoding. This improves speed for data with many zeros, which is a
+common case for zram. This modifies the bitstream in a backwards compatible way
+(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data).
+
+For maximum compatibility, both versions are available under different names
+(lzo and lzo-rle). Differences in the encoding are noted in this document with
+e.g.: version 1 only.
+
+Byte sequences
+==============
+
+ First byte encoding::
+
+   0..16   : follow regular instruction encoding, see below. It is worth
+             noting that code 16 will represent a block copy from the
+             dictionary which is empty, and that it will always be
+             invalid at this place.
+
+   17      : bitstream version. If the first byte is 17, and compressed
+             stream length is at least 5 bytes (length of shortest possible
+             versioned bitstream), the next byte gives the bitstream version
+             (version 1 only).
+             Otherwise, the bitstream version is 0.
+
+   18..21  : copy 0..3 literals
+             state = (byte - 17) = 0..3  [ copy <state> literals ]
+             skip byte
+
+   22..255 : copy literal string
+             length = (byte - 17) = 4..238
+             state = 4 [ don't copy extra literals ]
+             skip byte
+
+ Instruction encoding::
+
+   0 0 0 0 X X X X  (0..15)
+     Depends on the number of literals copied by the last instruction.
+     If last instruction did not copy any literal (state == 0), this
+     encoding will be a copy of 4 or more literals, and must be interpreted
+     like this :
+
+        0 0 0 0 L L L L  (0..15)  : copy long literal string
+        length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
+        state = 4  (no extra literals are copied)
+
+     If last instruction used to copy between 1 to 3 literals (encoded in
+     the instruction's opcode or distance), the instruction is a copy of a
+     2-byte block from the dictionary within a 1kB distance. It is worth
+     noting that this instruction provides little savings since it uses 2
+     bytes to encode a copy of 2 other bytes but it encodes the number of
+     following literals for free.
It must be interpreted like this : + + 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance + length = 2 + state = S (copy S literals after this block) + Always followed by exactly one byte : H H H H H H H H + distance = (H << 2) + D + 1 + + If last instruction used to copy 4 or more literals (as detected by + state == 4), the instruction becomes a copy of a 3-byte block from the + dictionary from a 2..3kB distance, and must be interpreted like this : + + 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance + length = 3 + state = S (copy S literals after this block) + Always followed by exactly one byte : H H H H H H H H + distance = (H << 2) + D + 2049 + + 0 0 0 1 H L L L (16..31) + Copy of a block within 16..48kB distance (preferably less than 10B) + length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte) + Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S + distance = 16384 + (H << 14) + D + state = S (copy S literals after this block) + End of stream is reached if distance == 16384 + In version 1 only, to prevent ambiguity with the RLE case when + ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the + compressor must not emit block copies where distance and length + meet these conditions. + + In version 1 only, this instruction is also used to encode a run of + zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. + In this case, it is followed by a fourth byte, X. + run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 + + 0 0 1 L L L L L (32..63) + Copy of small block within 16kB distance (preferably less than 34B) + length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte) + Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S + distance = D + 1 + state = S (copy S literals after this block) + + 0 1 L D D D S S (64..127) + Copy 3-4 bytes from block within 2kB distance + state = S (copy S literals after this block) + length = 3 + L + Always followed by exactly one byte : H H H H H H H H + distance = (H << 3) + D + 1 + + 1 L L D D D S S (128..255) + Copy 5-8 bytes from block within 2kB distance + state = S (copy S literals after this block) + length = 5 + L + Always followed by exactly one byte : H H H H H H H H + distance = (H << 3) + D + 1 + +Authors +======= + + This document was written by Willy Tarreau on 2014/07/19 during an + analysis of the decompression code available in Linux 3.16-rc5, and updated + by Dave Rodgman on 2018/10/30 to introduce run-length + encoding. The code is tricky, it is possible that this document contains + mistakes or that a few corner cases were overlooked. In any case, please + report any doubt, fix, or proposed updates to the author(s) so that the + document can be updated. diff --git a/Documentation/staging/remoteproc.rst b/Documentation/staging/remoteproc.rst new file mode 100644 index 000000000000..9cccd3dd6a4b --- /dev/null +++ b/Documentation/staging/remoteproc.rst @@ -0,0 +1,359 @@ +========================== +Remote Processor Framework +========================== + +Introduction +============ + +Modern SoCs typically have heterogeneous remote processor devices in asymmetric +multiprocessing (AMP) configurations, which may be running different instances +of operating system, whether it's Linux or any other flavor of real-time OS. + +OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. 
+In a typical configuration, the dual Cortex-A9 is running Linux in an SMP
+configuration, and each of the other three cores (two M3 cores and a DSP)
+is running its own instance of RTOS in an AMP configuration.
+
+The remoteproc framework allows different platforms/architectures to
+control (power on, load firmware, power off) those remote processors while
+abstracting the hardware differences, so the entire driver doesn't need to be
+duplicated. In addition, this framework also adds rpmsg virtio devices
+for remote processors that support this kind of communication. This way,
+platform-specific remoteproc drivers only need to provide a few low-level
+handlers, and all rpmsg drivers will then just work
+(for more information about the virtio-based rpmsg bus and its drivers,
+please read Documentation/staging/rpmsg.rst).
+Registration of other types of virtio devices is now also possible. Firmwares
+just need to publish which virtio devices they support, and then
+remoteproc will add those devices. This makes it possible to reuse the
+existing virtio drivers with remote processor backends at a minimal development
+cost.
+
+User API
+========
+
+::
+
+  int rproc_boot(struct rproc *rproc)
+
+Boot a remote processor (i.e. load its firmware, power it on, ...).
+
+If the remote processor is already powered on, this function immediately
+returns (successfully).
+
+Returns 0 on success, and an appropriate error value otherwise.
+Note: to use this function you should already have a valid rproc
+handle. There are several ways to achieve that cleanly (devres, pdata,
+the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we
+might also consider using dev_archdata for this).
+
+::
+
+  void rproc_shutdown(struct rproc *rproc)
+
+Power off a remote processor (previously booted with rproc_boot()).
+In case @rproc is still being used by an additional user(s), then
+this function will just decrement the power refcount and exit,
+without really powering off the device.
+
+Every call to rproc_boot() must (eventually) be accompanied by a call
+to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
+
+.. note::
+
+  we're not decrementing the rproc's refcount, only the power refcount,
+  which means that the @rproc handle stays valid even after
+  rproc_shutdown() returns, and users can still use it with a subsequent
+  rproc_boot(), if needed.
+
+::
+
+  struct rproc *rproc_get_by_phandle(phandle phandle)
+
+Find an rproc handle using a device tree phandle. Returns the rproc
+handle on success, and NULL on failure. This function increments
+the remote processor's refcount, so always use rproc_put() to
+decrement it back once rproc isn't needed anymore.
+
+Typical usage
+=============
+
+::
+
+  #include <linux/remoteproc.h>
+
+  /* in case we were given a valid 'rproc' handle */
+  int dummy_rproc_example(struct rproc *my_rproc)
+  {
+        int ret;
+
+        /* let's power on and boot our remote processor */
+        ret = rproc_boot(my_rproc);
+        if (ret) {
+                /*
+                 * something went wrong. handle it and leave.
+                 */
+        }
+
+        /*
+         * our remote processor is now powered on... give it some work
+         */
+
+        /* let's shut it down now */
+        rproc_shutdown(my_rproc);
+
+        return 0;
+  }
+
+API for implementors
+====================
+
+::
+
+  struct rproc *rproc_alloc(struct device *dev, const char *name,
+                            const struct rproc_ops *ops,
+                            const char *firmware, int len)
+
+Allocate a new remote processor handle, but don't register
+it yet.
Required parameters are the underlying device, the +name of this remote processor, platform-specific ops handlers, +the name of the firmware to boot this rproc with, and the +length of private data needed by the allocating rproc driver (in bytes). + +This function should be used by rproc implementations during +initialization of the remote processor. + +After creating an rproc handle using this function, and when ready, +implementations should then call rproc_add() to complete +the registration of the remote processor. + +On success, the new rproc is returned, and on failure, NULL. + +.. note:: + + **never** directly deallocate @rproc, even if it was not registered + yet. Instead, when you need to unroll rproc_alloc(), use rproc_free(). + +:: + + void rproc_free(struct rproc *rproc) + +Free an rproc handle that was allocated by rproc_alloc. + +This function essentially unrolls rproc_alloc(), by decrementing the +rproc's refcount. It doesn't directly free rproc; that would happen +only if there are no other references to rproc and its refcount now +dropped to zero. + +:: + + int rproc_add(struct rproc *rproc) + +Register @rproc with the remoteproc framework, after it has been +allocated with rproc_alloc(). + +This is called by the platform-specific rproc implementation, whenever +a new remote processor device is probed. + +Returns 0 on success and an appropriate error code otherwise. +Note: this function initiates an asynchronous firmware loading +context, which will look for virtio devices supported by the rproc's +firmware. + +If found, those virtio devices will be created and added, so as a result +of registering this remote processor, additional virtio drivers might get +probed. + +:: + + int rproc_del(struct rproc *rproc) + +Unroll rproc_add(). + +This function should be called when the platform specific rproc +implementation decides to remove the rproc device. it should +_only_ be called if a previous invocation of rproc_add() +has completed successfully. + +After rproc_del() returns, @rproc is still valid, and its +last refcount should be decremented by calling rproc_free(). + +Returns 0 on success and -EINVAL if @rproc isn't valid. + +:: + + void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) + +Report a crash in a remoteproc + +This function must be called every time a crash is detected by the +platform specific rproc implementation. This should not be called from a +non-remoteproc driver. This function can be called from atomic/interrupt +context. + +Implementation callbacks +======================== + +These callbacks should be provided by platform-specific remoteproc +drivers:: + + /** + * struct rproc_ops - platform-specific device handlers + * @start: power on the device and boot it + * @stop: power off the device + * @kick: kick a virtqueue (virtqueue id given as a parameter) + */ + struct rproc_ops { + int (*start)(struct rproc *rproc); + int (*stop)(struct rproc *rproc); + void (*kick)(struct rproc *rproc, int vqid); + }; + +Every remoteproc implementation should at least provide the ->start and ->stop +handlers. If rpmsg/virtio functionality is also desired, then the ->kick handler +should be provided as well. + +The ->start() handler takes an rproc handle and should then power on the +device and boot it (use rproc->priv to access platform-specific private data). +The boot address, in case needed, can be found in rproc->bootaddr (remoteproc +core puts there the ELF entry point). 
+On success, 0 should be returned, and on failure, an appropriate error code. + +The ->stop() handler takes an rproc handle and powers the device down. +On success, 0 is returned, and on failure, an appropriate error code. + +The ->kick() handler takes an rproc handle, and an index of a virtqueue +where new message was placed in. Implementations should interrupt the remote +processor and let it know it has pending messages. Notifying remote processors +the exact virtqueue index to look in is optional: it is easy (and not +too expensive) to go through the existing virtqueues and look for new buffers +in the used rings. + +Binary Firmware Structure +========================= + +At this point remoteproc supports ELF32 and ELF64 firmware binaries. However, +it is quite expected that other platforms/devices which we'd want to +support with this framework will be based on different binary formats. + +When those use cases show up, we will have to decouple the binary format +from the framework core, so we can support several binary formats without +duplicating common code. + +When the firmware is parsed, its various segments are loaded to memory +according to the specified device address (might be a physical address +if the remote processor is accessing memory directly). + +In addition to the standard ELF segments, most remote processors would +also include a special section which we call "the resource table". + +The resource table contains system resources that the remote processor +requires before it should be powered on, such as allocation of physically +contiguous memory, or iommu mapping of certain on-chip peripherals. +Remotecore will only power up the device after all the resource table's +requirement are met. + +In addition to system resources, the resource table may also contain +resource entries that publish the existence of supported features +or configurations by the remote processor, such as trace buffers and +supported virtio devices (and their configurations). + +The resource table begins with this header:: + + /** + * struct resource_table - firmware resource table header + * @ver: version number + * @num: number of resource entries + * @reserved: reserved (must be zero) + * @offset: array of offsets pointing at the various resource entries + * + * The header of the resource table, as expressed by this structure, + * contains a version number (should we need to change this format in the + * future), the number of available resource entries, and their offsets + * in the table. + */ + struct resource_table { + u32 ver; + u32 num; + u32 reserved[2]; + u32 offset[0]; + } __packed; + +Immediately following this header are the resource entries themselves, +each of which begins with the following resource entry header:: + + /** + * struct fw_rsc_hdr - firmware resource entry header + * @type: resource type + * @data: resource data + * + * Every resource entry begins with a 'struct fw_rsc_hdr' header providing + * its @type. The content of the entry itself will immediately follow + * this header, and it should be parsed according to the resource type. + */ + struct fw_rsc_hdr { + u32 type; + u8 data[0]; + } __packed; + +Some resources entries are mere announcements, where the host is informed +of specific remoteproc configuration. Other entries require the host to +do something (e.g. allocate a system resource). Sometimes a negotiation +is expected, where the firmware requests a resource, and once allocated, +the host should provide back its details (e.g. 
address of an allocated +memory region). + +Here are the various resource types that are currently supported:: + + /** + * enum fw_resource_type - types of resource entries + * + * @RSC_CARVEOUT: request for allocation of a physically contiguous + * memory region. + * @RSC_DEVMEM: request to iommu_map a memory-based peripheral. + * @RSC_TRACE: announces the availability of a trace buffer into which + * the remote processor will be writing logs. + * @RSC_VDEV: declare support for a virtio device, and serve as its + * virtio header. + * @RSC_LAST: just keep this one at the end + * @RSC_VENDOR_START: start of the vendor specific resource types range + * @RSC_VENDOR_END: end of the vendor specific resource types range + * + * Please note that these values are used as indices to the rproc_handle_rsc + * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to + * check the validity of an index before the lookup table is accessed, so + * please update it as needed. + */ + enum fw_resource_type { + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, + RSC_VENDOR_START = 128, + RSC_VENDOR_END = 512, + }; + +For more details regarding a specific resource type, please see its +dedicated structure in include/linux/remoteproc.h. + +We also expect that platform-specific resource entries will show up +at some point. When that happens, we could easily add a new RSC_PLATFORM +type, and hand those resources to the platform-specific rproc driver to handle. + +Virtio and remoteproc +===================== + +The firmware should provide remoteproc information about virtio devices +that it supports, and their configurations: a RSC_VDEV resource entry +should specify the virtio device id (as in virtio_ids.h), virtio features, +virtio config space, vrings information, etc. + +When a new remote processor is registered, the remoteproc framework +will look for its resource table and will register the virtio devices +it supports. A firmware may support any number of virtio devices, and +of any type (a single remote processor can also easily support several +rpmsg virtio devices this way, if desired). + +Of course, RSC_VDEV resource entries are only good enough for static +allocation of virtio devices. Dynamic allocations will also be made possible +using the rpmsg bus (similar to how we already do dynamic allocations of +rpmsg channels; read more about it in rpmsg.txt). diff --git a/Documentation/staging/rpmsg.rst b/Documentation/staging/rpmsg.rst new file mode 100644 index 000000000000..24b7a9e1a5f9 --- /dev/null +++ b/Documentation/staging/rpmsg.rst @@ -0,0 +1,341 @@ +============================================ +Remote Processor Messaging (rpmsg) Framework +============================================ + +.. note:: + + This document describes the rpmsg bus and how to write rpmsg drivers. + To learn how to add rpmsg support for new platforms, check out remoteproc.txt + (also a resident of Documentation/). + +Introduction +============ + +Modern SoCs typically employ heterogeneous remote processor devices in +asymmetric multiprocessing (AMP) configurations, which may be running +different instances of operating system, whether it's Linux or any other +flavor of real-time OS. + +OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. +Typically, the dual cortex-A9 is running Linux in a SMP configuration, +and each of the other three cores (two M3 cores and a DSP) is running +its own instance of RTOS in an AMP configuration. 
+ +Typically AMP remote processors employ dedicated DSP codecs and multimedia +hardware accelerators, and therefore are often used to offload CPU-intensive +multimedia tasks from the main application processor. + +These remote processors could also be used to control latency-sensitive +sensors, drive random hardware blocks, or just perform background tasks +while the main CPU is idling. + +Users of those remote processors can either be userland apps (e.g. multimedia +frameworks talking with remote OMX components) or kernel drivers (controlling +hardware accessible only by the remote processor, reserving kernel-controlled +resources on behalf of the remote processor, etc..). + +Rpmsg is a virtio-based messaging bus that allows kernel drivers to communicate +with remote processors available on the system. In turn, drivers could then +expose appropriate user space interfaces, if needed. + +When writing a driver that exposes rpmsg communication to userland, please +keep in mind that remote processors might have direct access to the +system's physical memory and other sensitive hardware resources (e.g. on +OMAP4, remote cores and hardware accelerators may have direct access to the +physical memory, gpio banks, dma controllers, i2c bus, gptimers, mailbox +devices, hwspinlocks, etc..). Moreover, those remote processors might be +running RTOS where every task can access the entire memory/devices exposed +to the processor. To minimize the risks of rogue (or buggy) userland code +exploiting remote bugs, and by that taking over the system, it is often +desired to limit userland to specific rpmsg channels (see definition below) +it can send messages on, and if possible, minimize how much control +it has over the content of the messages. + +Every rpmsg device is a communication channel with a remote processor (thus +rpmsg devices are called channels). Channels are identified by a textual name +and have a local ("source") rpmsg address, and remote ("destination") rpmsg +address. + +When a driver starts listening on a channel, its rx callback is bound with +a unique rpmsg local address (a 32-bit integer). This way when inbound messages +arrive, the rpmsg core dispatches them to the appropriate driver according +to their destination address (this is done by invoking the driver's rx handler +with the payload of the inbound message). + + +User API +======== + +:: + + int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len); + +sends a message across to the remote processor on a given channel. +The caller should specify the channel, the data it wants to send, +and its length (in bytes). The message will be sent on the specified +channel, i.e. its source and destination address fields will be +set to the channel's src and dst addresses. + +In case there are no TX buffers available, the function will block until +one becomes available (i.e. until the remote processor consumes +a tx buffer and puts it back on virtio's used descriptor ring), +or a timeout of 15 seconds elapses. When the latter happens, +-ERESTARTSYS is returned. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst); + +sends a message across to the remote processor on a given channel, +to a destination address provided by the caller. + +The caller should specify the channel, the data it wants to send, +its length (in bytes), and an explicit destination address. 
+ +The message will then be sent to the remote processor to which the +channel belongs, using the channel's src address, and the user-provided +dst address (thus the channel's dst address will be ignored). + +In case there are no TX buffers available, the function will block until +one becomes available (i.e. until the remote processor consumes +a tx buffer and puts it back on virtio's used descriptor ring), +or a timeout of 15 seconds elapses. When the latter happens, +-ERESTARTSYS is returned. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, + void *data, int len); + + +sends a message across to the remote processor, using the src and dst +addresses provided by the user. + +The caller should specify the channel, the data it wants to send, +its length (in bytes), and explicit source and destination addresses. +The message will then be sent to the remote processor to which the +channel belongs, but the channel's src and dst addresses will be +ignored (and the user-provided addresses will be used instead). + +In case there are no TX buffers available, the function will block until +one becomes available (i.e. until the remote processor consumes +a tx buffer and puts it back on virtio's used descriptor ring), +or a timeout of 15 seconds elapses. When the latter happens, +-ERESTARTSYS is returned. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len); + +sends a message across to the remote processor on a given channel. +The caller should specify the channel, the data it wants to send, +and its length (in bytes). The message will be sent on the specified +channel, i.e. its source and destination address fields will be +set to the channel's src and dst addresses. + +In case there are no TX buffers available, the function will immediately +return -ENOMEM without waiting until one becomes available. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst) + + +sends a message across to the remote processor on a given channel, +to a destination address provided by the user. + +The user should specify the channel, the data it wants to send, +its length (in bytes), and an explicit destination address. + +The message will then be sent to the remote processor to which the +channel belongs, using the channel's src address, and the user-provided +dst address (thus the channel's dst address will be ignored). + +In case there are no TX buffers available, the function will immediately +return -ENOMEM without waiting until one becomes available. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, + void *data, int len); + + +sends a message across to the remote processor, using source and +destination addresses provided by the user. + +The user should specify the channel, the data it wants to send, +its length (in bytes), and explicit source and destination addresses. 
+The message will then be sent to the remote processor to which the +channel belongs, but the channel's src and dst addresses will be +ignored (and the user-provided addresses will be used instead). + +In case there are no TX buffers available, the function will immediately +return -ENOMEM without waiting until one becomes available. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev, + void (*cb)(struct rpmsg_channel *, void *, int, void *, u32), + void *priv, u32 addr); + +every rpmsg address in the system is bound to an rx callback (so when +inbound messages arrive, they are dispatched by the rpmsg bus using the +appropriate callback handler) by means of an rpmsg_endpoint struct. + +This function allows drivers to create such an endpoint, and by that, +bind a callback, and possibly some private data too, to an rpmsg address +(either one that is known in advance, or one that will be dynamically +assigned for them). + +Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint +is already created for them when they are probed by the rpmsg bus +(using the rx callback they provide when they registered to the rpmsg bus). + +So things should just work for simple drivers: they already have an +endpoint, their rx callback is bound to their rpmsg address, and when +relevant inbound messages arrive (i.e. messages which their dst address +equals to the src address of their rpmsg channel), the driver's handler +is invoked to process it. + +That said, more complicated drivers might do need to allocate +additional rpmsg addresses, and bind them to different rx callbacks. +To accomplish that, those drivers need to call this function. +Drivers should provide their channel (so the new endpoint would bind +to the same remote processor their channel belongs to), an rx callback +function, an optional private data (which is provided back when the +rx callback is invoked), and an address they want to bind with the +callback. If addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will +dynamically assign them an available rpmsg address (drivers should have +a very good reason why not to always use RPMSG_ADDR_ANY here). + +Returns a pointer to the endpoint on success, or NULL on error. + +:: + + void rpmsg_destroy_ept(struct rpmsg_endpoint *ept); + + +destroys an existing rpmsg endpoint. user should provide a pointer +to an rpmsg endpoint that was previously created with rpmsg_create_ept(). + +:: + + int register_rpmsg_driver(struct rpmsg_driver *rpdrv); + + +registers an rpmsg driver with the rpmsg bus. user should provide +a pointer to an rpmsg_driver struct, which contains the driver's +->probe() and ->remove() functions, an rx callback, and an id_table +specifying the names of the channels this driver is interested to +be probed with. + +:: + + void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv); + + +unregisters an rpmsg driver from the rpmsg bus. user should provide +a pointer to a previously-registered rpmsg_driver struct. +Returns 0 on success, and an appropriate error value on failure. + + +Typical usage +============= + +The following is a simple rpmsg driver, that sends an "hello!" message +on probe(), and whenever it receives an incoming message, it dumps its +content to the console. 
+
+::
+
+        #include <linux/kernel.h>
+        #include <linux/module.h>
+        #include <linux/rpmsg.h>
+
+        static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
+                                                void *priv, u32 src)
+        {
+                print_hex_dump(KERN_INFO, "incoming message:", DUMP_PREFIX_NONE,
+                                                16, 1, data, len, true);
+        }
+
+        static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
+        {
+                int err;
+
+                dev_info(&rpdev->dev, "chnl: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst);
+
+                /* send a message on our channel */
+                err = rpmsg_send(rpdev, "hello!", 6);
+                if (err) {
+                        pr_err("rpmsg_send failed: %d\n", err);
+                        return err;
+                }
+
+                return 0;
+        }
+
+        static void rpmsg_sample_remove(struct rpmsg_channel *rpdev)
+        {
+                dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n");
+        }
+
+        static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
+                { .name = "rpmsg-client-sample" },
+                { },
+        };
+        MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table);
+
+        static struct rpmsg_driver rpmsg_sample_client = {
+                .drv.name       = KBUILD_MODNAME,
+                .id_table       = rpmsg_driver_sample_id_table,
+                .probe          = rpmsg_sample_probe,
+                .callback       = rpmsg_sample_cb,
+                .remove         = rpmsg_sample_remove,
+        };
+        module_rpmsg_driver(rpmsg_sample_client);
+
+.. note::
+
+   a similar sample which can be built and loaded can be found
+   in samples/rpmsg/.
+
+Allocations of rpmsg channels
+=============================
+
+At this point we only support dynamic allocations of rpmsg channels.
+
+This is possible only with remote processors that have the VIRTIO_RPMSG_F_NS
+virtio device feature set. This feature bit means that the remote
+processor supports dynamic name service announcement messages.
+
+When this feature is enabled, creation of rpmsg devices (i.e. channels)
+is completely dynamic: the remote processor announces the existence of a
+remote rpmsg service by sending a name service message (which contains
+the name and rpmsg addr of the remote service, see struct rpmsg_ns_msg).
+
+This message is then handled by the rpmsg bus, which in turn dynamically
+creates and registers an rpmsg channel (which represents the remote service).
+If/when a relevant rpmsg driver is registered, it will be immediately probed
+by the bus, and can then start sending messages to the remote service.
+
+The plan is also to add static creation of rpmsg channels via the virtio
+config space, but it's not implemented yet.
diff --git a/Documentation/staging/speculation.rst b/Documentation/staging/speculation.rst
new file mode 100644
index 000000000000..8045d99bcf12
--- /dev/null
+++ b/Documentation/staging/speculation.rst
@@ -0,0 +1,92 @@
+===========
+Speculation
+===========
+
+This document explains potential effects of speculation, and how undesirable
+effects can be mitigated portably using common APIs.
+
+------------------------------------------------------------------------------
+
+To improve performance and minimize average latencies, many contemporary CPUs
+employ speculative execution techniques such as branch prediction, performing
+work which may be discarded at a later stage.
+
+Typically speculative execution cannot be observed from architectural state,
+such as the contents of registers. However, in some cases it is possible to
+observe its impact on microarchitectural state, such as the presence or
+absence of data in caches. Such state may form side-channels which can be
+observed to extract secret information.
+
+For example, in the presence of branch prediction, it is possible for bounds
+checks to be ignored by code which is speculatively executed. Consider the
+following code::
+
+        int load_array(int *array, unsigned int index)
+        {
+                if (index >= MAX_ARRAY_ELEMS)
+                        return 0;
+                else
+                        return array[index];
+        }
+
+Which, on arm64, may be compiled to an assembly sequence such as::
+
+        CMP     <index>, #MAX_ARRAY_ELEMS
+        B.LT    less
+        MOV     <returnval>, #0
+        RET
+  less:
+        LDR     <returnval>, [<array>, <index>]
+        RET
+
+It is possible that a CPU mis-predicts the conditional branch, and
+speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
+value will subsequently be discarded, but the speculated load may affect
+microarchitectural state which can be subsequently measured.
+
+More complex sequences involving multiple dependent memory accesses may
+result in sensitive information being leaked. Consider the following
+code, building on the prior example::
+
+        int load_dependent_arrays(int *arr1, int *arr2, int index)
+        {
+                int val1, val2;
+
+                val1 = load_array(arr1, index);
+                val2 = load_array(arr2, val1);
+
+                return val2;
+        }
+
+Under speculation, the first call to load_array() may return the value
+of an out-of-bounds address, while the second call will influence
+microarchitectural state dependent on this value. This may provide an
+arbitrary read primitive.
+
+====================================
+Mitigating speculation side-channels
+====================================
+
+The kernel provides a generic API to ensure that bounds checks are
+respected even under speculation. Architectures which are affected by
+speculation-based side-channels are expected to implement these
+primitives.
+
+The array_index_nospec() helper in <linux/nospec.h> can be used to
+prevent information from being leaked via side-channels.
+
+A call to array_index_nospec(index, size) returns a sanitized index
+value that is bounded to [0, size) even under cpu speculation
+conditions.
+
+This can be used to protect the earlier load_array() example::
+
+        int load_array(int *array, unsigned int index)
+        {
+                if (index >= MAX_ARRAY_ELEMS)
+                        return 0;
+                else {
+                        index = array_index_nospec(index, MAX_ARRAY_ELEMS);
+                        return array[index];
+                }
+        }
diff --git a/Documentation/staging/static-keys.rst b/Documentation/staging/static-keys.rst
new file mode 100644
index 000000000000..38290b9f25eb
--- /dev/null
+++ b/Documentation/staging/static-keys.rst
@@ -0,0 +1,331 @@
+===========
+Static Keys
+===========
+
+.. warning::
+
+   DEPRECATED API:
+
+   The use of 'struct static_key' directly is now DEPRECATED. In addition
+   static_key_{true,false}() is also DEPRECATED. IE DO NOT use the following::
+
+        struct static_key false = STATIC_KEY_INIT_FALSE;
+        struct static_key true = STATIC_KEY_INIT_TRUE;
+        static_key_true()
+        static_key_false()
+
+   The updated API replacements are::
+
+        DEFINE_STATIC_KEY_TRUE(key);
+        DEFINE_STATIC_KEY_FALSE(key);
+        DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+        DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
+        static_branch_likely()
+        static_branch_unlikely()
+
+Abstract
+========
+
+Static keys allow the inclusion of seldom used features in
+performance-sensitive fast-path kernel code, via a GCC feature and a code
+patching technique. A quick example::
+
+        DEFINE_STATIC_KEY_FALSE(key);
+
+        ...
+
+        if (static_branch_unlikely(&key))
+                do unlikely code
+        else
+                do likely code
+
+        ...
+        static_branch_enable(&key);
+        ...
+        static_branch_disable(&key);
+        ...
+
+The static_branch_unlikely() branch will be generated into the code with as little
+impact to the likely code path as possible.
+
+
+Motivation
+==========
+
+
+Currently, tracepoints are implemented using a conditional branch.
The +conditional check requires checking a global variable for each tracepoint. +Although the overhead of this check is small, it increases when the memory +cache comes under pressure (memory cache lines for these global variables may +be shared with other memory accesses). As we increase the number of tracepoints +in the kernel this overhead may become more of an issue. In addition, +tracepoints are often dormant (disabled) and provide no direct kernel +functionality. Thus, it is highly desirable to reduce their impact as much as +possible. Although tracepoints are the original motivation for this work, other +kernel code paths should be able to make use of the static keys facility. + + +Solution +======== + + +gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label: + +https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html + +Using the 'asm goto', we can create branches that are either taken or not taken +by default, without the need to check memory. Then, at run-time, we can patch +the branch site to change the branch direction. + +For example, if we have a simple branch that is disabled by default:: + + if (static_branch_unlikely(&key)) + printk("I am the true branch\n"); + +Thus, by default the 'printk' will not be emitted. And the code generated will +consist of a single atomic 'no-op' instruction (5 bytes on x86), in the +straight-line code path. When the branch is 'flipped', we will patch the +'no-op' in the straight-line codepath with a 'jump' instruction to the +out-of-line true branch. Thus, changing branch direction is expensive but +branch selection is basically 'free'. That is the basic tradeoff of this +optimization. + +This lowlevel patching mechanism is called 'jump label patching', and it gives +the basis for the static keys facility. + +Static key label API, usage and examples +======================================== + + +In order to make use of this optimization you must first define a key:: + + DEFINE_STATIC_KEY_TRUE(key); + +or:: + + DEFINE_STATIC_KEY_FALSE(key); + + +The key must be global, that is, it can't be allocated on the stack or dynamically +allocated at run-time. + +The key is then used in code as:: + + if (static_branch_unlikely(&key)) + do unlikely code + else + do likely code + +Or:: + + if (static_branch_likely(&key)) + do likely code + else + do unlikely code + +Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE, may +be used in either static_branch_likely() or static_branch_unlikely() +statements. + +Branch(es) can be set true via:: + + static_branch_enable(&key); + +or false via:: + + static_branch_disable(&key); + +The branch(es) can then be switched via reference counts:: + + static_branch_inc(&key); + ... + static_branch_dec(&key); + +Thus, 'static_branch_inc()' means 'make the branch true', and +'static_branch_dec()' means 'make the branch false' with appropriate +reference counting. For example, if the key is initialized true, a +static_branch_dec(), will switch the branch to false. And a subsequent +static_branch_inc(), will change the branch back to true. Likewise, if the +key is initialized false, a 'static_branch_inc()', will change the branch to +true. And then a 'static_branch_dec()', will again make the branch false. + +The state and the reference count can be retrieved with 'static_key_enabled()' +and 'static_key_count()'. In general, if you use these functions, they +should be protected with the same mutex used around the enable/disable +or increment/decrement function. 
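+
+As an illustrative sketch (the key and function names here are invented for
+the example), a feature that is off by default could be wired up as follows::
+
+        DEFINE_STATIC_KEY_FALSE(my_feature_key);
+
+        /* fast path: the test is a single no-op in the straight-line code
+         * while the key is false */
+        void my_hot_path(void)
+        {
+                if (static_branch_unlikely(&my_feature_key))
+                        my_feature_do_extra_work();
+
+                my_common_work();
+        }
+
+        /* control path: inc/dec are reference counted, so every call to
+         * my_feature_user_add() must eventually be paired with a call to
+         * my_feature_user_remove() */
+        void my_feature_user_add(void)
+        {
+                static_branch_inc(&my_feature_key);
+        }
+
+        void my_feature_user_remove(void)
+        {
+                static_branch_dec(&my_feature_key);
+        }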
+ +Note that switching branches results in some locks being taken, +particularly the CPU hotplug lock (in order to avoid races against +CPUs being brought in the kernel while the kernel is getting +patched). Calling the static key API from within a hotplug notifier is +thus a sure deadlock recipe. In order to still allow use of the +functionality, the following functions are provided: + + static_key_enable_cpuslocked() + static_key_disable_cpuslocked() + static_branch_enable_cpuslocked() + static_branch_disable_cpuslocked() + +These functions are *not* general purpose, and must only be used when +you really know that you're in the above context, and no other. + +Where an array of keys is required, it can be defined as:: + + DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); + +or:: + + DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); + +4) Architecture level code patching interface, 'jump labels' + + +There are a few functions and macros that architectures must implement in order +to take advantage of this optimization. If there is no architecture support, we +simply fall back to a traditional, load, test, and jump sequence. Also, the +struct jump_entry table must be at least 4-byte aligned because the +static_key->entry field makes use of the two least significant bits. + +* ``select HAVE_ARCH_JUMP_LABEL``, + see: arch/x86/Kconfig + +* ``#define JUMP_LABEL_NOP_SIZE``, + see: arch/x86/include/asm/jump_label.h + +* ``__always_inline bool arch_static_branch(struct static_key *key, bool branch)``, + see: arch/x86/include/asm/jump_label.h + +* ``__always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)``, + see: arch/x86/include/asm/jump_label.h + +* ``void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)``, + see: arch/x86/kernel/jump_label.c + +* ``__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)``, + see: arch/x86/kernel/jump_label.c + +* ``struct jump_entry``, + see: arch/x86/include/asm/jump_label.h + + +5) Static keys / jump label analysis, results (x86_64): + + +As an example, let's add the following branch to 'getppid()', such that the +system call now looks like:: + + SYSCALL_DEFINE0(getppid) + { + int pid; + + + if (static_branch_unlikely(&key)) + + printk("I am the true branch\n"); + + rcu_read_lock(); + pid = task_tgid_vnr(rcu_dereference(current->real_parent)); + rcu_read_unlock(); + + return pid; + } + +The resulting instructions with jump labels generated by GCC is:: + + ffffffff81044290 : + ffffffff81044290: 55 push %rbp + ffffffff81044291: 48 89 e5 mov %rsp,%rbp + ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 + ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax + ffffffff810442a0: 00 00 + ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax + ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax + ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi + ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 + ffffffff810442bc: 5d pop %rbp + ffffffff810442bd: 48 98 cltq + ffffffff810442bf: c3 retq + ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi + ffffffff810442c7: 31 c0 xor %eax,%eax + ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f + ffffffff810442ce: eb c9 jmp ffffffff81044299 + +Without the jump label optimization it looks like:: + + ffffffff810441f0 : + ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 + ffffffff810441f6: 55 push %rbp + ffffffff810441f7: 48 89 e5 mov %rsp,%rbp + 
ffffffff810441fa: 85 c0 test %eax,%eax + ffffffff810441fc: 75 27 jne ffffffff81044225 + ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax + ffffffff81044205: 00 00 + ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax + ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax + ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi + ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 + ffffffff81044221: 5d pop %rbp + ffffffff81044222: 48 98 cltq + ffffffff81044224: c3 retq + ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi + ffffffff8104422c: 31 c0 xor %eax,%eax + ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 + ffffffff81044233: eb c9 jmp ffffffff810441fe + ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1) + ffffffff8104423c: 00 00 00 00 + +Thus, the disable jump label case adds a 'mov', 'test' and 'jne' instruction +vs. the jump label case just has a 'no-op' or 'jmp 0'. (The jmp 0, is patched +to a 5 byte atomic no-op instruction at boot-time.) Thus, the disabled jump +label case adds:: + + 6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 addition bytes. + +If we then include the padding bytes, the jump label code saves, 16 total bytes +of instruction memory for this small function. In this case the non-jump label +function is 80 bytes long. Thus, we have saved 20% of the instruction +footprint. We can in fact improve this even further, since the 5-byte no-op +really can be a 2-byte no-op since we can reach the branch with a 2-byte jmp. +However, we have not yet implemented optimal no-op sizes (they are currently +hard-coded). + +Since there are a number of static key API uses in the scheduler paths, +'pipe-test' (also known as 'perf bench sched pipe') can be used to show the +performance improvement. Testing done on 3.3.0-rc2: + +jump label disabled:: + + Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): + + 855.700314 task-clock # 0.534 CPUs utilized ( +- 0.11% ) + 200,003 context-switches # 0.234 M/sec ( +- 0.00% ) + 0 CPU-migrations # 0.000 M/sec ( +- 39.58% ) + 487 page-faults # 0.001 M/sec ( +- 0.02% ) + 1,474,374,262 cycles # 1.723 GHz ( +- 0.17% ) + stalled-cycles-frontend + stalled-cycles-backend + 1,178,049,567 instructions # 0.80 insns per cycle ( +- 0.06% ) + 208,368,926 branches # 243.507 M/sec ( +- 0.06% ) + 5,569,188 branch-misses # 2.67% of all branches ( +- 0.54% ) + + 1.601607384 seconds time elapsed ( +- 0.07% ) + +jump label enabled:: + + Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): + + 841.043185 task-clock # 0.533 CPUs utilized ( +- 0.12% ) + 200,004 context-switches # 0.238 M/sec ( +- 0.00% ) + 0 CPU-migrations # 0.000 M/sec ( +- 40.87% ) + 487 page-faults # 0.001 M/sec ( +- 0.05% ) + 1,432,559,428 cycles # 1.703 GHz ( +- 0.18% ) + stalled-cycles-frontend + stalled-cycles-backend + 1,175,363,994 instructions # 0.82 insns per cycle ( +- 0.04% ) + 206,859,359 branches # 245.956 M/sec ( +- 0.04% ) + 4,884,119 branch-misses # 2.36% of all branches ( +- 0.85% ) + + 1.579384366 seconds time elapsed + +The percentage of saved branches is .7%, and we've saved 12% on +'branch-misses'. This is where we would expect to get the most savings, since +this optimization is about reducing the number of branches. In addition, we've +saved .2% on instructions, and 2.8% on cycles and 1.4% on elapsed time. 
diff --git a/Documentation/staging/tee.rst b/Documentation/staging/tee.rst new file mode 100644 index 000000000000..62e8ba64d04f --- /dev/null +++ b/Documentation/staging/tee.rst @@ -0,0 +1,277 @@ +============= +TEE subsystem +============= + +This document describes the TEE subsystem in Linux. + +A TEE (Trusted Execution Environment) is a trusted OS running in some +secure environment, for example, TrustZone on ARM CPUs, or a separate +secure co-processor etc. A TEE driver handles the details needed to +communicate with the TEE. + +This subsystem deals with: + +- Registration of TEE drivers + +- Managing shared memory between Linux and the TEE + +- Providing a generic API to the TEE + +The TEE interface +================= + +include/uapi/linux/tee.h defines the generic interface to a TEE. + +User space (the client) connects to the driver by opening /dev/tee[0-9]* or +/dev/teepriv[0-9]*. + +- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor + which user space can mmap. When user space doesn't need the file + descriptor any more, it should be closed. When shared memory isn't needed + any longer it should be unmapped with munmap() to allow the reuse of + memory. + +- TEE_IOC_VERSION lets user space know which TEE this driver handles and + its capabilities. + +- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application. + +- TEE_IOC_INVOKE invokes a function in a Trusted Application. + +- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE. + +- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application. + +There are two classes of clients, normal clients and supplicants. The latter is +a helper process for the TEE to access resources in Linux, for example file +system access. A normal client opens /dev/tee[0-9]* and a supplicant opens +/dev/teepriv[0-9]. + +Much of the communication between clients and the TEE is opaque to the +driver. The main job for the driver is to receive requests from the +clients, forward them to the TEE and send back the results. In the case of +supplicants the communication goes in the other direction, the TEE sends +requests to the supplicant which then sends back the result. + +The TEE kernel interface +======================== + +Kernel provides a TEE bus infrastructure where a Trusted Application is +represented as a device identified via Universally Unique Identifier (UUID) and +client drivers register a table of supported device UUIDs. + +TEE bus infrastructure registers following APIs: +- match(): iterates over the client driver UUID table to find a corresponding + match for device UUID. If a match is found, then this particular device is + probed via corresponding probe API registered by the client driver. This + process happens whenever a device or a client driver is registered with TEE + bus. +- uevent(): notifies user-space (udev) whenever a new device is registered on + TEE bus for auto-loading of modularized client drivers. + +TEE bus device enumeration is specific to underlying TEE implementation, so it +is left open for TEE drivers to provide corresponding implementation. + +Then TEE client driver can talk to a matched Trusted Application using APIs +listed in include/linux/tee_drv.h. 
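
To make the ioctl interface described above a little more concrete, here is a hedged user-space sketch (the device node name /dev/tee0 and the trivial error handling are assumptions for the example); it only queries the driver with TEE_IOC_VERSION::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/tee.h>

    int main(void)
    {
            struct tee_ioctl_version_data vers;
            int fd = open("/dev/tee0", O_RDWR);

            if (fd < 0)
                    return 1;
            /* Reports which TEE backs this device and its capabilities. */
            if (ioctl(fd, TEE_IOC_VERSION, &vers) == 0)
                    printf("impl_id %u gen_caps 0x%x\n",
                           vers.impl_id, vers.gen_caps);
            close(fd);
            return 0;
    }
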
+ +TEE client driver example +------------------------- + +Suppose a TEE client driver needs to communicate with a Trusted Application +having UUID: ``ac6a4085-0e82-4c33-bf98-8eb8e118b6c2``, so driver registration +snippet would look like:: + + static const struct tee_client_device_id client_id_table[] = { + {UUID_INIT(0xac6a4085, 0x0e82, 0x4c33, + 0xbf, 0x98, 0x8e, 0xb8, 0xe1, 0x18, 0xb6, 0xc2)}, + {} + }; + + MODULE_DEVICE_TABLE(tee, client_id_table); + + static struct tee_client_driver client_driver = { + .id_table = client_id_table, + .driver = { + .name = DRIVER_NAME, + .bus = &tee_bus_type, + .probe = client_probe, + .remove = client_remove, + }, + }; + + static int __init client_init(void) + { + return driver_register(&client_driver.driver); + } + + static void __exit client_exit(void) + { + driver_unregister(&client_driver.driver); + } + + module_init(client_init); + module_exit(client_exit); + +OP-TEE driver +============= + +The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM +TrustZone based OP-TEE solution that is supported. + +Lowest level of communication with OP-TEE builds on ARM SMC Calling +Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface +[3] used internally by the driver. Stacked on top of that is OP-TEE Message +Protocol [4]. + +OP-TEE SMC interface provides the basic functions required by SMCCC and some +additional functions specific for OP-TEE. The most interesting functions are: + +- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information + which is then returned by TEE_IOC_VERSION + +- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used + to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a + separate secure co-processor. + +- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol + +- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory + range to used for shared memory between Linux and OP-TEE. + +The GlobalPlatform TEE Client API [5] is implemented on top of the generic +TEE API. + +Picture of the relationship between the different components in the +OP-TEE architecture:: + + User space Kernel Secure world + ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ + +--------+ +-------------+ + | Client | | Trusted | + +--------+ | Application | + /\ +-------------+ + || +----------+ /\ + || |tee- | || + || |supplicant| \/ + || +----------+ +-------------+ + \/ /\ | TEE Internal| + +-------+ || | API | + + TEE | || +--------+--------+ +-------------+ + | Client| || | TEE | OP-TEE | | OP-TEE | + | API | \/ | subsys | driver | | Trusted OS | + +-------+----------------+----+-------+----+-----------+-------------+ + | Generic TEE API | | OP-TEE MSG | + | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) | + +-----------------------------+ +------------------------------+ + +RPC (Remote Procedure Call) are requests from secure world to kernel driver +or tee-supplicant. An RPC is identified by a special range of SMCCC return +values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the +kernel are handled by the kernel driver. Other RPC messages will be forwarded to +tee-supplicant without further involvement of the driver, except switching +shared memory buffer representation. + +OP-TEE device enumeration +------------------------- + +OP-TEE provides a pseudo Trusted Application: drivers/tee/optee/device.c in +order to support device enumeration. 
In other words, OP-TEE driver invokes this +application to retrieve a list of Trusted Applications which can be registered +as devices on the TEE bus. + +AMD-TEE driver +============== + +The AMD-TEE driver handles the communication with AMD's TEE environment. The +TEE environment is provided by AMD Secure Processor. + +The AMD Secure Processor (formerly called Platform Security Processor or PSP) +is a dedicated processor that features ARM TrustZone technology, along with a +software-based Trusted Execution Environment (TEE) designed to enable +third-party Trusted Applications. This feature is currently enabled only for +APUs. + +The following picture shows a high level overview of AMD-TEE:: + + | + x86 | + | + User space (Kernel space) | AMD Secure Processor (PSP) + ~~~~~~~~~~ ~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~ + | + +--------+ | +-------------+ + | Client | | | Trusted | + +--------+ | | Application | + /\ | +-------------+ + || | /\ + || | || + || | \/ + || | +----------+ + || | | TEE | + || | | Internal | + \/ | | API | + +---------+ +-----------+---------+ +----------+ + | TEE | | TEE | AMD-TEE | | AMD-TEE | + | Client | | subsystem | driver | | Trusted | + | API | | | | | OS | + +---------+-----------+----+------+---------+---------+----------+ + | Generic TEE API | | ASP | Mailbox | + | IOCTL (TEE_IOC_*) | | driver | Register Protocol | + +--------------------------+ +---------+--------------------+ + +At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the +CPU to PSP mailbox regsister to submit commands to the PSP. The format of the +command buffer is opaque to the ASP driver. It's role is to submit commands to +the secure processor and return results to AMD-TEE driver. The interface +between AMD-TEE driver and AMD Secure Processor driver can be found in [6]. + +The AMD-TEE driver packages the command buffer payload for processing in TEE. +The command buffer format for the different TEE commands can be found in [7]. + +The TEE commands supported by AMD-TEE Trusted OS are: + +* TEE_CMD_ID_LOAD_TA - loads a Trusted Application (TA) binary into + TEE environment. +* TEE_CMD_ID_UNLOAD_TA - unloads TA binary from TEE environment. +* TEE_CMD_ID_OPEN_SESSION - opens a session with a loaded TA. +* TEE_CMD_ID_CLOSE_SESSION - closes session with loaded TA +* TEE_CMD_ID_INVOKE_CMD - invokes a command with loaded TA +* TEE_CMD_ID_MAP_SHARED_MEM - maps shared memory +* TEE_CMD_ID_UNMAP_SHARED_MEM - unmaps shared memory + +AMD-TEE Trusted OS is the firmware running on AMD Secure Processor. + +The AMD-TEE driver registers itself with TEE subsystem and implements the +following driver function callbacks: + +* get_version - returns the driver implementation id and capability. +* open - sets up the driver context data structure. +* release - frees up driver resources. +* open_session - loads the TA binary and opens session with loaded TA. +* close_session - closes session with loaded TA and unloads it. +* invoke_func - invokes a command with loaded TA. + +cancel_req driver callback is not supported by AMD-TEE. + +The GlobalPlatform TEE Client API [5] can be used by the user space (client) to +talk to AMD's TEE. AMD's TEE provides a secure environment for loading, opening +a session, invoking commands and clossing session with TA. 
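
As a user-space counterpart, and only as a sketch, the GlobalPlatform TEE Client API mentioned above could be used roughly as follows. This assumes a client library providing tee_client_api.h (for example the OP-TEE client library); the UUID simply reuses the example UUID from the client driver section and command ID 0 is a placeholder, not a real TA command::

    #include <tee_client_api.h>

    /* Placeholder UUID (same value as the example above), not a real TA. */
    static const TEEC_UUID ta_uuid = {
            0xac6a4085, 0x0e82, 0x4c33,
            { 0xbf, 0x98, 0x8e, 0xb8, 0xe1, 0x18, 0xb6, 0xc2 }
    };

    int talk_to_ta(void)
    {
            TEEC_Context ctx;
            TEEC_Session sess;
            TEEC_Result res;
            uint32_t origin;

            res = TEEC_InitializeContext(NULL, &ctx);
            if (res != TEEC_SUCCESS)
                    return -1;

            res = TEEC_OpenSession(&ctx, &sess, &ta_uuid, TEEC_LOGIN_PUBLIC,
                                   NULL, NULL, &origin);
            if (res == TEEC_SUCCESS) {
                    /* 0 stands in for a TA-specific command ID. */
                    TEEC_InvokeCommand(&sess, 0, NULL, &origin);
                    TEEC_CloseSession(&sess);
            }
            TEEC_FinalizeContext(&ctx);
            return res == TEEC_SUCCESS ? 0 : -1;
    }
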
+ +References +========== + +[1] https://github.com/OP-TEE/optee_os + +[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + +[3] drivers/tee/optee/optee_smc.h + +[4] drivers/tee/optee/optee_msg.h + +[5] http://www.globalplatform.org/specificationsdevice.asp look for + "TEE Client API Specification v1.0" and click download. + +[6] include/linux/psp-tee.h + +[7] drivers/tee/amdtee/amdtee_if.h diff --git a/Documentation/staging/xz.rst b/Documentation/staging/xz.rst new file mode 100644 index 000000000000..b2f5ff12a161 --- /dev/null +++ b/Documentation/staging/xz.rst @@ -0,0 +1,127 @@ +============================ +XZ data compression in Linux +============================ + +Introduction +============ + +XZ is a general purpose data compression format with high compression +ratio and relatively fast decompression. The primary compression +algorithm (filter) is LZMA2. Additional filters can be used to improve +compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters +improve compression ratio of executable data. + +The XZ decompressor in Linux is called XZ Embedded. It supports +the LZMA2 filter and optionally also BCJ filters. CRC32 is supported +for integrity checking. The home page of XZ Embedded is at +, where you can find the +latest version and also information about using the code outside +the Linux kernel. + +For userspace, XZ Utils provide a zlib-like compression library +and a gzip-like command line tool. XZ Utils can be downloaded from +. + +XZ related components in the kernel +=================================== + +The xz_dec module provides XZ decompressor with single-call (buffer +to buffer) and multi-call (stateful) APIs. The usage of the xz_dec +module is documented in include/linux/xz.h. + +The xz_dec_test module is for testing xz_dec. xz_dec_test is not +useful unless you are hacking the XZ decompressor. xz_dec_test +allocates a char device major dynamically to which one can write +.xz files from userspace. The decompressed output is thrown away. +Keep an eye on dmesg to see diagnostics printed by xz_dec_test. +See the xz_dec_test source code for the details. + +For decompressing the kernel image, initramfs, and initrd, there +is a wrapper function in lib/decompress_unxz.c. Its API is the +same as in other decompress_*.c files, which is defined in +include/linux/decompress/generic.h. + +scripts/xz_wrap.sh is a wrapper for the xz command line tool found +from XZ Utils. The wrapper sets compression options to values suitable +for compressing the kernel image. + +For kernel makefiles, two commands are provided for use with +$(call if_needed). The kernel image should be compressed with +$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2 +dictionary. It will also append a four-byte trailer containing the +uncompressed size of the file, which is needed by the boot code. +Other things should be compressed with $(call if_needed,xzmisc) +which will use no BCJ filter and 1 MiB LZMA2 dictionary. + +Notes on compression options +============================ + +Since the XZ Embedded supports only streams with no integrity check or +CRC32, make sure that you don't use some other integrity check type +when encoding files that are supposed to be decoded by the kernel. With +liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 +when encoding. With the xz command line tool, use --check=none or +--check=crc32. 
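
A hedged liblzma sketch of the same rule (buffer handling is omitted and the preset level 6 is only an example): the encoder must be set up with LZMA_CHECK_CRC32 or LZMA_CHECK_NONE if the kernel is going to decode the stream::

    #include <lzma.h>

    /* Initialise an encoder whose output the kernel decompressor accepts:
     * the integrity check is limited to CRC32 (or none at all). */
    int init_kernel_friendly_encoder(lzma_stream *strm)
    {
            lzma_ret ret = lzma_easy_encoder(strm, 6, LZMA_CHECK_CRC32);

            /* Afterwards, feed data through lzma_code() as usual. */
            return ret == LZMA_OK ? 0 : -1;
    }
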
+ +Using CRC32 is strongly recommended unless there is some other layer +which will verify the integrity of the uncompressed data anyway. +Double checking the integrity would probably be waste of CPU cycles. +Note that the headers will always have a CRC32 which will be validated +by the decoder; you can only change the integrity check type (or +disable it) for the actual uncompressed data. + +In userspace, LZMA2 is typically used with dictionary sizes of several +megabytes. The decoder needs to have the dictionary in RAM, thus big +dictionaries cannot be used for files that are intended to be decoded +by the kernel. 1 MiB is probably the maximum reasonable dictionary +size for in-kernel use (maybe more is OK for initramfs). The presets +in XZ Utils may not be optimal when creating files for the kernel, +so don't hesitate to use custom settings. Example:: + + xz --check=crc32 --lzma2=dict=512KiB inputfile + +An exception to above dictionary size limitation is when the decoder +is used in single-call mode. Decompressing the kernel itself is an +example of this situation. In single-call mode, the memory usage +doesn't depend on the dictionary size, and it is perfectly fine to +use a big dictionary: for maximum compression, the dictionary should +be at least as big as the uncompressed data itself. + +Future plans +============ + +Creating a limited XZ encoder may be considered if people think it is +useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at +the fastest settings, so it isn't clear if LZMA2 encoder is wanted +into the kernel. + +Support for limited random-access reading is planned for the +decompression code. I don't know if it could have any use in the +kernel, but I know that it would be useful in some embedded projects +outside the Linux kernel. + +Conformance to the .xz file format specification +================================================ + +There are a couple of corner cases where things have been simplified +at expense of detecting errors as early as possible. These should not +matter in practice all, since they don't cause security issues. But +it is good to know this if testing the code e.g. with the test files +from XZ Utils. + +Reporting bugs +============== + +Before reporting a bug, please check that it's not fixed already +at upstream. See to get the +latest code. + +Report bugs to or visit #tukaani on +Freenode and talk to Larhzu. I don't actively read LKML or other +kernel-related mailing lists, so if there's something I should know, +you should email to me personally or use IRC. + +Don't bother Igor Pavlov with questions about the XZ implementation +in the kernel or about XZ Utils. While these two implementations +include essential code that is directly based on Igor Pavlov's code, +these implementations aren't maintained nor supported by him. diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt deleted file mode 100644 index 38290b9f25eb..000000000000 --- a/Documentation/static-keys.txt +++ /dev/null @@ -1,331 +0,0 @@ -=========== -Static Keys -=========== - -.. warning:: - - DEPRECATED API: - - The use of 'struct static_key' directly, is now DEPRECATED. In addition - static_key_{true,false}() is also DEPRECATED. 
IE DO NOT use the following:: - - struct static_key false = STATIC_KEY_INIT_FALSE; - struct static_key true = STATIC_KEY_INIT_TRUE; - static_key_true() - static_key_false() - - The updated API replacements are:: - - DEFINE_STATIC_KEY_TRUE(key); - DEFINE_STATIC_KEY_FALSE(key); - DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); - DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); - static_branch_likely() - static_branch_unlikely() - -Abstract -======== - -Static keys allows the inclusion of seldom used features in -performance-sensitive fast-path kernel code, via a GCC feature and a code -patching technique. A quick example:: - - DEFINE_STATIC_KEY_FALSE(key); - - ... - - if (static_branch_unlikely(&key)) - do unlikely code - else - do likely code - - ... - static_branch_enable(&key); - ... - static_branch_disable(&key); - ... - -The static_branch_unlikely() branch will be generated into the code with as little -impact to the likely code path as possible. - - -Motivation -========== - - -Currently, tracepoints are implemented using a conditional branch. The -conditional check requires checking a global variable for each tracepoint. -Although the overhead of this check is small, it increases when the memory -cache comes under pressure (memory cache lines for these global variables may -be shared with other memory accesses). As we increase the number of tracepoints -in the kernel this overhead may become more of an issue. In addition, -tracepoints are often dormant (disabled) and provide no direct kernel -functionality. Thus, it is highly desirable to reduce their impact as much as -possible. Although tracepoints are the original motivation for this work, other -kernel code paths should be able to make use of the static keys facility. - - -Solution -======== - - -gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label: - -https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html - -Using the 'asm goto', we can create branches that are either taken or not taken -by default, without the need to check memory. Then, at run-time, we can patch -the branch site to change the branch direction. - -For example, if we have a simple branch that is disabled by default:: - - if (static_branch_unlikely(&key)) - printk("I am the true branch\n"); - -Thus, by default the 'printk' will not be emitted. And the code generated will -consist of a single atomic 'no-op' instruction (5 bytes on x86), in the -straight-line code path. When the branch is 'flipped', we will patch the -'no-op' in the straight-line codepath with a 'jump' instruction to the -out-of-line true branch. Thus, changing branch direction is expensive but -branch selection is basically 'free'. That is the basic tradeoff of this -optimization. - -This lowlevel patching mechanism is called 'jump label patching', and it gives -the basis for the static keys facility. - -Static key label API, usage and examples -======================================== - - -In order to make use of this optimization you must first define a key:: - - DEFINE_STATIC_KEY_TRUE(key); - -or:: - - DEFINE_STATIC_KEY_FALSE(key); - - -The key must be global, that is, it can't be allocated on the stack or dynamically -allocated at run-time. 
- -The key is then used in code as:: - - if (static_branch_unlikely(&key)) - do unlikely code - else - do likely code - -Or:: - - if (static_branch_likely(&key)) - do likely code - else - do unlikely code - -Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE, may -be used in either static_branch_likely() or static_branch_unlikely() -statements. - -Branch(es) can be set true via:: - - static_branch_enable(&key); - -or false via:: - - static_branch_disable(&key); - -The branch(es) can then be switched via reference counts:: - - static_branch_inc(&key); - ... - static_branch_dec(&key); - -Thus, 'static_branch_inc()' means 'make the branch true', and -'static_branch_dec()' means 'make the branch false' with appropriate -reference counting. For example, if the key is initialized true, a -static_branch_dec(), will switch the branch to false. And a subsequent -static_branch_inc(), will change the branch back to true. Likewise, if the -key is initialized false, a 'static_branch_inc()', will change the branch to -true. And then a 'static_branch_dec()', will again make the branch false. - -The state and the reference count can be retrieved with 'static_key_enabled()' -and 'static_key_count()'. In general, if you use these functions, they -should be protected with the same mutex used around the enable/disable -or increment/decrement function. - -Note that switching branches results in some locks being taken, -particularly the CPU hotplug lock (in order to avoid races against -CPUs being brought in the kernel while the kernel is getting -patched). Calling the static key API from within a hotplug notifier is -thus a sure deadlock recipe. In order to still allow use of the -functionality, the following functions are provided: - - static_key_enable_cpuslocked() - static_key_disable_cpuslocked() - static_branch_enable_cpuslocked() - static_branch_disable_cpuslocked() - -These functions are *not* general purpose, and must only be used when -you really know that you're in the above context, and no other. - -Where an array of keys is required, it can be defined as:: - - DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); - -or:: - - DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); - -4) Architecture level code patching interface, 'jump labels' - - -There are a few functions and macros that architectures must implement in order -to take advantage of this optimization. If there is no architecture support, we -simply fall back to a traditional, load, test, and jump sequence. Also, the -struct jump_entry table must be at least 4-byte aligned because the -static_key->entry field makes use of the two least significant bits. 
- -* ``select HAVE_ARCH_JUMP_LABEL``, - see: arch/x86/Kconfig - -* ``#define JUMP_LABEL_NOP_SIZE``, - see: arch/x86/include/asm/jump_label.h - -* ``__always_inline bool arch_static_branch(struct static_key *key, bool branch)``, - see: arch/x86/include/asm/jump_label.h - -* ``__always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)``, - see: arch/x86/include/asm/jump_label.h - -* ``void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)``, - see: arch/x86/kernel/jump_label.c - -* ``__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)``, - see: arch/x86/kernel/jump_label.c - -* ``struct jump_entry``, - see: arch/x86/include/asm/jump_label.h - - -5) Static keys / jump label analysis, results (x86_64): - - -As an example, let's add the following branch to 'getppid()', such that the -system call now looks like:: - - SYSCALL_DEFINE0(getppid) - { - int pid; - - + if (static_branch_unlikely(&key)) - + printk("I am the true branch\n"); - - rcu_read_lock(); - pid = task_tgid_vnr(rcu_dereference(current->real_parent)); - rcu_read_unlock(); - - return pid; - } - -The resulting instructions with jump labels generated by GCC is:: - - ffffffff81044290 : - ffffffff81044290: 55 push %rbp - ffffffff81044291: 48 89 e5 mov %rsp,%rbp - ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 - ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax - ffffffff810442a0: 00 00 - ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax - ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax - ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi - ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 - ffffffff810442bc: 5d pop %rbp - ffffffff810442bd: 48 98 cltq - ffffffff810442bf: c3 retq - ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi - ffffffff810442c7: 31 c0 xor %eax,%eax - ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f - ffffffff810442ce: eb c9 jmp ffffffff81044299 - -Without the jump label optimization it looks like:: - - ffffffff810441f0 : - ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 - ffffffff810441f6: 55 push %rbp - ffffffff810441f7: 48 89 e5 mov %rsp,%rbp - ffffffff810441fa: 85 c0 test %eax,%eax - ffffffff810441fc: 75 27 jne ffffffff81044225 - ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax - ffffffff81044205: 00 00 - ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax - ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax - ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi - ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 - ffffffff81044221: 5d pop %rbp - ffffffff81044222: 48 98 cltq - ffffffff81044224: c3 retq - ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi - ffffffff8104422c: 31 c0 xor %eax,%eax - ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 - ffffffff81044233: eb c9 jmp ffffffff810441fe - ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1) - ffffffff8104423c: 00 00 00 00 - -Thus, the disable jump label case adds a 'mov', 'test' and 'jne' instruction -vs. the jump label case just has a 'no-op' or 'jmp 0'. (The jmp 0, is patched -to a 5 byte atomic no-op instruction at boot-time.) Thus, the disabled jump -label case adds:: - - 6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 addition bytes. 
- -If we then include the padding bytes, the jump label code saves, 16 total bytes -of instruction memory for this small function. In this case the non-jump label -function is 80 bytes long. Thus, we have saved 20% of the instruction -footprint. We can in fact improve this even further, since the 5-byte no-op -really can be a 2-byte no-op since we can reach the branch with a 2-byte jmp. -However, we have not yet implemented optimal no-op sizes (they are currently -hard-coded). - -Since there are a number of static key API uses in the scheduler paths, -'pipe-test' (also known as 'perf bench sched pipe') can be used to show the -performance improvement. Testing done on 3.3.0-rc2: - -jump label disabled:: - - Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): - - 855.700314 task-clock # 0.534 CPUs utilized ( +- 0.11% ) - 200,003 context-switches # 0.234 M/sec ( +- 0.00% ) - 0 CPU-migrations # 0.000 M/sec ( +- 39.58% ) - 487 page-faults # 0.001 M/sec ( +- 0.02% ) - 1,474,374,262 cycles # 1.723 GHz ( +- 0.17% ) - stalled-cycles-frontend - stalled-cycles-backend - 1,178,049,567 instructions # 0.80 insns per cycle ( +- 0.06% ) - 208,368,926 branches # 243.507 M/sec ( +- 0.06% ) - 5,569,188 branch-misses # 2.67% of all branches ( +- 0.54% ) - - 1.601607384 seconds time elapsed ( +- 0.07% ) - -jump label enabled:: - - Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): - - 841.043185 task-clock # 0.533 CPUs utilized ( +- 0.12% ) - 200,004 context-switches # 0.238 M/sec ( +- 0.00% ) - 0 CPU-migrations # 0.000 M/sec ( +- 40.87% ) - 487 page-faults # 0.001 M/sec ( +- 0.05% ) - 1,432,559,428 cycles # 1.703 GHz ( +- 0.18% ) - stalled-cycles-frontend - stalled-cycles-backend - 1,175,363,994 instructions # 0.82 insns per cycle ( +- 0.04% ) - 206,859,359 branches # 245.956 M/sec ( +- 0.04% ) - 4,884,119 branch-misses # 2.36% of all branches ( +- 0.85% ) - - 1.579384366 seconds time elapsed - -The percentage of saved branches is .7%, and we've saved 12% on -'branch-misses'. This is where we would expect to get the most savings, since -this optimization is about reducing the number of branches. In addition, we've -saved .2% on instructions, and 2.8% on cycles and 1.4% on elapsed time. diff --git a/Documentation/tee.txt b/Documentation/tee.txt deleted file mode 100644 index 350dd40cba45..000000000000 --- a/Documentation/tee.txt +++ /dev/null @@ -1,276 +0,0 @@ -============= -TEE subsystem -============= - -This document describes the TEE subsystem in Linux. - -A TEE (Trusted Execution Environment) is a trusted OS running in some -secure environment, for example, TrustZone on ARM CPUs, or a separate -secure co-processor etc. A TEE driver handles the details needed to -communicate with the TEE. - -This subsystem deals with: - -- Registration of TEE drivers - -- Managing shared memory between Linux and the TEE - -- Providing a generic API to the TEE - -The TEE interface -================= - -include/uapi/linux/tee.h defines the generic interface to a TEE. - -User space (the client) connects to the driver by opening /dev/tee[0-9]* or -/dev/teepriv[0-9]*. - -- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor - which user space can mmap. When user space doesn't need the file - descriptor any more, it should be closed. When shared memory isn't needed - any longer it should be unmapped with munmap() to allow the reuse of - memory. - -- TEE_IOC_VERSION lets user space know which TEE this driver handles and - its capabilities. 
- -- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application. - -- TEE_IOC_INVOKE invokes a function in a Trusted Application. - -- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE. - -- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application. - -There are two classes of clients, normal clients and supplicants. The latter is -a helper process for the TEE to access resources in Linux, for example file -system access. A normal client opens /dev/tee[0-9]* and a supplicant opens -/dev/teepriv[0-9]. - -Much of the communication between clients and the TEE is opaque to the -driver. The main job for the driver is to receive requests from the -clients, forward them to the TEE and send back the results. In the case of -supplicants the communication goes in the other direction, the TEE sends -requests to the supplicant which then sends back the result. - -The TEE kernel interface -======================== - -Kernel provides a TEE bus infrastructure where a Trusted Application is -represented as a device identified via Universally Unique Identifier (UUID) and -client drivers register a table of supported device UUIDs. - -TEE bus infrastructure registers following APIs: -- match(): iterates over the client driver UUID table to find a corresponding - match for device UUID. If a match is found, then this particular device is - probed via corresponding probe API registered by the client driver. This - process happens whenever a device or a client driver is registered with TEE - bus. -- uevent(): notifies user-space (udev) whenever a new device is registered on - TEE bus for auto-loading of modularized client drivers. - -TEE bus device enumeration is specific to underlying TEE implementation, so it -is left open for TEE drivers to provide corresponding implementation. - -Then TEE client driver can talk to a matched Trusted Application using APIs -listed in include/linux/tee_drv.h. - -TEE client driver example -------------------------- - -Suppose a TEE client driver needs to communicate with a Trusted Application -having UUID: ``ac6a4085-0e82-4c33-bf98-8eb8e118b6c2``, so driver registration -snippet would look like:: - - static const struct tee_client_device_id client_id_table[] = { - {UUID_INIT(0xac6a4085, 0x0e82, 0x4c33, - 0xbf, 0x98, 0x8e, 0xb8, 0xe1, 0x18, 0xb6, 0xc2)}, - {} - }; - - MODULE_DEVICE_TABLE(tee, client_id_table); - - static struct tee_client_driver client_driver = { - .id_table = client_id_table, - .driver = { - .name = DRIVER_NAME, - .bus = &tee_bus_type, - .probe = client_probe, - .remove = client_remove, - }, - }; - - static int __init client_init(void) - { - return driver_register(&client_driver.driver); - } - - static void __exit client_exit(void) - { - driver_unregister(&client_driver.driver); - } - - module_init(client_init); - module_exit(client_exit); - -OP-TEE driver -============= - -The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM -TrustZone based OP-TEE solution that is supported. - -Lowest level of communication with OP-TEE builds on ARM SMC Calling -Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface -[3] used internally by the driver. Stacked on top of that is OP-TEE Message -Protocol [4]. - -OP-TEE SMC interface provides the basic functions required by SMCCC and some -additional functions specific for OP-TEE. 
The most interesting functions are: - -- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information - which is then returned by TEE_IOC_VERSION - -- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used - to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a - separate secure co-processor. - -- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol - -- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory - range to used for shared memory between Linux and OP-TEE. - -The GlobalPlatform TEE Client API [5] is implemented on top of the generic -TEE API. - -Picture of the relationship between the different components in the -OP-TEE architecture:: - - User space Kernel Secure world - ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ - +--------+ +-------------+ - | Client | | Trusted | - +--------+ | Application | - /\ +-------------+ - || +----------+ /\ - || |tee- | || - || |supplicant| \/ - || +----------+ +-------------+ - \/ /\ | TEE Internal| - +-------+ || | API | - + TEE | || +--------+--------+ +-------------+ - | Client| || | TEE | OP-TEE | | OP-TEE | - | API | \/ | subsys | driver | | Trusted OS | - +-------+----------------+----+-------+----+-----------+-------------+ - | Generic TEE API | | OP-TEE MSG | - | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) | - +-----------------------------+ +------------------------------+ - -RPC (Remote Procedure Call) are requests from secure world to kernel driver -or tee-supplicant. An RPC is identified by a special range of SMCCC return -values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the -kernel are handled by the kernel driver. Other RPC messages will be forwarded to -tee-supplicant without further involvement of the driver, except switching -shared memory buffer representation. - -OP-TEE device enumeration -------------------------- - -OP-TEE provides a pseudo Trusted Application: drivers/tee/optee/device.c in -order to support device enumeration. In other words, OP-TEE driver invokes this -application to retrieve a list of Trusted Applications which can be registered -as devices on the TEE bus. - -AMD-TEE driver -============== - -The AMD-TEE driver handles the communication with AMD's TEE environment. The -TEE environment is provided by AMD Secure Processor. - -The AMD Secure Processor (formerly called Platform Security Processor or PSP) -is a dedicated processor that features ARM TrustZone technology, along with a -software-based Trusted Execution Environment (TEE) designed to enable -third-party Trusted Applications. This feature is currently enabled only for -APUs. 
- -The following picture shows a high level overview of AMD-TEE:: - - | - x86 | - | - User space (Kernel space) | AMD Secure Processor (PSP) - ~~~~~~~~~~ ~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~ - | - +--------+ | +-------------+ - | Client | | | Trusted | - +--------+ | | Application | - /\ | +-------------+ - || | /\ - || | || - || | \/ - || | +----------+ - || | | TEE | - || | | Internal | - \/ | | API | - +---------+ +-----------+---------+ +----------+ - | TEE | | TEE | AMD-TEE | | AMD-TEE | - | Client | | subsystem | driver | | Trusted | - | API | | | | | OS | - +---------+-----------+----+------+---------+---------+----------+ - | Generic TEE API | | ASP | Mailbox | - | IOCTL (TEE_IOC_*) | | driver | Register Protocol | - +--------------------------+ +---------+--------------------+ - -At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the -CPU to PSP mailbox regsister to submit commands to the PSP. The format of the -command buffer is opaque to the ASP driver. It's role is to submit commands to -the secure processor and return results to AMD-TEE driver. The interface -between AMD-TEE driver and AMD Secure Processor driver can be found in [6]. - -The AMD-TEE driver packages the command buffer payload for processing in TEE. -The command buffer format for the different TEE commands can be found in [7]. - -The TEE commands supported by AMD-TEE Trusted OS are: -* TEE_CMD_ID_LOAD_TA - loads a Trusted Application (TA) binary into - TEE environment. -* TEE_CMD_ID_UNLOAD_TA - unloads TA binary from TEE environment. -* TEE_CMD_ID_OPEN_SESSION - opens a session with a loaded TA. -* TEE_CMD_ID_CLOSE_SESSION - closes session with loaded TA -* TEE_CMD_ID_INVOKE_CMD - invokes a command with loaded TA -* TEE_CMD_ID_MAP_SHARED_MEM - maps shared memory -* TEE_CMD_ID_UNMAP_SHARED_MEM - unmaps shared memory - -AMD-TEE Trusted OS is the firmware running on AMD Secure Processor. - -The AMD-TEE driver registers itself with TEE subsystem and implements the -following driver function callbacks: - -* get_version - returns the driver implementation id and capability. -* open - sets up the driver context data structure. -* release - frees up driver resources. -* open_session - loads the TA binary and opens session with loaded TA. -* close_session - closes session with loaded TA and unloads it. -* invoke_func - invokes a command with loaded TA. - -cancel_req driver callback is not supported by AMD-TEE. - -The GlobalPlatform TEE Client API [5] can be used by the user space (client) to -talk to AMD's TEE. AMD's TEE provides a secure environment for loading, opening -a session, invoking commands and clossing session with TA. - -References -========== - -[1] https://github.com/OP-TEE/optee_os - -[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html - -[3] drivers/tee/optee/optee_smc.h - -[4] drivers/tee/optee/optee_msg.h - -[5] http://www.globalplatform.org/specificationsdevice.asp look for - "TEE Client API Specification v1.0" and click download. - -[6] include/linux/psp-tee.h - -[7] drivers/tee/amdtee/amdtee_if.h diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index cc4c5fc313df..c1709165c553 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -40,7 +40,7 @@ Synopsis of kprobe_events MEMADDR : Address where the probe is inserted. 
MAXACTIVE : Maximum number of instances of the specified function that can be probed simultaneously, or 0 for the default value - as defined in Documentation/kprobes.txt section 1.3.1. + as defined in Documentation/staging/kprobes.rst section 1.3.1. FETCHARGS : Arguments. Each probe can have up to 128 args. %REG : Fetch register REG diff --git a/Documentation/xz.txt b/Documentation/xz.txt deleted file mode 100644 index b2f5ff12a161..000000000000 --- a/Documentation/xz.txt +++ /dev/null @@ -1,127 +0,0 @@ -============================ -XZ data compression in Linux -============================ - -Introduction -============ - -XZ is a general purpose data compression format with high compression -ratio and relatively fast decompression. The primary compression -algorithm (filter) is LZMA2. Additional filters can be used to improve -compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters -improve compression ratio of executable data. - -The XZ decompressor in Linux is called XZ Embedded. It supports -the LZMA2 filter and optionally also BCJ filters. CRC32 is supported -for integrity checking. The home page of XZ Embedded is at -, where you can find the -latest version and also information about using the code outside -the Linux kernel. - -For userspace, XZ Utils provide a zlib-like compression library -and a gzip-like command line tool. XZ Utils can be downloaded from -. - -XZ related components in the kernel -=================================== - -The xz_dec module provides XZ decompressor with single-call (buffer -to buffer) and multi-call (stateful) APIs. The usage of the xz_dec -module is documented in include/linux/xz.h. - -The xz_dec_test module is for testing xz_dec. xz_dec_test is not -useful unless you are hacking the XZ decompressor. xz_dec_test -allocates a char device major dynamically to which one can write -.xz files from userspace. The decompressed output is thrown away. -Keep an eye on dmesg to see diagnostics printed by xz_dec_test. -See the xz_dec_test source code for the details. - -For decompressing the kernel image, initramfs, and initrd, there -is a wrapper function in lib/decompress_unxz.c. Its API is the -same as in other decompress_*.c files, which is defined in -include/linux/decompress/generic.h. - -scripts/xz_wrap.sh is a wrapper for the xz command line tool found -from XZ Utils. The wrapper sets compression options to values suitable -for compressing the kernel image. - -For kernel makefiles, two commands are provided for use with -$(call if_needed). The kernel image should be compressed with -$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2 -dictionary. It will also append a four-byte trailer containing the -uncompressed size of the file, which is needed by the boot code. -Other things should be compressed with $(call if_needed,xzmisc) -which will use no BCJ filter and 1 MiB LZMA2 dictionary. - -Notes on compression options -============================ - -Since the XZ Embedded supports only streams with no integrity check or -CRC32, make sure that you don't use some other integrity check type -when encoding files that are supposed to be decoded by the kernel. With -liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 -when encoding. With the xz command line tool, use --check=none or ---check=crc32. - -Using CRC32 is strongly recommended unless there is some other layer -which will verify the integrity of the uncompressed data anyway. -Double checking the integrity would probably be waste of CPU cycles. 
-Note that the headers will always have a CRC32 which will be validated -by the decoder; you can only change the integrity check type (or -disable it) for the actual uncompressed data. - -In userspace, LZMA2 is typically used with dictionary sizes of several -megabytes. The decoder needs to have the dictionary in RAM, thus big -dictionaries cannot be used for files that are intended to be decoded -by the kernel. 1 MiB is probably the maximum reasonable dictionary -size for in-kernel use (maybe more is OK for initramfs). The presets -in XZ Utils may not be optimal when creating files for the kernel, -so don't hesitate to use custom settings. Example:: - - xz --check=crc32 --lzma2=dict=512KiB inputfile - -An exception to above dictionary size limitation is when the decoder -is used in single-call mode. Decompressing the kernel itself is an -example of this situation. In single-call mode, the memory usage -doesn't depend on the dictionary size, and it is perfectly fine to -use a big dictionary: for maximum compression, the dictionary should -be at least as big as the uncompressed data itself. - -Future plans -============ - -Creating a limited XZ encoder may be considered if people think it is -useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at -the fastest settings, so it isn't clear if LZMA2 encoder is wanted -into the kernel. - -Support for limited random-access reading is planned for the -decompression code. I don't know if it could have any use in the -kernel, but I know that it would be useful in some embedded projects -outside the Linux kernel. - -Conformance to the .xz file format specification -================================================ - -There are a couple of corner cases where things have been simplified -at expense of detecting errors as early as possible. These should not -matter in practice all, since they don't cause security issues. But -it is good to know this if testing the code e.g. with the test files -from XZ Utils. - -Reporting bugs -============== - -Before reporting a bug, please check that it's not fixed already -at upstream. See to get the -latest code. - -Report bugs to or visit #tukaani on -Freenode and talk to Larhzu. I don't actively read LKML or other -kernel-related mailing lists, so if there's something I should know, -you should email to me personally or use IRC. - -Don't bother Igor Pavlov with questions about the XZ implementation -in the kernel or about XZ Utils. While these two implementations -include essential code that is directly based on Igor Pavlov's code, -these implementations aren't maintained nor supported by him. diff --git a/MAINTAINERS b/MAINTAINERS index f66fc236a325..ad90b3992887 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9597,7 +9597,7 @@ M: Anil S Keshavamurthy M: "David S. 
Miller" M: Masami Hiramatsu S: Maintained -F: Documentation/kprobes.txt +F: Documentation/staging/kprobes.rst F: include/asm-generic/kprobes.h F: include/linux/kprobes.h F: kernel/kprobes.c @@ -14500,7 +14500,7 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next F: Documentation/ABI/testing/sysfs-class-remoteproc F: Documentation/devicetree/bindings/remoteproc/ -F: Documentation/remoteproc.txt +F: Documentation/staging/remoteproc.rst F: drivers/remoteproc/ F: include/linux/remoteproc.h F: include/linux/remoteproc/ @@ -14512,7 +14512,7 @@ L: linux-remoteproc@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next F: Documentation/ABI/testing/sysfs-bus-rpmsg -F: Documentation/rpmsg.txt +F: Documentation/staging/rpmsg.rst F: drivers/rpmsg/ F: include/linux/rpmsg.h F: include/linux/rpmsg/ @@ -16761,7 +16761,7 @@ TEE SUBSYSTEM M: Jens Wiklander L: tee-dev@lists.linaro.org S: Maintained -F: Documentation/tee.txt +F: Documentation/staging/tee.rst F: drivers/tee/ F: include/linux/tee_drv.h F: include/uapi/linux/tee.h diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3526c0aee954..32809624d422 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -68,7 +68,7 @@ * Lacking toolchain and or architecture support, static keys fall back to a * simple conditional branch. * - * Additional babbling in: Documentation/static-keys.txt + * Additional babbling in: Documentation/staging/static-keys.rst */ #ifndef __ASSEMBLY__ diff --git a/lib/crc32.c b/lib/crc32.c index 4a20455d1f61..35a03d03f973 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -24,7 +24,7 @@ * Version 2. See the file COPYING for more details. */ -/* see: Documentation/crc32.txt for a description of algorithms */ +/* see: Documentation/staging/crc32.rst for a description of algorithms */ #include #include diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c index 2717c7963acd..7892a40cf765 100644 --- a/lib/lzo/lzo1x_decompress_safe.c +++ b/lib/lzo/lzo1x_decompress_safe.c @@ -32,7 +32,7 @@ * depending on the base count. Since the base count is taken from a u8 * and a few bits, it is safe to assume that it will always be lower than * or equal to 2*255, thus we can always prevent any overflow by accepting - * two less 255 steps. See Documentation/lzo.txt for more information. + * two less 255 steps. See Documentation/staging/lzo.rst for more information. */ #define MAX_255_COUNT ((((size_t)~0) / 255) - 2) diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig index 22528743d4ce..5cb50245a878 100644 --- a/lib/xz/Kconfig +++ b/lib/xz/Kconfig @@ -5,7 +5,7 @@ config XZ_DEC help LZMA2 compression algorithm and BCJ filters are supported using the .xz file format as the container. For integrity checking, - CRC32 is supported. See Documentation/xz.txt for more information. + CRC32 is supported. See Documentation/staging/xz.rst for more information. if XZ_DEC diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 501911d1b327..240f2435ce6f 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -5,7 +5,7 @@ * stack trace and selected registers when _do_fork() is called. 
* * For more information on theory of operation of kprobes, see - * Documentation/kprobes.txt + * Documentation/staging/kprobes.rst * * You will see the trace data in /var/log/messages and on the console * whenever _do_fork() is invoked to create a new process. diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 013e8e6ebae9..78a2da6fb3cd 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -11,7 +11,7 @@ * If no func_name is specified, _do_fork is instrumented * * For more information on theory of operation of kretprobes, see - * Documentation/kprobes.txt + * Documentation/staging/kprobes.rst * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the console -- cgit v1.2.3-59-g8ed1b From cc7a21b6fbd945f8d8f61422ccd27203c1fafeb7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:02:59 -0700 Subject: ipv6: icmp6: avoid indirect call for icmpv6_send() If IPv6 is builtin, we do not need an expensive indirect call to reach icmp6_send(). v2: put inline keyword before the type to avoid sparse warnings. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/icmpv6.h | 22 +++++++++++++++++++++- net/ipv6/icmp.c | 5 +++-- net/ipv6/ip6_icmp.c | 10 +++++----- 3 files changed, 29 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 33d379602314..1b3371ae8193 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -13,12 +13,32 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include #if IS_ENABLED(CONFIG_IPV6) -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct in6_addr *force_saddr); +#if IS_BUILTIN(CONFIG_IPV6) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr); +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + icmp6_send(skb, type, code, info, NULL); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index fc5000370030..91e0f2fd2523 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -439,8 +439,8 @@ static int icmp6_iif(const struct sk_buff *skb) /* * Send an ICMP message in response to a packet in error */ -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr) { struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -625,6 +625,7 @@ out: out_bh_enable: local_bh_enable(); } +EXPORT_SYMBOL(icmp6_send); /* Slightly more convenient version of icmp6_send. 
*/ diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index e0086758b6ee..70c8c2f36c98 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -9,6 +9,8 @@ #if IS_ENABLED(CONFIG_IPV6) +#if !IS_BUILTIN(CONFIG_IPV6) + static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) @@ -37,14 +39,12 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) rcu_read_lock(); send = rcu_dereference(ip6_icmp_send); - - if (!send) - goto out; - send(skb, type, code, info, NULL); -out: + if (send) + send(skb, type, code, info, NULL); rcu_read_unlock(); } EXPORT_SYMBOL(icmpv6_send); +#endif #if IS_ENABLED(CONFIG_NF_NAT) #include -- cgit v1.2.3-59-g8ed1b From c7f03eea07682639ef320aab348b706c330941dd Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:24 +0200 Subject: mips: bmips: add BCM3368 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM3368 definitions in order to be able to include it from device tree files. Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-2-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm3368-clock.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 include/dt-bindings/clock/bcm3368-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm3368-clock.h b/include/dt-bindings/clock/bcm3368-clock.h new file mode 100644 index 000000000000..74a7382f77b8 --- /dev/null +++ b/include/dt-bindings/clock/bcm3368-clock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM3368_H +#define __DT_BINDINGS_CLOCK_BCM3368_H + +#define BCM3368_CLK_MAC 3 +#define BCM3368_CLK_TC 5 +#define BCM3368_CLK_US_TOP 6 +#define BCM3368_CLK_DS_TOP 7 +#define BCM3368_CLK_ACM 8 +#define BCM3368_CLK_SPI 9 +#define BCM3368_CLK_USBS 10 +#define BCM3368_CLK_BMU 11 +#define BCM3368_CLK_PCM 12 +#define BCM3368_CLK_NTP 13 +#define BCM3368_CLK_ACP_B 14 +#define BCM3368_CLK_ACP_A 15 +#define BCM3368_CLK_EMUSB 17 +#define BCM3368_CLK_ENET0 18 +#define BCM3368_CLK_ENET1 19 +#define BCM3368_CLK_USBSU 20 +#define BCM3368_CLK_EPHY 21 + +#endif /* __DT_BINDINGS_CLOCK_BCM3368_H */ -- cgit v1.2.3-59-g8ed1b From 020c89c5a981cb6d0424aadab8ae067a3b6bd8e6 Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:25 +0200 Subject: mips: bmips: add BCM6318 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6318 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-3-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6318-clock.h | 42 +++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6318-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6318-clock.h b/include/dt-bindings/clock/bcm6318-clock.h new file mode 100644 index 000000000000..c4417f8983ab --- /dev/null +++ b/include/dt-bindings/clock/bcm6318-clock.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6318_H +#define __DT_BINDINGS_CLOCK_BCM6318_H + +#define BCM6318_CLK_ADSL_ASB 0 +#define BCM6318_CLK_USB_ASB 1 +#define BCM6318_CLK_MIPS_ASB 2 +#define BCM6318_CLK_PCIE_ASB 3 +#define BCM6318_CLK_PHYMIPS_ASB 4 +#define BCM6318_CLK_ROBOSW_ASB 5 +#define BCM6318_CLK_SAR_ASB 6 +#define BCM6318_CLK_SDR_ASB 7 +#define BCM6318_CLK_SWREG_ASB 8 +#define BCM6318_CLK_PERIPH_ASB 9 +#define BCM6318_CLK_CPUBUS160 10 +#define BCM6318_CLK_ADSL 11 +#define BCM6318_CLK_SAR125 12 +#define BCM6318_CLK_MIPS 13 +#define BCM6318_CLK_PCIE 14 +#define BCM6318_CLK_ROBOSW250 16 +#define BCM6318_CLK_ROBOSW025 17 +#define BCM6318_CLK_SDR 19 +#define BCM6318_CLK_USBD 20 +#define BCM6318_CLK_HSSPI 25 +#define BCM6318_CLK_PCIE25 27 +#define BCM6318_CLK_PHYMIPS 28 +#define BCM6318_CLK_AFE 29 +#define BCM6318_CLK_QPROC 30 + +#define BCM6318_UCLK_ADSL 0 +#define BCM6318_UCLK_ARB 1 +#define BCM6318_UCLK_MIPS 2 +#define BCM6318_UCLK_PCIE 3 +#define BCM6318_UCLK_PERIPH 4 +#define BCM6318_UCLK_PHYMIPS 5 +#define BCM6318_UCLK_ROBOSW 6 +#define BCM6318_UCLK_SAR 7 +#define BCM6318_UCLK_SDR 8 +#define BCM6318_UCLK_USB 9 + +#endif /* __DT_BINDINGS_CLOCK_BCM6318_H */ -- cgit v1.2.3-59-g8ed1b From 92cd8bb27a692d93ba7442ec123e96528f5e992c Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:26 +0200 Subject: mips: bmips: add BCM6328 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6328 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-4-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6328-clock.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6328-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6328-clock.h b/include/dt-bindings/clock/bcm6328-clock.h new file mode 100644 index 000000000000..1f6a3103f3dc --- /dev/null +++ b/include/dt-bindings/clock/bcm6328-clock.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6328_H +#define __DT_BINDINGS_CLOCK_BCM6328_H + +#define BCM6328_CLK_PHYMIPS 0 +#define BCM6328_CLK_ADSL_QPROC 1 +#define BCM6328_CLK_ADSL_AFE 2 +#define BCM6328_CLK_ADSL 3 +#define BCM6328_CLK_MIPS 4 +#define BCM6328_CLK_SAR 5 +#define BCM6328_CLK_PCM 6 +#define BCM6328_CLK_USBD 7 +#define BCM6328_CLK_USBH 8 +#define BCM6328_CLK_HSSPI 9 +#define BCM6328_CLK_PCIE 10 +#define BCM6328_CLK_ROBOSW 11 + +#endif /* __DT_BINDINGS_CLOCK_BCM6328_H */ -- cgit v1.2.3-59-g8ed1b From d3499bda4e176de6853c24e5243f3906d9390d54 Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:27 +0200 Subject: mips: bmips: add BCM6358 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6358 definitions in order to be able to include it from device tree files. Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-5-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6358-clock.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6358-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6358-clock.h b/include/dt-bindings/clock/bcm6358-clock.h new file mode 100644 index 000000000000..980c9cac4765 --- /dev/null +++ b/include/dt-bindings/clock/bcm6358-clock.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6358_H +#define __DT_BINDINGS_CLOCK_BCM6358_H + +#define BCM6358_CLK_ENET 4 +#define BCM6358_CLK_ADSLPHY 5 +#define BCM6358_CLK_PCM 8 +#define BCM6358_CLK_SPI 9 +#define BCM6358_CLK_USBS 10 +#define BCM6358_CLK_SAR 11 +#define BCM6358_CLK_EMUSB 17 +#define BCM6358_CLK_ENET0 18 +#define BCM6358_CLK_ENET1 19 +#define BCM6358_CLK_USBSU 20 +#define BCM6358_CLK_EPHY 21 + +#endif /* __DT_BINDINGS_CLOCK_BCM6358_H */ -- cgit v1.2.3-59-g8ed1b From fb8fb3f13f86fda0af72c02691333fdba5164c20 Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:28 +0200 Subject: mips: bmips: add BCM6362 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6362 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-6-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6362-clock.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6362-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6362-clock.h b/include/dt-bindings/clock/bcm6362-clock.h new file mode 100644 index 000000000000..17655cd5bf25 --- /dev/null +++ b/include/dt-bindings/clock/bcm6362-clock.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6362_H +#define __DT_BINDINGS_CLOCK_BCM6362_H + +#define BCM6362_CLK_ADSL_QPROC 1 +#define BCM6362_CLK_ADSL_AFE 2 +#define BCM6362_CLK_ADSL 3 +#define BCM6362_CLK_MIPS 4 +#define BCM6362_CLK_WLAN_OCP 5 +#define BCM6362_CLK_SWPKT_USB 7 +#define BCM6362_CLK_SWPKT_SAR 8 +#define BCM6362_CLK_SAR 9 +#define BCM6362_CLK_ROBOSW 10 +#define BCM6362_CLK_PCM 11 +#define BCM6362_CLK_USBD 12 +#define BCM6362_CLK_USBH 13 +#define BCM6362_CLK_IPSEC 14 +#define BCM6362_CLK_SPI 15 +#define BCM6362_CLK_HSSPI 16 +#define BCM6362_CLK_PCIE 17 +#define BCM6362_CLK_FAP 18 +#define BCM6362_CLK_PHYMIPS 19 +#define BCM6362_CLK_NAND 20 + +#endif /* __DT_BINDINGS_CLOCK_BCM6362_H */ -- cgit v1.2.3-59-g8ed1b From ad31e793f246d5276bc24829cb3d1ca95c3c92ff Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:29 +0200 Subject: mips: bmips: add BCM6368 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6368 definitions in order to be able to include it from device tree files. Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-7-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6368-clock.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6368-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6368-clock.h b/include/dt-bindings/clock/bcm6368-clock.h new file mode 100644 index 000000000000..f161d5333883 --- /dev/null +++ b/include/dt-bindings/clock/bcm6368-clock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6368_H +#define __DT_BINDINGS_CLOCK_BCM6368_H + +#define BCM6368_CLK_VDSL_QPROC 2 +#define BCM6368_CLK_VDSL_AFE 3 +#define BCM6368_CLK_VDSL_BONDING 4 +#define BCM6368_CLK_VDSL 5 +#define BCM6368_CLK_PHYMIPS 6 +#define BCM6368_CLK_SWPKT_USB 7 +#define BCM6368_CLK_SWPKT_SAR 8 +#define BCM6368_CLK_SPI 9 +#define BCM6368_CLK_USBD 10 +#define BCM6368_CLK_SAR 11 +#define BCM6368_CLK_ROBOSW 12 +#define BCM6368_CLK_UTOPIA 13 +#define BCM6368_CLK_PCM 14 +#define BCM6368_CLK_USBH 15 +#define BCM6368_CLK_DIS_GLESS 16 +#define BCM6368_CLK_NAND 17 +#define BCM6368_CLK_IPSEC 18 + +#endif /* __DT_BINDINGS_CLOCK_BCM6368_H */ -- cgit v1.2.3-59-g8ed1b From f3cd8c96a97ca970a116af092555778f792d0abf Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:30 +0200 Subject: mips: bmips: add BCM63268 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM63268 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-8-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm63268-clock.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 include/dt-bindings/clock/bcm63268-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm63268-clock.h b/include/dt-bindings/clock/bcm63268-clock.h new file mode 100644 index 000000000000..da23e691d359 --- /dev/null +++ b/include/dt-bindings/clock/bcm63268-clock.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM63268_H +#define __DT_BINDINGS_CLOCK_BCM63268_H + +#define BCM63268_CLK_DIS_GLESS 0 +#define BCM63268_CLK_VDSL_QPROC 1 +#define BCM63268_CLK_VDSL_AFE 2 +#define BCM63268_CLK_VDSL 3 +#define BCM63268_CLK_MIPS 4 +#define BCM63268_CLK_WLAN_OCP 5 +#define BCM63268_CLK_DECT 6 +#define BCM63268_CLK_FAP0 7 +#define BCM63268_CLK_FAP1 8 +#define BCM63268_CLK_SAR 9 +#define BCM63268_CLK_ROBOSW 10 +#define BCM63268_CLK_PCM 11 +#define BCM63268_CLK_USBD 12 +#define BCM63268_CLK_USBH 13 +#define BCM63268_CLK_IPSEC 14 +#define BCM63268_CLK_SPI 15 +#define BCM63268_CLK_HSSPI 16 +#define BCM63268_CLK_PCIE 17 +#define BCM63268_CLK_PHYMIPS 18 +#define BCM63268_CLK_GMAC 19 +#define BCM63268_CLK_NAND 20 +#define BCM63268_CLK_TBUS 27 +#define BCM63268_CLK_ROBOSW250 31 + +#endif /* __DT_BINDINGS_CLOCK_BCM63268_H */ -- cgit v1.2.3-59-g8ed1b From c2710fdf935bb1286e8eb6a6b44991bab1fe87af Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 16 Jun 2020 15:24:15 -0500 Subject: dt-bindings: agilex: add NAND_X_CLK and NAND_ECC_CLK Add the NAND_X_CLK and NAND_ECC_CLK clocks. Signed-off-by: Dinh Nguyen Link: https://lore.kernel.org/r/20200616202417.14376-1-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/agilex-clock.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h index f19cf8ccbdd2..06feca07e08e 100644 --- a/include/dt-bindings/clock/agilex-clock.h +++ b/include/dt-bindings/clock/agilex-clock.h @@ -65,6 +65,8 @@ #define AGILEX_SDMMC_CLK 50 #define AGILEX_SPI_M_CLK 51 #define AGILEX_USB_CLK 52 -#define AGILEX_NUM_CLKS 53 +#define AGILEX_NAND_X_CLK 53 +#define AGILEX_NAND_ECC_CLK 54 +#define AGILEX_NUM_CLKS 55 #endif /* __AGILEX_CLOCK_H */ -- cgit v1.2.3-59-g8ed1b From c746053d275c8b6ff1d713addabf049c9c9a58fc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:58 +0100 Subject: net: phy: add support for probing MMDs >= 8 for devices-in-package Add support for probing MMDs above 7 for a valid devices-in-package specifier, but only probe the vendor MMDs for this if they also report that there the device is present in status register 2. This avoids issues where the MMD is implemented, but does not provide IEEE 802.3 compliant registers (such as the MV88X3310 PHY.) Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 40 ++++++++++++++++++++++++++++++++++++++-- include/linux/phy.h | 2 ++ 2 files changed, 40 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 09096c3ceb86..8d9af2772853 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -661,6 +661,28 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, } EXPORT_SYMBOL(phy_device_create); +/* phy_c45_probe_present - checks to see if a MMD is present in the package + * @bus: the target MII bus + * @prtad: PHY package address on the MII bus + * @devad: PHY device (MMD) address + * + * Read the MDIO_STAT2 register, and check whether a device is responding + * at this address. + * + * Returns: negative error number on bus access error, zero if no device + * is responding, or positive if a device is present. + */ +static int phy_c45_probe_present(struct mii_bus *bus, int prtad, int devad) +{ + int stat2; + + stat2 = mdiobus_c45_read(bus, prtad, devad, MDIO_STAT2); + if (stat2 < 0) + return stat2; + + return (stat2 & MDIO_STAT2_DEVPRST) == MDIO_STAT2_DEVPRST_VAL; +} + /* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. * @bus: the target MII bus * @addr: PHY address on the MII bus @@ -711,12 +733,26 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); u32 *devs = &c45_ids->devices_in_package; - int i, phy_reg; + int i, ret, phy_reg; /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ - for (i = 1; i < num_ids && *devs == 0; i++) { + for (i = 1; i < MDIO_MMD_NUM && *devs == 0; i++) { + if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { + /* Check that there is a device present at this + * address before reading the devices-in-package + * register to avoid reading garbage from the PHY. + * Some PHYs (88x3310) vendor space is not IEEE802.3 + * compliant. + */ + ret = phy_c45_probe_present(bus, addr, i); + if (ret < 0) + return -EIO; + + if (!ret) + continue; + } phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); if (phy_reg < 0) return -EIO; diff --git a/include/linux/phy.h b/include/linux/phy.h index 8c05d0fb5c00..abe318387331 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -388,6 +388,8 @@ enum phy_state { PHY_CABLETEST, }; +#define MDIO_MMD_NUM 32 + /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers * @devices_in_package: Bit vector of devices present. -- cgit v1.2.3-59-g8ed1b From 320ed3bf900075614c43499dc01db8d25717b986 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:08 +0100 Subject: net: phy: split devices_in_package We have two competing requirements for the devices_in_package field. We want to use it as a bit array indicating which MMDs are present, but we also want to know if the Clause 22 registers are present. Since "devices in package" is a term used in the 802.3 specification, keep this as the as-specified values read from the PHY, and introduce a new member "mmds_present" to indicate which MMDs are actually present in the PHY, derived from the "devices in package" value. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-c45.c | 4 ++-- drivers/net/phy/phy_device.c | 6 +++--- drivers/net/phy/phylink.c | 8 ++++---- include/linux/phy.h | 4 +++- 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index defe09d94422..bd11e62bfdfe 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -219,7 +219,7 @@ int genphy_c45_read_link(struct phy_device *phydev) int val, devad; bool link = true; - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); if (val < 0) return val; @@ -409,7 +409,7 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) int val; linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8e11e3d3a801..c1f81c4d0bb3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -709,9 +709,6 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, return -EIO; *devices_in_package |= phy_reg; - /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ - *devices_in_package &= ~BIT(0); - return 0; } @@ -789,6 +786,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, } c45_ids->devices_in_package = devs_in_pkg; + /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ + c45_ids->mmds_present = devs_in_pkg & ~BIT(0); return 0; } @@ -857,6 +856,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) int r; c45_ids.devices_in_package = 0; + c45_ids.mmds_present = 0; memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); if (is_c45) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0ab65fb75258..7ce787c227b3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1638,11 +1638,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: - devad = __ffs(phydev->c45_ids.devices_in_package); + devad = __ffs(phydev->c45_ids.mmds_present); break; case MII_ADVERTISE: case MII_LPA: - if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (reg == MII_ADVERTISE) @@ -1678,11 +1678,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: - devad = __ffs(phydev->c45_ids.devices_in_package); + devad = __ffs(phydev->c45_ids.mmds_present); break; case MII_ADVERTISE: case MII_LPA: - if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (reg == MII_ADVERTISE) diff --git a/include/linux/phy.h b/include/linux/phy.h index abe318387331..19d9e040ad84 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -392,11 +392,13 @@ enum phy_state { /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers - * @devices_in_package: Bit vector of devices present. + * @devices_in_package: IEEE 802.3 devices in package register value. + * @mmds_present: bit vector of MMDs present. 
* @device_ids: The device identifer for each present device. */ struct phy_c45_device_ids { u32 devices_in_package; + u32 mmds_present; u32 device_ids[8]; }; -- cgit v1.2.3-59-g8ed1b From 389a338999875b6e7b111096e8b6484434556449 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:13 +0100 Subject: net: phy: read MMD ID from all present MMDs Expand the device_ids[] array to allow all MMD IDs to be read rather than just the first 8 MMDs, but only read the ID if the MDIO_STAT2 register reports that a device really is present here for these new devices to maintain compatibility with our current behaviour. Note that only a limited number of devices have MDIO_STAT2. 88X3310 PHY vendor MMDs do are marked as present in the devices_in_package, but do not contain IEE 802.3 compatible register sets in their lower space. This avoids reading incorrect values as MMD identifiers. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 13 +++++++++++++ include/linux/phy.h | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c1f81c4d0bb3..29ef4456ac25 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -774,6 +774,19 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, if (!(devs_in_pkg & (1 << i))) continue; + if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { + /* Probe the "Device Present" bits for the vendor MMDs + * to ignore these if they do not contain IEEE 802.3 + * registers. + */ + ret = phy_c45_probe_present(bus, addr, i); + if (ret < 0) + return ret; + + if (!ret) + continue; + } + phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1); if (phy_reg < 0) return -EIO; diff --git a/include/linux/phy.h b/include/linux/phy.h index 19d9e040ad84..9248dd2ce4ca 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -399,7 +399,7 @@ enum phy_state { struct phy_c45_device_ids { u32 devices_in_package; u32 mmds_present; - u32 device_ids[8]; + u32 device_ids[MDIO_MMD_NUM]; }; struct macsec_context; -- cgit v1.2.3-59-g8ed1b From 775f43facfe89af605d77a9669aa311b5b95cd07 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Wed, 17 Jun 2020 18:46:42 +0200 Subject: Drivers: hv: vmbus: Remove the lock field from the vmbus_channel struct The spinlock is (now) *not used to protect test-and-set accesses to attributes of the structure or sc_list operations. There is, AFAICT, a distinct lack of {WRITE,READ}_ONCE()s in the handling of channel->state, but the changes below do not seem to make things "worse". 
;-) Signed-off-by: Andrea Parri (Microsoft) Link: https://lore.kernel.org/r/20200617164642.37393-9-parri.andrea@gmail.com Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- drivers/hv/channel.c | 6 +----- drivers/hv/channel_mgmt.c | 1 - include/linux/hyperv.h | 6 ------ 3 files changed, 1 insertion(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 8848d1548b3f..3ebda7707e46 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -129,12 +129,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel, send_pages = newchannel->ringbuffer_send_offset; recv_pages = newchannel->ringbuffer_pagecount - send_pages; - spin_lock_irqsave(&newchannel->lock, flags); - if (newchannel->state != CHANNEL_OPEN_STATE) { - spin_unlock_irqrestore(&newchannel->lock, flags); + if (newchannel->state != CHANNEL_OPEN_STATE) return -EINVAL; - } - spin_unlock_irqrestore(&newchannel->lock, flags); newchannel->state = CHANNEL_OPENING_STATE; newchannel->onchannel_callback = onchannelcallback; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 92f8bb2077a9..591106cf58fc 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -317,7 +317,6 @@ static struct vmbus_channel *alloc_channel(void) return NULL; spin_lock_init(&channel->sched_lock); - spin_lock_init(&channel->lock); init_completion(&channel->rescind_event); INIT_LIST_HEAD(&channel->sc_list); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 690394b79d72..38100e80360a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -840,12 +840,6 @@ struct vmbus_channel { */ void (*chn_rescind_callback)(struct vmbus_channel *channel); - /* - * The spinlock to protect the structure. It is being used to protect - * test-and-set access to various attributes of the structure as well - * as all sc_list operations. - */ - spinlock_t lock; /* * All Sub-channels of a primary channel are linked here. */ -- cgit v1.2.3-59-g8ed1b From f11d59d87b8622d4cf9f856c0b8029fb030d8612 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 25 May 2020 14:38:53 +0300 Subject: iio: Move attach/detach of the poll func to the core All devices using a triggered buffer need to attach and detach the trigger to the device in order to properly work. Instead of doing this in each and every driver by hand move this into the core. At this point in time, all drivers should have been resolved to attach/detach the poll-function in the same order. This patch removes all explicit calls of iio_triggered_buffer_postenable() & iio_triggered_buffer_predisable() in all drivers, since the core handles now the pollfunc attach/detach. The more peculiar change is for the 'at91-sama5d2_adc' driver, since it's not immediately obvious that removing the hooks doesn't break anything. Eugen was able to test on at91-sama5d2-adc driver, sama5d2-xplained board. All seems to be fine. 
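To illustrate the driver-side effect (a hedged sketch with a made-up "foo" driver, not
taken from this series): buffer setup ops now handle device state only, since the core
attaches and detaches indio_dev->pollfunc around them; drivers with nothing else to do
can pass NULL setup_ops to iio_triggered_buffer_setup(). The foo_hw_*() helpers below
are hypothetical:

    static int foo_buffer_postenable(struct iio_dev *indio_dev)
    {
            struct foo_state *st = iio_priv(indio_dev);

            /* Device-specific enable only; no iio_triggered_buffer_postenable() */
            return foo_hw_start_sampling(st);
    }

    static int foo_buffer_predisable(struct iio_dev *indio_dev)
    {
            struct foo_state *st = iio_priv(indio_dev);

            /* Device-specific disable only; no iio_triggered_buffer_predisable() */
            return foo_hw_stop_sampling(st);
    }

    static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
            .postenable = foo_buffer_postenable,
            .predisable = foo_buffer_predisable,
    };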
Signed-off-by: Lars-Peter Clausen Signed-off-by: Alexandru Ardelean Tested-by: Eugen Hristev #for at91-sama5d2-adc Signed-off-by: Jonathan Cameron --- drivers/iio/accel/adxl372.c | 20 +++-------- drivers/iio/accel/bmc150-accel-core.c | 4 +-- drivers/iio/accel/kxcjk-1013.c | 2 -- drivers/iio/accel/kxsd9.c | 2 -- drivers/iio/accel/st_accel_buffer.c | 22 +++--------- drivers/iio/accel/stk8312.c | 2 -- drivers/iio/accel/stk8ba50.c | 2 -- drivers/iio/adc/ad7266.c | 2 -- drivers/iio/adc/ad7606.c | 3 +- drivers/iio/adc/ad7766.c | 2 -- drivers/iio/adc/ad7768-1.c | 8 +---- drivers/iio/adc/ad7887.c | 2 -- drivers/iio/adc/ad_sigma_delta.c | 5 --- drivers/iio/adc/at91-sama5d2_adc.c | 18 ---------- drivers/iio/adc/dln2-adc.c | 12 +------ drivers/iio/adc/mxs-lradc-adc.c | 2 -- drivers/iio/adc/stm32-adc.c | 36 +++----------------- drivers/iio/adc/stm32-dfsdm-adc.c | 39 +++------------------- drivers/iio/adc/ti-adc084s021.c | 2 -- drivers/iio/adc/ti-ads1015.c | 2 -- drivers/iio/adc/vf610_adc.c | 7 +--- drivers/iio/adc/xilinx-xadc-core.c | 2 -- drivers/iio/buffer/industrialio-triggered-buffer.c | 10 +----- drivers/iio/chemical/atlas-sensor.c | 6 +--- drivers/iio/dummy/iio_simple_dummy_buffer.c | 14 -------- drivers/iio/gyro/bmg160_core.c | 2 -- drivers/iio/gyro/mpu3050-core.c | 2 -- drivers/iio/gyro/st_gyro_buffer.c | 21 +++--------- drivers/iio/humidity/hdc100x.c | 12 +------ drivers/iio/humidity/hts221_buffer.c | 2 -- drivers/iio/iio_core_trigger.h | 17 ++++++++++ drivers/iio/industrialio-buffer.c | 13 ++++++++ drivers/iio/industrialio-trigger.c | 22 +++--------- drivers/iio/light/gp2ap020a00f.c | 10 ------ drivers/iio/light/isl29125.c | 20 ++--------- drivers/iio/light/rpr0521.c | 2 -- drivers/iio/light/si1145.c | 2 -- drivers/iio/light/st_uvis25_core.c | 2 -- drivers/iio/light/tcs3414.c | 20 ++--------- drivers/iio/light/vcnl4000.c | 35 ++++--------------- drivers/iio/magnetometer/bmc150_magn.c | 2 -- drivers/iio/magnetometer/rm3100-core.c | 2 -- drivers/iio/magnetometer/st_magn_buffer.c | 26 ++------------- drivers/iio/potentiostat/lmp91000.c | 13 ++------ drivers/iio/pressure/st_pressure_buffer.c | 26 ++------------- drivers/iio/pressure/zpa2326.c | 27 ++++++--------- drivers/iio/proximity/sx9310.c | 2 -- drivers/iio/proximity/sx9500.c | 9 ----- include/linux/iio/trigger_consumer.h | 7 ---- 49 files changed, 93 insertions(+), 429 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index 26ca45073a56..e7e316b75e87 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -795,13 +795,9 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) unsigned int mask; int i, ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0); if (ret < 0) - goto err; + return ret; mask = *indio_dev->active_scan_mask; @@ -810,10 +806,8 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) break; } - if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) { - ret = -EINVAL; - goto err; - } + if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) + return -EINVAL; st->fifo_format = adxl372_axis_lookup_table[i].fifo_format; st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, @@ -833,14 +827,10 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) if (ret < 0) { st->fifo_mode = ADXL372_FIFO_BYPASSED; adxl372_set_interrupts(st, 0, 0); - goto err; + return ret; } return 0; - -err: - 
iio_triggered_buffer_predisable(indio_dev); - return ret; } static int adxl372_buffer_predisable(struct iio_dev *indio_dev) @@ -851,7 +841,7 @@ static int adxl372_buffer_predisable(struct iio_dev *indio_dev) st->fifo_mode = ADXL372_FIFO_BYPASSED; adxl372_configure_fifo(st); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops adxl372_buffer_ops = { diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 8f60d0727ee8..24864d9dfab5 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1411,7 +1411,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev) int ret = 0; if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - return iio_triggered_buffer_postenable(indio_dev); + return 0; mutex_lock(&data->mutex); @@ -1443,7 +1443,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev) struct bmc150_accel_data *data = iio_priv(indio_dev); if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - return iio_triggered_buffer_predisable(indio_dev); + return 0; mutex_lock(&data->mutex); diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 6b93521c0e17..beb38d9d607d 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1027,9 +1027,7 @@ static const struct iio_chan_spec kxcjk1013_channels[] = { static const struct iio_buffer_setup_ops kxcjk1013_buffer_setup_ops = { .preenable = kxcjk1013_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, .postdisable = kxcjk1013_buffer_postdisable, - .predisable = iio_triggered_buffer_predisable, }; static const struct iio_info kxcjk1013_info = { diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 63b1d8ee6c6f..66b2e4cf24cf 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -252,8 +252,6 @@ static int kxsd9_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops kxsd9_buffer_setup_ops = { .preenable = kxsd9_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = kxsd9_buffer_postdisable, }; diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c index b5c814ef1637..492263589e04 100644 --- a/drivers/iio/accel/st_accel_buffer.c +++ b/drivers/iio/accel/st_accel_buffer.c @@ -33,13 +33,9 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev) { int err; - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]); if (err < 0) - goto st_accel_buffer_predisable; + return err; err = st_sensors_set_enable(indio_dev, true); if (err < 0) @@ -49,27 +45,19 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev) st_accel_buffer_enable_all_axis: st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); -st_accel_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); return err; } static int st_accel_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; + int err; err = st_sensors_set_enable(indio_dev, false); if (err < 0) - goto st_accel_buffer_predisable; - - err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); - -st_accel_buffer_predisable: - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; + return err; - return err; + return 
st_sensors_set_axis_enable(indio_dev, + ST_SENSORS_ENABLE_ALL_AXIS); } static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = { diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c index 616d0b90dd92..3b59887a8581 100644 --- a/drivers/iio/accel/stk8312.c +++ b/drivers/iio/accel/stk8312.c @@ -492,8 +492,6 @@ static int stk8312_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops stk8312_buffer_setup_ops = { .preenable = stk8312_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = stk8312_buffer_postdisable, }; diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c index 98fad8cd6fe7..3ead378b02c9 100644 --- a/drivers/iio/accel/stk8ba50.c +++ b/drivers/iio/accel/stk8ba50.c @@ -376,8 +376,6 @@ static int stk8ba50_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops stk8ba50_buffer_setup_ops = { .preenable = stk8ba50_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = stk8ba50_buffer_postdisable, }; diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 3dc15ec04f66..a8ec3efd659e 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -74,8 +74,6 @@ static int ad7266_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { .preenable = &ad7266_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad7266_postdisable, }; diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index 23d8277086ee..ee7b108688b3 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -499,7 +499,6 @@ static int ad7606_buffer_postenable(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); - iio_triggered_buffer_postenable(indio_dev); gpiod_set_value(st->gpio_convst, 1); return 0; @@ -511,7 +510,7 @@ static int ad7606_buffer_predisable(struct iio_dev *indio_dev) gpiod_set_value(st->gpio_convst, 0); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops ad7606_buffer_ops = { diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c index 4bcf8a34bc46..b6b6765be7b4 100644 --- a/drivers/iio/adc/ad7766.c +++ b/drivers/iio/adc/ad7766.c @@ -178,8 +178,6 @@ static const struct ad7766_chip_info ad7766_chip_info[] = { static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = { .preenable = &ad7766_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad7766_postdisable, }; diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index be2aed551324..0e93b0766eb4 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -490,7 +490,6 @@ static int ad7768_buffer_postenable(struct iio_dev *indio_dev) { struct ad7768_state *st = iio_priv(indio_dev); - iio_triggered_buffer_postenable(indio_dev); /* * Write a 1 to the LSB of the INTERFACE_FORMAT register to enter * continuous read mode. 
Subsequent data reads do not require an @@ -502,17 +501,12 @@ static int ad7768_buffer_postenable(struct iio_dev *indio_dev) static int ad7768_buffer_predisable(struct iio_dev *indio_dev) { struct ad7768_state *st = iio_priv(indio_dev); - int ret; /* * To exit continuous read mode, perform a single read of the ADC_DATA * reg (0x2C), which allows further configuration of the device. */ - ret = ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3); - if (ret < 0) - return ret; - - return iio_triggered_buffer_predisable(indio_dev); + return ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3); } static const struct iio_buffer_setup_ops ad7768_buffer_ops = { diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index d1d43fb700ba..f8e6243457bb 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c @@ -136,8 +136,6 @@ done: static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = { .preenable = &ad7887_ring_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad7887_ring_postdisable, }; diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index dd3d54b3bc8b..3554ee6ee099 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -345,10 +345,6 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) unsigned int channel; int ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - channel = find_first_bit(indio_dev->active_scan_mask, indio_dev->masklength); ret = ad_sigma_delta_set_channel(sigma_delta, @@ -441,7 +437,6 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p) static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = { .postenable = &ad_sd_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad_sd_buffer_postdisable, .validate_scan_mask = &iio_validate_scan_mask_onehot, }; diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 1b4340a6c6bb..6cc06f1566eb 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -937,14 +937,6 @@ static int at91_adc_buffer_preenable(struct iio_dev *indio_dev) return 0; } -static int at91_adc_buffer_postenable(struct iio_dev *indio_dev) -{ - if (at91_adc_current_chan_is_touch(indio_dev)) - return 0; - - return iio_triggered_buffer_postenable(indio_dev); -} - static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev) { struct at91_adc_state *st = iio_priv(indio_dev); @@ -995,19 +987,9 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev) return 0; } -static int at91_adc_buffer_predisable(struct iio_dev *indio_dev) -{ - if (at91_adc_current_chan_is_touch(indio_dev)) - return 0; - - return iio_triggered_buffer_predisable(indio_dev); -} - static const struct iio_buffer_setup_ops at91_buffer_setup_ops = { .preenable = &at91_adc_buffer_preenable, .postdisable = &at91_adc_buffer_postdisable, - .postenable = &at91_adc_buffer_postenable, - .predisable = &at91_adc_buffer_predisable, }; static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio, diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index 6689cb93c6a2..0d53ef18e045 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -524,10 +524,6 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) u16 conflict; unsigned int trigger_chan; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return 
ret; - mutex_lock(&dln2->mutex); /* Enable ADC */ @@ -541,7 +537,6 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) (int)conflict); ret = -EBUSY; } - iio_triggered_buffer_predisable(indio_dev); return ret; } @@ -555,7 +550,6 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&dln2->mutex); if (ret < 0) { dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); - iio_triggered_buffer_predisable(indio_dev); return ret; } } else { @@ -568,7 +562,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) { - int ret, ret2; + int ret; struct dln2_adc *dln2 = iio_priv(indio_dev); mutex_lock(&dln2->mutex); @@ -586,10 +580,6 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) if (ret < 0) dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); - ret2 = iio_triggered_buffer_predisable(indio_dev); - if (ret == 0) - ret = ret2; - return ret; } diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index c371565374d1..30e29f44ebd2 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c @@ -568,8 +568,6 @@ static bool mxs_lradc_adc_validate_scan_mask(struct iio_dev *iio, static const struct iio_buffer_setup_ops mxs_lradc_adc_buffer_ops = { .preenable = &mxs_lradc_adc_buffer_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &mxs_lradc_adc_buffer_postdisable, .validate_scan_mask = &mxs_lradc_adc_validate_scan_mask, }; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index e5a2bfcb5681..3eb9ebe8372f 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1492,7 +1492,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev) return 0; } -static int __stm32_adc_buffer_postenable(struct iio_dev *indio_dev) +static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct device *dev = indio_dev->dev.parent; @@ -1537,22 +1537,7 @@ err_pm_put: return ret; } -static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev) -{ - int ret; - - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - - ret = __stm32_adc_buffer_postenable(indio_dev); - if (ret < 0) - iio_triggered_buffer_predisable(indio_dev); - - return ret; -} - -static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev) +static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct device *dev = indio_dev->dev.parent; @@ -1571,19 +1556,8 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev) pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); -} - -static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) -{ - int ret; - - __stm32_adc_buffer_predisable(indio_dev); - - ret = iio_triggered_buffer_predisable(indio_dev); - if (ret < 0) - dev_err(&indio_dev->dev, "predisable failed\n"); - return ret; + return 0; } static const struct iio_buffer_setup_ops stm32_adc_buffer_setup_ops = { @@ -2024,7 +1998,7 @@ static int stm32_adc_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); if (iio_buffer_enabled(indio_dev)) - __stm32_adc_buffer_predisable(indio_dev); + stm32_adc_buffer_predisable(indio_dev); return pm_runtime_force_suspend(dev); } @@ -2046,7 +2020,7 @@ static int 
stm32_adc_resume(struct device *dev) if (ret < 0) return ret; - return __stm32_adc_buffer_postenable(indio_dev); + return stm32_adc_buffer_postenable(indio_dev); } #endif diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 017d1ba4c14e..5e10fb4f3704 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -995,7 +995,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev, return 0; } -static int __stm32_dfsdm_postenable(struct iio_dev *indio_dev) +static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); int ret; @@ -1038,30 +1038,7 @@ err_stop_hwc: return ret; } -static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) -{ - int ret; - - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - } - - ret = __stm32_dfsdm_postenable(indio_dev); - if (ret < 0) - goto err_predisable; - - return 0; - -err_predisable: - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - iio_triggered_buffer_predisable(indio_dev); - - return ret; -} - -static void __stm32_dfsdm_predisable(struct iio_dev *indio_dev) +static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); @@ -1073,14 +1050,6 @@ static void __stm32_dfsdm_predisable(struct iio_dev *indio_dev) if (adc->hwc) iio_hw_consumer_disable(adc->hwc); -} - -static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) -{ - __stm32_dfsdm_predisable(indio_dev); - - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - iio_triggered_buffer_predisable(indio_dev); return 0; } @@ -1668,7 +1637,7 @@ static int __maybe_unused stm32_dfsdm_adc_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); if (iio_buffer_enabled(indio_dev)) - __stm32_dfsdm_predisable(indio_dev); + stm32_dfsdm_predisable(indio_dev); return 0; } @@ -1691,7 +1660,7 @@ static int __maybe_unused stm32_dfsdm_adc_resume(struct device *dev) } if (iio_buffer_enabled(indio_dev)) - __stm32_dfsdm_postenable(indio_dev); + stm32_dfsdm_postenable(indio_dev); return 0; } diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index f22f004c1eb6..c2db2435f419 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -187,8 +187,6 @@ static const struct iio_info adc084s021_info = { static const struct iio_buffer_setup_ops adc084s021_buffer_setup_ops = { .preenable = adc084s021_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = adc084s021_buffer_postdisable, }; diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 1a5f520080d2..f42ab112986e 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -788,8 +788,6 @@ static int ads1015_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { .preenable = ads1015_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = ads1015_buffer_postdisable, .validate_scan_mask = &iio_validate_scan_mask_onehot, }; diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index f5637bf38d37..1d794cf3e3f1 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -724,13 +724,8 @@ static int vf610_adc_buffer_postenable(struct iio_dev 
*indio_dev) { struct vf610_adc *info = iio_priv(indio_dev); unsigned int channel; - int ret; int val; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - val = readl(info->regs + VF610_REG_ADC_GC); val |= VF610_ADC_ADCON; writel(val, info->regs + VF610_REG_ADC_GC); @@ -761,7 +756,7 @@ static int vf610_adc_buffer_predisable(struct iio_dev *indio_dev) writel(hc_cfg, info->regs + VF610_REG_ADC_HC0); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 69be9e82fe3e..d0b7ef296afb 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -839,8 +839,6 @@ err: static const struct iio_buffer_setup_ops xadc_buffer_ops = { .preenable = &xadc_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &xadc_postdisable, }; diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c index e8046c1ecd6b..6c20a83f887e 100644 --- a/drivers/iio/buffer/industrialio-triggered-buffer.c +++ b/drivers/iio/buffer/industrialio-triggered-buffer.c @@ -13,11 +13,6 @@ #include #include -static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, -}; - /** * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc * @indio_dev: IIO device structure @@ -67,10 +62,7 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev, } /* Ring buffer functions - here trigger setup related */ - if (setup_ops) - indio_dev->setup_ops = setup_ops; - else - indio_dev->setup_ops = &iio_triggered_buffer_setup_ops; + indio_dev->setup_ops = setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c index 047f4b3f5a0e..43069636fcd5 100644 --- a/drivers/iio/chemical/atlas-sensor.c +++ b/drivers/iio/chemical/atlas-sensor.c @@ -410,10 +410,6 @@ static int atlas_buffer_postenable(struct iio_dev *indio_dev) struct atlas_data *data = iio_priv(indio_dev); int ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - ret = pm_runtime_get_sync(&data->client->dev); if (ret < 0) { pm_runtime_put_noidle(&data->client->dev); @@ -437,7 +433,7 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev) if (ret) return ret; - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_trigger_ops atlas_interrupt_trigger_ops = { diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c index 17606eca42b4..8e13c53d4360 100644 --- a/drivers/iio/dummy/iio_simple_dummy_buffer.c +++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c @@ -99,20 +99,6 @@ done: } static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = { - /* - * iio_triggered_buffer_postenable: - * Generic function that simply attaches the pollfunc to the trigger. - * Replace this to mess with hardware state before we attach the - * trigger. - */ - .postenable = &iio_triggered_buffer_postenable, - /* - * iio_triggered_buffer_predisable: - * Generic function that simple detaches the pollfunc from the trigger. 
- * Replace this to put hardware state back again after the trigger is - * detached but before userspace knows we have disabled the ring. - */ - .predisable = &iio_triggered_buffer_predisable, }; int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev) diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 933492b33189..8ddda96455fc 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1051,8 +1051,6 @@ static int bmg160_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops bmg160_buffer_setup_ops = { .preenable = bmg160_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = bmg160_buffer_postdisable, }; diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 157330451e0a..00e58060968c 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -662,8 +662,6 @@ static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = { .preenable = mpu3050_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = mpu3050_buffer_postdisable, }; diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c index 9c92ff7a82be..4feb7ada7195 100644 --- a/drivers/iio/gyro/st_gyro_buffer.c +++ b/drivers/iio/gyro/st_gyro_buffer.c @@ -33,13 +33,9 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev) { int err; - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]); if (err < 0) - goto st_gyro_buffer_predisable; + return err; err = st_sensors_set_enable(indio_dev, true); if (err < 0) @@ -49,27 +45,18 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev) st_gyro_buffer_enable_all_axis: st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); -st_gyro_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); return err; } static int st_gyro_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; + int err; err = st_sensors_set_enable(indio_dev, false); if (err < 0) - goto st_gyro_buffer_predisable; - - err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); - -st_gyro_buffer_predisable: - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; + return err; - return err; + return st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); } static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = { diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 3331141734c8..413204cd9bbd 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -278,17 +278,11 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev) struct hdc100x_data *data = iio_priv(indio_dev); int ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - /* Buffer is enabled. 
First set ACQ Mode, then attach poll func */ mutex_lock(&data->lock); ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, HDC100X_REG_CONFIG_ACQ_MODE); mutex_unlock(&data->lock); - if (ret) - iio_triggered_buffer_predisable(indio_dev); return ret; } @@ -296,16 +290,12 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev) static int hdc100x_buffer_predisable(struct iio_dev *indio_dev) { struct hdc100x_data *data = iio_priv(indio_dev); - int ret, ret2; + int ret; mutex_lock(&data->lock); ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0); mutex_unlock(&data->lock); - ret2 = iio_triggered_buffer_predisable(indio_dev); - if (ret == 0) - ret = ret2; - return ret; } diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 21c6c160462d..f75f0b218fea 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -153,8 +153,6 @@ static int hts221_buffer_postdisable(struct iio_dev *iio_dev) static const struct iio_buffer_setup_ops hts221_buffer_ops = { .preenable = hts221_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = hts221_buffer_postdisable, }; diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h index e59fe2f36bbb..9d1a92cc6480 100644 --- a/drivers/iio/iio_core_trigger.h +++ b/drivers/iio/iio_core_trigger.h @@ -18,6 +18,12 @@ void iio_device_register_trigger_consumer(struct iio_dev *indio_dev); **/ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev); + +int iio_trigger_attach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf); +int iio_trigger_detach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf); + #else /** @@ -37,4 +43,15 @@ static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) { } +static inline int iio_trigger_attach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) +{ + return 0; +} +static inline int iio_trigger_detach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) +{ + return 0; +} + #endif /* CONFIG_TRIGGER_CONSUMER */ diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 9fa238c0a7d4..329dd4d6757a 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -20,6 +20,7 @@ #include #include "iio_core.h" +#include "iio_core_trigger.h" #include #include #include @@ -972,6 +973,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, } } + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + ret = iio_trigger_attach_poll_func(indio_dev->trig, + indio_dev->pollfunc); + if (ret) + goto err_disable_buffers; + } + return 0; err_disable_buffers: @@ -998,6 +1006,11 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) if (list_empty(&indio_dev->buffer_list)) return 0; + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + iio_trigger_detach_poll_func(indio_dev->trig, + indio_dev->pollfunc); + } + /* * If things go wrong at some step in disable we still need to continue * to perform the other steps, otherwise we leave the device in a diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 53d1931f6be8..6f16357fd732 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -239,8 +239,8 @@ static void iio_trigger_put_irq(struct iio_trigger *trig, int irq) * the relevant function is in there may be the best option. 
*/ /* Worth protecting against double additions? */ -static int iio_trigger_attach_poll_func(struct iio_trigger *trig, - struct iio_poll_func *pf) +int iio_trigger_attach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) { int ret = 0; bool notinuse @@ -290,8 +290,8 @@ out_put_module: return ret; } -static int iio_trigger_detach_poll_func(struct iio_trigger *trig, - struct iio_poll_func *pf) +int iio_trigger_detach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) { int ret = 0; bool no_other_users @@ -705,17 +705,3 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) if (indio_dev->trig) iio_trigger_put(indio_dev->trig); } - -int iio_triggered_buffer_postenable(struct iio_dev *indio_dev) -{ - return iio_trigger_attach_poll_func(indio_dev->trig, - indio_dev->pollfunc); -} -EXPORT_SYMBOL(iio_triggered_buffer_postenable); - -int iio_triggered_buffer_predisable(struct iio_dev *indio_dev) -{ - return iio_trigger_detach_poll_func(indio_dev->trig, - indio_dev->pollfunc); -} -EXPORT_SYMBOL(iio_triggered_buffer_predisable); diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index dd9ad880deca..e2850c1a7353 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -1390,12 +1390,6 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) mutex_lock(&data->lock); - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) { - mutex_unlock(&data->lock); - return err; - } - /* * Enable triggers according to the scan_mask. Enabling either * LIGHT_CLEAR or LIGHT_IR scan mode results in enabling ALS @@ -1430,8 +1424,6 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) err = -ENOMEM; error_unlock: - if (err < 0) - iio_triggered_buffer_predisable(indio_dev); mutex_unlock(&data->lock); return err; @@ -1465,8 +1457,6 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev) if (err == 0) kfree(data->buffer); - iio_triggered_buffer_predisable(indio_dev); - mutex_unlock(&data->lock); return err; diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c index 663c72610655..b93b85dbc3a6 100644 --- a/drivers/iio/light/isl29125.c +++ b/drivers/iio/light/isl29125.c @@ -216,36 +216,20 @@ static const struct iio_info isl29125_info = { static int isl29125_buffer_postenable(struct iio_dev *indio_dev) { struct isl29125_data *data = iio_priv(indio_dev); - int err; - - err = iio_triggered_buffer_postenable(indio_dev); - if (err) - return err; data->conf1 |= ISL29125_MODE_RGB; - err = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, + return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, data->conf1); - if (err) { - iio_triggered_buffer_predisable(indio_dev); - return err; - } - - return 0; } static int isl29125_buffer_predisable(struct iio_dev *indio_dev) { struct isl29125_data *data = iio_priv(indio_dev); - int ret; data->conf1 &= ~ISL29125_MODE_MASK; data->conf1 |= ISL29125_MODE_PD; - ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, + return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, data->conf1); - - iio_triggered_buffer_predisable(indio_dev); - - return ret; } static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = { diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c index c20fbc730d65..aa2972b04833 100644 --- a/drivers/iio/light/rpr0521.c +++ b/drivers/iio/light/rpr0521.c @@ -570,8 +570,6 @@ static int rpr0521_buffer_postdisable(struct iio_dev *indio_dev) static 
const struct iio_buffer_setup_ops rpr0521_buffer_setup_ops = { .preenable = rpr0521_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = rpr0521_buffer_postdisable, }; diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index e1f989dd3a3d..521e8adb93a7 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -1171,8 +1171,6 @@ static bool si1145_validate_scan_mask(struct iio_dev *indio_dev, static const struct iio_buffer_setup_ops si1145_buffer_setup_ops = { .preenable = si1145_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .validate_scan_mask = si1145_validate_scan_mask, }; diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c index 4d001d50e775..a18a82e6bbf5 100644 --- a/drivers/iio/light/st_uvis25_core.c +++ b/drivers/iio/light/st_uvis25_core.c @@ -227,8 +227,6 @@ static int st_uvis25_buffer_postdisable(struct iio_dev *iio_dev) static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = { .preenable = st_uvis25_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = st_uvis25_buffer_postdisable, }; diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index bd6d3e4a0c4d..6fe5d46f80d4 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -243,35 +243,19 @@ static const struct iio_info tcs3414_info = { static int tcs3414_buffer_postenable(struct iio_dev *indio_dev) { struct tcs3414_data *data = iio_priv(indio_dev); - int ret; - - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; data->control |= TCS3414_CONTROL_ADC_EN; - ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, + return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, data->control); - if (ret) - iio_triggered_buffer_predisable(indio_dev); - - return ret; } static int tcs3414_buffer_predisable(struct iio_dev *indio_dev) { struct tcs3414_data *data = iio_priv(indio_dev); - int ret, ret2; data->control &= ~TCS3414_CONTROL_ADC_EN; - ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, + return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, data->control); - - ret2 = iio_triggered_buffer_predisable(indio_dev); - if (!ret) - ret = ret2; - - return ret; } static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = { diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index ac1ab715d4dd..fff4b36b8b58 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -957,50 +957,29 @@ static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev) int ret; int cmd; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - /* Do not enable the buffer if we are already capturing events. 
*/ - if (vcnl4010_is_in_periodic_mode(data)) { - ret = -EBUSY; - goto end; - } + if (vcnl4010_is_in_periodic_mode(data)) + return -EBUSY; ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, VCNL4010_INT_PROX_EN); if (ret < 0) - goto end; + return ret; cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN; - ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd); - if (ret < 0) - goto end; - - return 0; -end: - iio_triggered_buffer_predisable(indio_dev); - - return ret; + return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd); } static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev) { struct vcnl4000_data *data = iio_priv(indio_dev); - int ret, ret_disable; + int ret; ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0); if (ret < 0) - goto end; - - ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0); - -end: - ret_disable = iio_triggered_buffer_predisable(indio_dev); - if (ret == 0) - ret = ret_disable; + return ret; - return ret; + return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0); } static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = { diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 8fc52057837d..fc6840f9c1fa 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -836,8 +836,6 @@ static int bmc150_magn_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops bmc150_magn_buffer_setup_ops = { .preenable = bmc150_magn_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = bmc150_magn_buffer_postdisable, }; diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c index a3e754943456..7242897a05e9 100644 --- a/drivers/iio/magnetometer/rm3100-core.c +++ b/drivers/iio/magnetometer/rm3100-core.c @@ -463,8 +463,6 @@ static int rm3100_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops rm3100_buffer_ops = { .preenable = rm3100_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = rm3100_buffer_postdisable, }; diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index bb425c167a96..4917721fa2e5 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c @@ -31,34 +31,12 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state) static int st_magn_buffer_postenable(struct iio_dev *indio_dev) { - int err; - - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - - err = st_sensors_set_enable(indio_dev, true); - if (err < 0) - goto st_magn_buffer_predisable; - - return 0; - -st_magn_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); - return err; + return st_sensors_set_enable(indio_dev, true); } static int st_magn_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; - - err = st_sensors_set_enable(indio_dev, false); - - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; - - return err; + return st_sensors_set_enable(indio_dev, false); } static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c index 2d601889c8c0..67ae635a05f3 100644 --- a/drivers/iio/potentiostat/lmp91000.c +++ 
b/drivers/iio/potentiostat/lmp91000.c @@ -278,17 +278,8 @@ static const struct iio_trigger_ops lmp91000_trigger_ops = { static int lmp91000_buffer_postenable(struct iio_dev *indio_dev) { struct lmp91000_data *data = iio_priv(indio_dev); - int err; - err = iio_triggered_buffer_postenable(indio_dev); - if (err) - return err; - - err = iio_channel_start_all_cb(data->cb_buffer); - if (err) - iio_triggered_buffer_predisable(indio_dev); - - return err; + return iio_channel_start_all_cb(data->cb_buffer); } static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) @@ -297,7 +288,7 @@ static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) iio_channel_stop_all_cb(data->cb_buffer); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = { diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c index 418dbf9e6e1e..7cf6f06797e1 100644 --- a/drivers/iio/pressure/st_pressure_buffer.c +++ b/drivers/iio/pressure/st_pressure_buffer.c @@ -31,34 +31,12 @@ int st_press_trig_set_state(struct iio_trigger *trig, bool state) static int st_press_buffer_postenable(struct iio_dev *indio_dev) { - int err; - - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - - err = st_sensors_set_enable(indio_dev, true); - if (err < 0) - goto st_press_buffer_predisable; - - return 0; - -st_press_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); - return err; + return st_sensors_set_enable(indio_dev, true); } static int st_press_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; - - err = st_sensors_set_enable(indio_dev, false); - - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; - - return err; + return st_sensors_set_enable(indio_dev, false); } static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = { diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index ef818f2aebd6..2b8796ef91ef 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -1242,19 +1242,17 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev) const struct zpa2326_private *priv = iio_priv(indio_dev); int err; - /* Plug our own trigger event handler. */ - err = iio_triggered_buffer_postenable(indio_dev); - if (err) - goto err; - if (!priv->waken) { /* * We were already power supplied. Just clear hardware FIFO to * get rid of samples acquired during previous rounds (if any). */ err = zpa2326_clear_fifo(indio_dev, 0); - if (err) - goto err_buffer_predisable; + if (err) { + zpa2326_err(indio_dev, + "failed to enable buffering (%d)", err); + return err; + } } if (!iio_trigger_using_own(indio_dev) && priv->waken) { @@ -1263,18 +1261,14 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev) * powered up: reconfigure one-shot mode. 
*/ err = zpa2326_config_oneshot(indio_dev, priv->irq); - if (err) - goto err_buffer_predisable; + if (err) { + zpa2326_err(indio_dev, + "failed to enable buffering (%d)", err); + return err; + } } return 0; - -err_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); -err: - zpa2326_err(indio_dev, "failed to enable buffering (%d)", err); - - return err; } static int zpa2326_postdisable_buffer(struct iio_dev *indio_dev) @@ -1287,7 +1281,6 @@ static int zpa2326_postdisable_buffer(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops zpa2326_buffer_setup_ops = { .preenable = zpa2326_preenable_buffer, .postenable = zpa2326_postenable_buffer, - .predisable = iio_triggered_buffer_predisable, .postdisable = zpa2326_postdisable_buffer }; diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c index 08c6100e2c94..dc2e11b43431 100644 --- a/drivers/iio/proximity/sx9310.c +++ b/drivers/iio/proximity/sx9310.c @@ -736,8 +736,6 @@ static int sx9310_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = { .preenable = sx9310_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = sx9310_buffer_postdisable, }; diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index f6eec54faef4..acb821cbad46 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -680,10 +680,6 @@ static int sx9500_buffer_postenable(struct iio_dev *indio_dev) struct sx9500_data *data = iio_priv(indio_dev); int ret = 0, i; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - mutex_lock(&data->mutex); for (i = 0; i < SX9500_NUM_CHANNELS; i++) @@ -700,9 +696,6 @@ static int sx9500_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&data->mutex); - if (ret) - iio_triggered_buffer_predisable(indio_dev); - return ret; } @@ -727,8 +720,6 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev) mutex_unlock(&data->mutex); - iio_triggered_buffer_predisable(indio_dev); - return ret; } diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h index c3c6ba5ec423..3aa2f132dd67 100644 --- a/include/linux/iio/trigger_consumer.h +++ b/include/linux/iio/trigger_consumer.h @@ -50,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p); void iio_trigger_notify_done(struct iio_trigger *trig); -/* - * Two functions for common case where all that happens is a pollfunc - * is attached and detached from a trigger - */ -int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); -int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); - #endif -- cgit v1.2.3-59-g8ed1b From b3337eb24831db058231ea87838f316d9eb86253 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 15 Jun 2020 18:05:41 +0300 Subject: gpiolib: Introduce for_each_requested_gpio_in_range() macro Introduce for_each_requested_gpio_in_range() macro which helps to iterate over requested GPIO in a range. There are already potential users of it, which are going to be converted by the following patches. For most of them for_each_requested_gpio() shortcut has been added. 
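For instance, with the shortcut a driver could log its in-use lines roughly as follows (illustrative sketch only, not part of this change; the helper name is made up and it assumes <linux/gpio/driver.h> plus a valid chip->parent device):

static void example_log_requested_gpios(struct gpio_chip *chip)
{
	const char *label;
	unsigned int i;

	/* The body runs only for offsets gpiochip_is_requested() reports as taken. */
	for_each_requested_gpio(chip, i, label)
		dev_info(chip->parent, "offset %u requested as \"%s\"\n",
			 i, label);
}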
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200615150545.87964-2-andriy.shevchenko@linux.intel.com Signed-off-by: Linus Walleij --- include/linux/gpio/driver.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index c4f272af7af5..11cdcb195635 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -474,6 +474,22 @@ struct gpio_chip { extern const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset); +/** + * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range + * @chip: the chip to query + * @i: loop variable + * @base: first GPIO in the range + * @size: amount of GPIOs to check starting from @base + * @label: label of current GPIO + */ +#define for_each_requested_gpio_in_range(chip, i, base, size, label) \ + for (i = 0; i < size; i++) \ + if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else + +/* Iterates over all requested GPIO of the given @chip */ +#define for_each_requested_gpio(chip, i, label) \ + for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label) + /* add/remove chips */ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, -- cgit v1.2.3-59-g8ed1b From 49042c220b3a31e25902b36df71b23dc10efa0b8 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:43 +0200 Subject: l3mdev: add infrastructure for table to VRF mapping Add infrastructure to l3mdev (the core code for Layer 3 master devices) in order to find out the corresponding VRF device for a given table id. Therefore, the l3mdev implementations: - can register a callback that returns the device index of the l3mdev associated with a given table id; - can offer the lookup function (table to VRF device). Signed-off-by: Andrea Mayer Signed-off-by: David S. 
Miller --- include/net/l3mdev.h | 39 ++++++++++++++++++++++ net/l3mdev/l3mdev.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) (limited to 'include') diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index e942372b077b..031c661aa14d 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -10,6 +10,16 @@ #include #include +enum l3mdev_type { + L3MDEV_TYPE_UNSPEC, + L3MDEV_TYPE_VRF, + __L3MDEV_TYPE_MAX +}; + +#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1) + +typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_d); + /** * struct l3mdev_ops - l3mdev operations * @@ -37,6 +47,15 @@ struct l3mdev_ops { #ifdef CONFIG_NET_L3_MASTER_DEV +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn); + +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn); + +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net, + u32 table_id); + int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, struct fib_lookup_arg *arg); @@ -280,6 +299,26 @@ struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb) return skb; } +static inline +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + return -EOPNOTSUPP; +} + +static inline +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ +} + +static inline +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net, + u32 table_id) +{ + return -ENODEV; +} + static inline int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, struct fib_lookup_arg *arg) diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index f35899d45a9a..e71ca5aec684 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -9,6 +9,99 @@ #include #include +static DEFINE_SPINLOCK(l3mdev_lock); + +struct l3mdev_handler { + lookup_by_table_id_t dev_lookup; +}; + +static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1]; + +static int l3mdev_check_type(enum l3mdev_type l3type) +{ + if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX) + return -EINVAL; + + return 0; +} + +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + struct l3mdev_handler *hdlr; + int res; + + res = l3mdev_check_type(l3type); + if (res) + return res; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + if (hdlr->dev_lookup) { + res = -EBUSY; + goto unlock; + } + + hdlr->dev_lookup = fn; + res = 0; + +unlock: + spin_unlock(&l3mdev_lock); + + return res; +} +EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register); + +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + struct l3mdev_handler *hdlr; + + if (l3mdev_check_type(l3type)) + return; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + if (hdlr->dev_lookup == fn) + hdlr->dev_lookup = NULL; + + spin_unlock(&l3mdev_lock); +} +EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister); + +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, + struct net *net, u32 table_id) +{ + lookup_by_table_id_t lookup; + struct l3mdev_handler *hdlr; + int ifindex = -EINVAL; + int res; + + res = l3mdev_check_type(l3type); + if (res) + return res; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + lookup = hdlr->dev_lookup; + if (!lookup) + goto unlock; + + ifindex = lookup(net, table_id); + +unlock: + spin_unlock(&l3mdev_lock); + + return ifindex; +} 
+EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id); + /** * l3mdev_master_ifindex - get index of L3 master device * @dev: targeted interface -- cgit v1.2.3-59-g8ed1b From aae4e500e106d2ce48d5bdb21210e36efc7460cb Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:46 +0300 Subject: net: mscc: ocelot: generalize the "ACE/ACL" names Access Control Lists (and their respective Access Control Entries) are specifically entries in the VCAP IS2, the security enforcement block, according to the documentation. Let's rename the structures and functions to something more generic, so that VCAP IS1 structures (which would otherwise have to be called Ingress Classification Entries) can reuse the same code without confusion. Some renaming that was done: struct ocelot_ace_rule -> struct ocelot_vcap_filter struct ocelot_acl_block -> struct ocelot_vcap_block enum ocelot_ace_type -> enum ocelot_vcap_key_type struct ocelot_ace_vlan -> struct ocelot_vcap_key_vlan enum ocelot_ace_action -> enum ocelot_vcap_action struct ocelot_ace_stats -> struct ocelot_vcap_stats enum ocelot_ace_type -> enum ocelot_vcap_key_type struct ocelot_ace_frame_* -> struct ocelot_vcap_key_* No functional change is intended. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 122 ++++++------- drivers/net/ethernet/mscc/ocelot_police.h | 6 +- drivers/net/ethernet/mscc/ocelot_vcap.c | 286 +++++++++++++++--------------- drivers/net/ethernet/mscc/ocelot_vcap.h | 88 ++++----- include/soc/mscc/ocelot.h | 4 +- 6 files changed, 257 insertions(+), 251 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index d4ad7ffe6f6e..52b180280d2f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1392,7 +1392,7 @@ int ocelot_init(struct ocelot *ocelot) INIT_LIST_HEAD(&ocelot->multicast); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); - ocelot_ace_init(ocelot); + ocelot_vcap_init(ocelot); for (port = 0; port < ocelot->num_phys_ports; port++) { /* Clear all counters (5 groups) */ diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index d57d6948ebf2..f2a85b06a6e7 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -9,7 +9,7 @@ #include "ocelot_vcap.h" static int ocelot_flower_parse_action(struct flow_cls_offload *f, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { const struct flow_action_entry *a; s64 burst; @@ -26,17 +26,17 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, flow_action_for_each(i, a, &f->rule->action) { switch (a->id) { case FLOW_ACTION_DROP: - ace->action = OCELOT_ACL_ACTION_DROP; + filter->action = OCELOT_VCAP_ACTION_DROP; break; case FLOW_ACTION_TRAP: - ace->action = OCELOT_ACL_ACTION_TRAP; + filter->action = OCELOT_VCAP_ACTION_TRAP; break; case FLOW_ACTION_POLICE: - ace->action = OCELOT_ACL_ACTION_POLICE; + filter->action = OCELOT_VCAP_ACTION_POLICE; rate = a->police.rate_bytes_ps; - ace->pol.rate = div_u64(rate, 1000) * 8; + filter->pol.rate = div_u64(rate, 1000) * 8; burst = rate * PSCHED_NS2TICKS(a->police.burst); - ace->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); + filter->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); break; default: return -EOPNOTSUPP; @@ -47,7 +47,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, } static int 
ocelot_flower_parse(struct flow_cls_offload *f, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; @@ -88,14 +88,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); - ace->type = OCELOT_ACE_TYPE_ETYPE; - ether_addr_copy(ace->frame.etype.dmac.value, + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + ether_addr_copy(filter->key.etype.dmac.value, match.key->dst); - ether_addr_copy(ace->frame.etype.smac.value, + ether_addr_copy(filter->key.etype.smac.value, match.key->src); - ether_addr_copy(ace->frame.etype.dmac.mask, + ether_addr_copy(filter->key.etype.dmac.mask, match.mask->dst); - ether_addr_copy(ace->frame.etype.smac.mask, + ether_addr_copy(filter->key.etype.smac.mask, match.mask->src); goto finished_key_parsing; } @@ -105,18 +105,18 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, flow_rule_match_basic(rule, &match); if (ntohs(match.key->n_proto) == ETH_P_IP) { - ace->type = OCELOT_ACE_TYPE_IPV4; - ace->frame.ipv4.proto.value[0] = + filter->key_type = OCELOT_VCAP_KEY_IPV4; + filter->key.ipv4.proto.value[0] = match.key->ip_proto; - ace->frame.ipv4.proto.mask[0] = + filter->key.ipv4.proto.mask[0] = match.mask->ip_proto; match_protocol = false; } if (ntohs(match.key->n_proto) == ETH_P_IPV6) { - ace->type = OCELOT_ACE_TYPE_IPV6; - ace->frame.ipv6.proto.value[0] = + filter->key_type = OCELOT_VCAP_KEY_IPV6; + filter->key.ipv6.proto.value[0] = match.key->ip_proto; - ace->frame.ipv6.proto.mask[0] = + filter->key.ipv6.proto.mask[0] = match.mask->ip_proto; match_protocol = false; } @@ -128,16 +128,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, u8 *tmp; flow_rule_match_ipv4_addrs(rule, &match); - tmp = &ace->frame.ipv4.sip.value.addr[0]; + tmp = &filter->key.ipv4.sip.value.addr[0]; memcpy(tmp, &match.key->src, 4); - tmp = &ace->frame.ipv4.sip.mask.addr[0]; + tmp = &filter->key.ipv4.sip.mask.addr[0]; memcpy(tmp, &match.mask->src, 4); - tmp = &ace->frame.ipv4.dip.value.addr[0]; + tmp = &filter->key.ipv4.dip.value.addr[0]; memcpy(tmp, &match.key->dst, 4); - tmp = &ace->frame.ipv4.dip.mask.addr[0]; + tmp = &filter->key.ipv4.dip.mask.addr[0]; memcpy(tmp, &match.mask->dst, 4); match_protocol = false; } @@ -151,10 +151,10 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - ace->frame.ipv4.sport.value = ntohs(match.key->src); - ace->frame.ipv4.sport.mask = ntohs(match.mask->src); - ace->frame.ipv4.dport.value = ntohs(match.key->dst); - ace->frame.ipv4.dport.mask = ntohs(match.mask->dst); + filter->key.ipv4.sport.value = ntohs(match.key->src); + filter->key.ipv4.sport.mask = ntohs(match.mask->src); + filter->key.ipv4.dport.value = ntohs(match.key->dst); + filter->key.ipv4.dport.mask = ntohs(match.mask->dst); match_protocol = false; } @@ -162,11 +162,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); - ace->type = OCELOT_ACE_TYPE_ANY; - ace->vlan.vid.value = match.key->vlan_id; - ace->vlan.vid.mask = match.mask->vlan_id; - ace->vlan.pcp.value[0] = match.key->vlan_priority; - ace->vlan.pcp.mask[0] = match.mask->vlan_priority; + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + 
filter->vlan.pcp.mask[0] = match.mask->vlan_priority; match_protocol = false; } @@ -175,76 +175,76 @@ finished_key_parsing: /* TODO: support SNAP, LLC etc */ if (proto < ETH_P_802_3_MIN) return -EOPNOTSUPP; - ace->type = OCELOT_ACE_TYPE_ETYPE; - *(__be16 *)ace->frame.etype.etype.value = htons(proto); - *(__be16 *)ace->frame.etype.etype.mask = htons(0xffff); + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)filter->key.etype.etype.value = htons(proto); + *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); } - /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */ + /* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */ - ace->prio = f->common.prio; - ace->id = f->cookie; - return ocelot_flower_parse_action(f, ace); + filter->prio = f->common.prio; + filter->id = f->cookie; + return ocelot_flower_parse_action(f, filter); } -static -struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port, - struct flow_cls_offload *f) +static struct ocelot_vcap_filter +*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, + struct flow_cls_offload *f) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; - ace = kzalloc(sizeof(*ace), GFP_KERNEL); - if (!ace) + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) return NULL; - ace->ingress_port_mask = BIT(port); - return ace; + filter->ingress_port_mask = BIT(port); + return filter; } int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; int ret; - ace = ocelot_ace_rule_create(ocelot, port, f); - if (!ace) + filter = ocelot_vcap_filter_create(ocelot, port, f); + if (!filter) return -ENOMEM; - ret = ocelot_flower_parse(f, ace); + ret = ocelot_flower_parse(f, filter); if (ret) { - kfree(ace); + kfree(filter); return ret; } - return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack); + return ocelot_vcap_filter_add(ocelot, filter, f->common.extack); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace); int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule ace; + struct ocelot_vcap_filter filter; - ace.prio = f->common.prio; - ace.id = f->cookie; + filter.prio = f->common.prio; + filter.id = f->cookie; - return ocelot_ace_rule_offload_del(ocelot, &ace); + return ocelot_vcap_filter_del(ocelot, &filter); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy); int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule ace; + struct ocelot_vcap_filter filter; int ret; - ace.prio = f->common.prio; - ace.id = f->cookie; - ret = ocelot_ace_rule_stats_update(ocelot, &ace); + filter.prio = f->common.prio; + filter.id = f->cookie; + ret = ocelot_vcap_filter_stats_update(ocelot, &filter); if (ret) return ret; - flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0, 0x0, + flow_stats_update(&f->stats, 0x0, filter.stats.pkts, 0, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index 79d18442aa9b..be6f2286a5cd 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -33,9 +33,9 @@ struct qos_policer_conf { int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, struct qos_policer_conf *conf); -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct 
ocelot_policer *pol); +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol); -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix); +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix); #endif /* _MSCC_OCELOT_POLICE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 33b5b015e8a7..8597034fd3b7 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -302,10 +302,10 @@ static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data, } static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { - switch (ace->action) { - case OCELOT_ACL_ACTION_DROP: + switch (filter->action) { + case OCELOT_VCAP_ACTION_DROP: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1); @@ -314,7 +314,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0); break; - case OCELOT_ACL_ACTION_TRAP: + case OCELOT_VCAP_ACTION_TRAP: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0); @@ -322,12 +322,12 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1); break; - case OCELOT_ACL_ACTION_POLICE: + case OCELOT_VCAP_ACTION_POLICE: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, - ace->pol_ix); + filter->pol_ix); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0); break; @@ -335,11 +335,11 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, } static void is2_entry_set(struct ocelot *ocelot, int ix, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; + struct ocelot_vcap_key_vlan *tag = &filter->vlan; u32 val, msk, type, type_mask = 0xf, i, count; - struct ocelot_ace_vlan *tag = &ace->vlan; struct ocelot_vcap_u64 payload; struct vcap_data data; int row = (ix / 2); @@ -355,19 +355,19 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, data.tg_sw = VCAP_TG_HALF; is2_data_get(ocelot, &data, ix); data.tg = (data.tg & ~data.tg_mask); - if (ace->prio != 0) + if (filter->prio != 0) data.tg |= data.tg_value; data.type = IS2_ACTION_TYPE_NORMAL; vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0); vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0, - ~ace->ingress_port_mask); + ~filter->ingress_port_mask); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH, OCELOT_VCAP_BIT_ANY); - vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc); - vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc); + vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc); + vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, 
filter->dmac_bc); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged); vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID, tag->vid.value, tag->vid.mask); @@ -375,9 +375,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, tag->pcp.value[0], tag->pcp.mask[0]); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei); - switch (ace->type) { - case OCELOT_ACE_TYPE_ETYPE: { - struct ocelot_ace_frame_etype *etype = &ace->frame.etype; + switch (filter->key_type) { + case OCELOT_VCAP_KEY_ETYPE: { + struct ocelot_vcap_key_etype *etype = &filter->key.etype; type = IS2_TYPE_ETYPE; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -398,8 +398,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, etype->data.value, etype->data.mask); break; } - case OCELOT_ACE_TYPE_LLC: { - struct ocelot_ace_frame_llc *llc = &ace->frame.llc; + case OCELOT_VCAP_KEY_LLC: { + struct ocelot_vcap_key_llc *llc = &filter->key.llc; type = IS2_TYPE_LLC; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -414,8 +414,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, payload.value, payload.mask); break; } - case OCELOT_ACE_TYPE_SNAP: { - struct ocelot_ace_frame_snap *snap = &ace->frame.snap; + case OCELOT_VCAP_KEY_SNAP: { + struct ocelot_vcap_key_snap *snap = &filter->key.snap; type = IS2_TYPE_SNAP; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -423,12 +423,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC, snap->smac.value, snap->smac.mask); vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP, - ace->frame.snap.snap.value, - ace->frame.snap.snap.mask); + filter->key.snap.snap.value, + filter->key.snap.snap.mask); break; } - case OCELOT_ACE_TYPE_ARP: { - struct ocelot_ace_frame_arp *arp = &ace->frame.arp; + case OCELOT_VCAP_KEY_ARP: { + struct ocelot_vcap_key_arp *arp = &filter->key.arp; type = IS2_TYPE_ARP; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC, @@ -469,20 +469,20 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, 0, 0); break; } - case OCELOT_ACE_TYPE_IPV4: - case OCELOT_ACE_TYPE_IPV6: { + case OCELOT_VCAP_KEY_IPV4: + case OCELOT_VCAP_KEY_IPV6: { enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp; enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg; enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh; - struct ocelot_ace_frame_ipv4 *ipv4 = NULL; - struct ocelot_ace_frame_ipv6 *ipv6 = NULL; + struct ocelot_vcap_key_ipv4 *ipv4 = NULL; + struct ocelot_vcap_key_ipv6 *ipv6 = NULL; struct ocelot_vcap_udp_tcp *sport, *dport; struct ocelot_vcap_ipv4 sip, dip; struct ocelot_vcap_u8 proto, ds; struct ocelot_vcap_u48 *ip_data; - if (ace->type == OCELOT_ACE_TYPE_IPV4) { - ipv4 = &ace->frame.ipv4; + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) { + ipv4 = &filter->key.ipv4; ttl = ipv4->ttl; fragment = ipv4->fragment; options = ipv4->options; @@ -503,7 +503,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, sport_eq_dport = ipv4->sport_eq_dport; seq_zero = ipv4->seq_zero; } else { - ipv6 = &ace->frame.ipv6; + ipv6 = &filter->key.ipv6; ttl = ipv6->ttl; fragment = OCELOT_VCAP_BIT_ANY; options = OCELOT_VCAP_BIT_ANY; @@ -607,7 +607,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, } break; } - case OCELOT_ACE_TYPE_ANY: + case OCELOT_VCAP_KEY_ANY: default: type = 0; type_mask = 0; @@ -623,9 +623,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, } vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask); - 
is2_action_set(ocelot, &data, ace); + is2_action_set(ocelot, &data, filter); vcap_data_set(data.counter, data.counter_offset, - vcap_is2->counter_width, ace->stats.pkts); + vcap_is2->counter_width, filter->stats.pkts); /* Write row */ vcap_entry2cache(ocelot, &data); @@ -633,7 +633,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL); } -static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule, +static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filter, int ix) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; @@ -648,55 +648,56 @@ static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule, cnt = vcap_data_get(data.counter, data.counter_offset, vcap_is2->counter_width); - rule->stats.pkts = cnt; + filter->stats.pkts = cnt; } -static void ocelot_ace_rule_add(struct ocelot *ocelot, - struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; struct list_head *pos, *n; - if (rule->action == OCELOT_ACL_ACTION_POLICE) { + if (filter->action == OCELOT_VCAP_ACTION_POLICE) { block->pol_lpr--; - rule->pol_ix = block->pol_lpr; - ocelot_ace_policer_add(ocelot, rule->pol_ix, &rule->pol); + filter->pol_ix = block->pol_lpr; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, &filter->pol); } block->count++; if (list_empty(&block->rules)) { - list_add(&rule->list, &block->rules); + list_add(&filter->list, &block->rules); return; } list_for_each_safe(pos, n, &block->rules) { - tmp = list_entry(pos, struct ocelot_ace_rule, list); - if (rule->prio < tmp->prio) + tmp = list_entry(pos, struct ocelot_vcap_filter, list); + if (filter->prio < tmp->prio) break; } - list_add(&rule->list, pos->prev); + list_add(&filter->list, pos->prev); } -static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; int index = -1; list_for_each_entry(tmp, &block->rules, list) { ++index; - if (rule->id == tmp->id) + if (filter->id == tmp->id) break; } return index; } -static struct ocelot_ace_rule* -ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index) +static struct ocelot_vcap_filter* +ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block, + int index) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; int i = 0; list_for_each_entry(tmp, &block->rules, list) { @@ -739,15 +740,16 @@ static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port, ANA_PORT_VCAP_S2_CFG, port); } -static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) +static bool +ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter) { u16 proto, mask; - if (ace->type != OCELOT_ACE_TYPE_ETYPE) + if (filter->key_type != OCELOT_VCAP_KEY_ETYPE) return false; - proto = ntohs(*(__be16 *)ace->frame.etype.etype.value); - mask = ntohs(*(__be16 *)ace->frame.etype.etype.mask); + proto = ntohs(*(__be16 *)filter->key.etype.etype.value); + mask = ntohs(*(__be16 *)filter->key.etype.etype.mask); /* ETH_P_ALL match, so all protocols below are included */ if (mask == 0) @@ -762,49 +764,51 @@ static bool 
ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) return false; } -static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace) +static bool +ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter) { - if (ace->type == OCELOT_ACE_TYPE_SNAP) + if (filter->key_type == OCELOT_VCAP_KEY_SNAP) return true; - if (ace->type == OCELOT_ACE_TYPE_ARP) + if (filter->key_type == OCELOT_VCAP_KEY_ARP) return true; - if (ace->type == OCELOT_ACE_TYPE_IPV4) + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) return true; - if (ace->type == OCELOT_ACE_TYPE_IPV6) + if (filter->key_type == OCELOT_VCAP_KEY_IPV6) return true; return false; } -static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot, - struct ocelot_ace_rule *ace) +static bool +ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter *tmp; unsigned long port; int i; - if (ocelot_ace_is_problematic_mac_etype(ace)) { + if (ocelot_vcap_is_problematic_mac_etype(filter)) { /* Search for any non-MAC_ETYPE rules on the port */ for (i = 0; i < block->count; i++) { - tmp = ocelot_ace_rule_get_rule_index(block, i); - if (tmp->ingress_port_mask & ace->ingress_port_mask && - ocelot_ace_is_problematic_non_mac_etype(tmp)) + tmp = ocelot_vcap_block_find_filter(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + ocelot_vcap_is_problematic_non_mac_etype(tmp)) return false; } - for_each_set_bit(port, &ace->ingress_port_mask, + for_each_set_bit(port, &filter->ingress_port_mask, ocelot->num_phys_ports) ocelot_match_all_as_mac_etype(ocelot, port, true); - } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) { + } else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) { /* Search for any MAC_ETYPE rules on the port */ for (i = 0; i < block->count; i++) { - tmp = ocelot_ace_rule_get_rule_index(block, i); - if (tmp->ingress_port_mask & ace->ingress_port_mask && - ocelot_ace_is_problematic_mac_etype(tmp)) + tmp = ocelot_vcap_block_find_filter(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + ocelot_vcap_is_problematic_mac_etype(tmp)) return false; } - for_each_set_bit(port, &ace->ingress_port_mask, + for_each_set_bit(port, &filter->ingress_port_mask, ocelot->num_phys_ports) ocelot_match_all_as_mac_etype(ocelot, port, false); } @@ -812,39 +816,40 @@ static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot, return true; } -int ocelot_ace_rule_offload_add(struct ocelot *ocelot, - struct ocelot_ace_rule *rule, - struct netlink_ext_ack *extack) +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *ace; + struct ocelot_vcap_block *block = &ocelot->block; int i, index; - if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) { + if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) { NL_SET_ERR_MSG_MOD(extack, "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules"); return -EBUSY; } - /* Add rule to the linked list */ - ocelot_ace_rule_add(ocelot, block, rule); + /* Add filter to the linked list */ + ocelot_vcap_filter_add_to_block(ocelot, block, filter); - /* Get the index of the inserted rule */ - index = ocelot_ace_rule_get_index_id(block, rule); + /* Get the index of the inserted filter 
*/ + index = ocelot_vcap_block_get_filter_index(block, filter); - /* Move down the rules to make place for the new rule */ + /* Move down the rules to make place for the new filter */ for (i = block->count - 1; i > index; i--) { - ace = ocelot_ace_rule_get_rule_index(block, i); - is2_entry_set(ocelot, i, ace); + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter(block, i); + is2_entry_set(ocelot, i, tmp); } - /* Now insert the new rule */ - is2_entry_set(ocelot, index, rule); + /* Now insert the new filter */ + is2_entry_set(ocelot, index, filter); return 0; } -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol) +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol) { struct qos_policer_conf pp = { 0 }; @@ -858,7 +863,7 @@ int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); } -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) { struct qos_policer_conf pp = { 0 }; @@ -867,44 +872,44 @@ int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); } -static void ocelot_ace_police_del(struct ocelot *ocelot, - struct ocelot_acl_block *block, - u32 ix) +static void ocelot_vcap_police_del(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + u32 ix) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; int index = -1; if (ix < block->pol_lpr) return; - list_for_each_entry(ace, &block->rules, list) { + list_for_each_entry(filter, &block->rules, list) { index++; - if (ace->action == OCELOT_ACL_ACTION_POLICE && - ace->pol_ix < ix) { - ace->pol_ix += 1; - ocelot_ace_policer_add(ocelot, ace->pol_ix, - &ace->pol); - is2_entry_set(ocelot, index, ace); + if (filter->action == OCELOT_VCAP_ACTION_POLICE && + filter->pol_ix < ix) { + filter->pol_ix += 1; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, + &filter->pol); + is2_entry_set(ocelot, index, filter); } } - ocelot_ace_policer_del(ocelot, block->pol_lpr); + ocelot_vcap_policer_del(ocelot, block->pol_lpr); block->pol_lpr++; } -static void ocelot_ace_rule_del(struct ocelot *ocelot, - struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; struct list_head *pos, *q; list_for_each_safe(pos, q, &block->rules) { - tmp = list_entry(pos, struct ocelot_ace_rule, list); - if (tmp->id == rule->id) { - if (tmp->action == OCELOT_ACL_ACTION_POLICE) - ocelot_ace_police_del(ocelot, block, - tmp->pol_ix); + tmp = list_entry(pos, struct ocelot_vcap_filter, list); + if (tmp->id == filter->id) { + if (tmp->action == OCELOT_VCAP_ACTION_POLICE) + ocelot_vcap_police_del(ocelot, block, + tmp->pol_ix); list_del(pos); kfree(tmp); @@ -914,56 +919,57 @@ static void ocelot_ace_rule_del(struct ocelot *ocelot, block->count--; } -int ocelot_ace_rule_offload_del(struct ocelot *ocelot, - struct ocelot_ace_rule *rule) +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule del_ace; - struct ocelot_ace_rule *ace; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter del_filter; int i, index; - memset(&del_ace, 0, 
sizeof(del_ace)); + memset(&del_filter, 0, sizeof(del_filter)); - /* Gets index of the rule */ - index = ocelot_ace_rule_get_index_id(block, rule); + /* Gets index of the filter */ + index = ocelot_vcap_block_get_filter_index(block, filter); - /* Delete rule */ - ocelot_ace_rule_del(ocelot, block, rule); + /* Delete filter */ + ocelot_vcap_block_remove_filter(ocelot, block, filter); - /* Move up all the blocks over the deleted rule */ + /* Move up all the blocks over the deleted filter */ for (i = index; i < block->count; i++) { - ace = ocelot_ace_rule_get_rule_index(block, i); - is2_entry_set(ocelot, i, ace); + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter(block, i); + is2_entry_set(ocelot, i, tmp); } - /* Now delete the last rule, because it is duplicated */ - is2_entry_set(ocelot, block->count, &del_ace); + /* Now delete the last filter, because it is duplicated */ + is2_entry_set(ocelot, block->count, &del_filter); return 0; } -int ocelot_ace_rule_stats_update(struct ocelot *ocelot, - struct ocelot_ace_rule *rule) +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter *tmp; int index; - index = ocelot_ace_rule_get_index_id(block, rule); - is2_entry_get(ocelot, rule, index); + index = ocelot_vcap_block_get_filter_index(block, filter); + is2_entry_get(ocelot, filter, index); /* After we get the result we need to clear the counters */ - tmp = ocelot_ace_rule_get_rule_index(block, index); + tmp = ocelot_vcap_block_find_filter(block, index); tmp->stats.pkts = 0; is2_entry_set(ocelot, index, tmp); return 0; } -int ocelot_ace_init(struct ocelot *ocelot) +int ocelot_vcap_init(struct ocelot *ocelot) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; - struct ocelot_acl_block *block = &ocelot->acl_block; + struct ocelot_vcap_block *block = &ocelot->block; struct vcap_data data; memset(&data, 0, sizeof(data)); @@ -994,7 +1000,7 @@ int ocelot_ace_init(struct ocelot *ocelot) block->pol_lpr = OCELOT_POLICER_DISCARD - 1; - INIT_LIST_HEAD(&ocelot->acl_block.rules); + INIT_LIST_HEAD(&ocelot->block.rules); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h index 099e177f2617..0dfbfc011b2e 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.h +++ b/drivers/net/ethernet/mscc/ocelot_vcap.h @@ -3,8 +3,8 @@ * Copyright (c) 2019 Microsemi Corporation */ -#ifndef _MSCC_OCELOT_ACE_H_ -#define _MSCC_OCELOT_ACE_H_ +#ifndef _MSCC_OCELOT_VCAP_H_ +#define _MSCC_OCELOT_VCAP_H_ #include "ocelot.h" #include "ocelot_police.h" @@ -76,31 +76,31 @@ struct ocelot_vcap_udp_tcp { u16 mask; }; -enum ocelot_ace_type { - OCELOT_ACE_TYPE_ANY, - OCELOT_ACE_TYPE_ETYPE, - OCELOT_ACE_TYPE_LLC, - OCELOT_ACE_TYPE_SNAP, - OCELOT_ACE_TYPE_ARP, - OCELOT_ACE_TYPE_IPV4, - OCELOT_ACE_TYPE_IPV6 +enum ocelot_vcap_key_type { + OCELOT_VCAP_KEY_ANY, + OCELOT_VCAP_KEY_ETYPE, + OCELOT_VCAP_KEY_LLC, + OCELOT_VCAP_KEY_SNAP, + OCELOT_VCAP_KEY_ARP, + OCELOT_VCAP_KEY_IPV4, + OCELOT_VCAP_KEY_IPV6 }; -struct ocelot_ace_vlan { +struct ocelot_vcap_key_vlan { struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ enum ocelot_vcap_bit dei; /* DEI */ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ }; -struct ocelot_ace_frame_etype { +struct ocelot_vcap_key_etype { struct ocelot_vcap_u48 dmac; struct 
ocelot_vcap_u48 smac; struct ocelot_vcap_u16 etype; struct ocelot_vcap_u16 data; /* MAC data */ }; -struct ocelot_ace_frame_llc { +struct ocelot_vcap_key_llc { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; @@ -108,7 +108,7 @@ struct ocelot_ace_frame_llc { struct ocelot_vcap_u32 llc; }; -struct ocelot_ace_frame_snap { +struct ocelot_vcap_key_snap { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; @@ -116,7 +116,7 @@ struct ocelot_ace_frame_snap { struct ocelot_vcap_u40 snap; }; -struct ocelot_ace_frame_arp { +struct ocelot_vcap_key_arp { struct ocelot_vcap_u48 smac; enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */ enum ocelot_vcap_bit req; /* Opcode request/reply */ @@ -133,7 +133,7 @@ struct ocelot_ace_frame_arp { struct ocelot_vcap_ipv4 dip; /* Target IP address */ }; -struct ocelot_ace_frame_ipv4 { +struct ocelot_vcap_key_ipv4 { enum ocelot_vcap_bit ttl; /* TTL zero */ enum ocelot_vcap_bit fragment; /* Fragment */ enum ocelot_vcap_bit options; /* Header options */ @@ -155,7 +155,7 @@ struct ocelot_ace_frame_ipv4 { enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ }; -struct ocelot_ace_frame_ipv6 { +struct ocelot_vcap_key_ipv6 { struct ocelot_vcap_u8 proto; /* IPv6 protocol */ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */ enum ocelot_vcap_bit ttl; /* TTL zero */ @@ -174,58 +174,58 @@ struct ocelot_ace_frame_ipv6 { enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ }; -enum ocelot_ace_action { - OCELOT_ACL_ACTION_DROP, - OCELOT_ACL_ACTION_TRAP, - OCELOT_ACL_ACTION_POLICE, +enum ocelot_vcap_action { + OCELOT_VCAP_ACTION_DROP, + OCELOT_VCAP_ACTION_TRAP, + OCELOT_VCAP_ACTION_POLICE, }; -struct ocelot_ace_stats { +struct ocelot_vcap_stats { u64 bytes; u64 pkts; u64 used; }; -struct ocelot_ace_rule { +struct ocelot_vcap_filter { struct list_head list; u16 prio; u32 id; - enum ocelot_ace_action action; - struct ocelot_ace_stats stats; + enum ocelot_vcap_action action; + struct ocelot_vcap_stats stats; unsigned long ingress_port_mask; enum ocelot_vcap_bit dmac_mc; enum ocelot_vcap_bit dmac_bc; - struct ocelot_ace_vlan vlan; + struct ocelot_vcap_key_vlan vlan; - enum ocelot_ace_type type; + enum ocelot_vcap_key_type key_type; union { - /* ocelot_ACE_TYPE_ANY: No specific fields */ - struct ocelot_ace_frame_etype etype; - struct ocelot_ace_frame_llc llc; - struct ocelot_ace_frame_snap snap; - struct ocelot_ace_frame_arp arp; - struct ocelot_ace_frame_ipv4 ipv4; - struct ocelot_ace_frame_ipv6 ipv6; - } frame; + /* OCELOT_VCAP_KEY_ANY: No specific fields */ + struct ocelot_vcap_key_etype etype; + struct ocelot_vcap_key_llc llc; + struct ocelot_vcap_key_snap snap; + struct ocelot_vcap_key_arp arp; + struct ocelot_vcap_key_ipv4 ipv4; + struct ocelot_vcap_key_ipv6 ipv6; + } key; struct ocelot_policer pol; u32 pol_ix; }; -int ocelot_ace_rule_offload_add(struct ocelot *ocelot, - struct ocelot_ace_rule *rule, - struct netlink_ext_ack *extack); -int ocelot_ace_rule_offload_del(struct ocelot *ocelot, - struct ocelot_ace_rule *rule); -int ocelot_ace_rule_stats_update(struct ocelot *ocelot, - struct ocelot_ace_rule *rule); +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule, + struct netlink_ext_ack *extack); +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); -int ocelot_ace_init(struct ocelot *ocelot); +int ocelot_vcap_init(struct ocelot *ocelot); int 
ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, struct flow_cls_offload *f, bool ingress); -#endif /* _MSCC_OCELOT_ACE_H_ */ +#endif /* _MSCC_OCELOT_VCAP_H_ */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 4953e9994df3..fa2c3904049e 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -470,7 +470,7 @@ struct ocelot_ops { int (*reset)(struct ocelot *ocelot); }; -struct ocelot_acl_block { +struct ocelot_vcap_block { struct list_head rules; int count; int pol_lpr; @@ -535,7 +535,7 @@ struct ocelot { struct list_head multicast; - struct ocelot_acl_block acl_block; + struct ocelot_vcap_block block; const struct vcap_field *vcap_is2_keys; const struct vcap_field *vcap_is2_actions; -- cgit v1.2.3-59-g8ed1b From 78e57f152c001eed0321ba4413a07c9e33e753e6 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 18 Jun 2020 14:22:15 -0700 Subject: net: Avoid overwriting valid skb->napi_id This will be useful to allow busy poll for tunneled traffic. In case of busy poll for sessions over tunnels, the underlying physical device's queues need to be polled. Tunnels schedule NAPI either via netif_rx() for backlog queue or schedule the gro_cell_poll(). netif_rx() propagates the valid skb->napi_id to the socket. OTOH, gro_cell_poll() stamps the skb->napi_id again by calling skb_mark_napi_id() with the tunnel NAPI which is not a busy poll candidate. This was preventing tunneled traffic to use busy poll. A valid NAPI ID in the skb indicates it was already marked for busy poll by a NAPI driver and hence needs to be copied into the socket. Signed-off-by: Amritha Nambiar Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/busy_poll.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 86e028388bad..b001fa91c14e 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -114,7 +114,11 @@ static inline void skb_mark_napi_id(struct sk_buff *skb, struct napi_struct *napi) { #ifdef CONFIG_NET_RX_BUSY_POLL - skb->napi_id = napi->napi_id; + /* If the skb was already marked with a valid NAPI ID, avoid overwriting + * it. + */ + if (skb->napi_id < MIN_NAPI_ID) + skb->napi_id = napi->napi_id; #endif } -- cgit v1.2.3-59-g8ed1b From 05e22e8395058745bd0312bc488b522197852aff Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:12:34 -0700 Subject: tcp: remove indirect calls for icsk->icsk_af_ops->queue_xmit Mitigate RETPOLINE costs in __tcp_transmit_skb() by using INDIRECT_CALL_INET() wrapper. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ip.h | 6 +----- include/net/tcp.h | 1 + net/ipv4/ip_output.c | 6 ++++++ net/ipv4/tcp_output.c | 7 ++++++- 4 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index 04ebe7bf54c6..862c9545833a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -231,11 +231,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rtp, struct inet_cork *cork, unsigned int flags); -static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, - struct flowi *fl) -{ - return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); -} +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) { diff --git a/include/net/tcp.h b/include/net/tcp.h index 4de9485f73d9..e5d7e0b09924 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 090d3097ee15..d946356187ed 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -539,6 +539,12 @@ no_route: } EXPORT_SYMBOL(__ip_queue_xmit); +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) +{ + return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); +} +EXPORT_SYMBOL(ip_queue_xmit); + static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a50e1990a845..be1bd37185d8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1064,6 +1064,9 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); } +INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); +INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); + /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial * transmission and possible later retransmissions. @@ -1235,7 +1238,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tcp_add_tx_delay(skb, tp); - err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); + err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, + inet6_csk_xmit, ip_queue_xmit, + sk, skb, &inet->cork.fl); if (unlikely(err > 0)) { tcp_enter_cwr(sk); -- cgit v1.2.3-59-g8ed1b From dd2e0b86fc4ee146ac8f3275833d0187efeb950a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:12:35 -0700 Subject: tcp: remove indirect calls for icsk->icsk_af_ops->send_check Mitigate RETPOLINE costs in __tcp_transmit_skb() by using INDIRECT_CALL_INET() wrapper. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ip6_checksum.h | 9 --------- include/net/tcp.h | 3 +++ net/ipv4/tcp_output.c | 5 ++++- net/ipv6/tcp_ipv6.c | 7 +++++++ 4 files changed, 14 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 27ec612cd4a4..b3f4eaa88672 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -85,15 +85,6 @@ static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb) th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); } -#if IS_ENABLED(CONFIG_IPV6) -static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) -{ - struct ipv6_pinfo *np = inet6_sk(sk); - - __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); -} -#endif - static inline __sum16 udp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/tcp.h b/include/net/tcp.h index e5d7e0b09924..cd9cc348dbf9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -932,6 +932,9 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) #endif return 0; } + +INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); + #endif static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index be1bd37185d8..04b70fe31fa2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1066,6 +1066,7 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); +INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial @@ -1210,7 +1211,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, } #endif - icsk->icsk_af_ops->send_check(sk, skb); + INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, + tcp_v6_send_check, tcp_v4_send_check, + sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f67d45ff00b4..4502db706f75 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1811,6 +1811,13 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_destructor = tcp_twsk_destructor, }; +INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); +} + const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, -- cgit v1.2.3-59-g8ed1b From 5769a351b89cd4d82016f18fa5f6c4077403564d Mon Sep 17 00:00:00 2001 From: Jiufei Xue Date: Wed, 17 Jun 2020 17:53:55 +0800 Subject: io_uring: change the poll type to be 32-bits poll events should be 32-bits to cover EPOLLEXCLUSIVE. Explicit word-swap the poll32_events for big endian to make sure the ABI is not changed. We call this feature IORING_FEAT_POLL_32BITS, applications who want to use EPOLLEXCLUSIVE should check the feature bit first. 
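A rough userspace sketch of that check (illustrative only, not part of this patch; the function name is made up, @p is the io_uring_params filled in by io_uring_setup(), @sqe is a free submission queue entry, and it assumes the tools/io_uring liburing.h helpers plus <sys/epoll.h> for the EPOLL* flags):

static void example_prep_poll(const struct io_uring_params *p,
			      struct io_uring_sqe *sqe, int fd)
{
	if (p->features & IORING_FEAT_POLL_32BITS)
		/* 32-bit mask honoured: bits above 15, e.g. EPOLLEXCLUSIVE, work */
		io_uring_prep_poll_add(sqe, fd, EPOLLIN | EPOLLEXCLUSIVE);
	else
		/* older kernel: stay within the 16-bit poll_events flags */
		io_uring_prep_poll_add(sqe, fd, EPOLLIN);
}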
Signed-off-by: Jiufei Xue Signed-off-by: Jens Axboe --- fs/io_uring.c | 13 +++++++++---- include/uapi/linux/io_uring.h | 4 +++- tools/io_uring/liburing.h | 6 +++++- 3 files changed, 17 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index a78201b96179..0eb063daa9b5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4589,7 +4589,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_poll_iocb *poll = &req->poll; - u16 events; + u32 events; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; @@ -4598,7 +4598,10 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe if (!poll->file) return -EBADF; - events = READ_ONCE(sqe->poll_events); + events = READ_ONCE(sqe->poll32_events); +#ifdef __BIG_ENDIAN + events = swahw32(events); +#endif poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; io_get_req_task(req); @@ -7928,7 +7931,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | - IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL; + IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | + IORING_FEAT_POLL_32BITS; if (copy_to_user(params, p, sizeof(*p))) { ret = -EFAULT; @@ -8217,7 +8221,8 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); - BUILD_BUG_SQE_ELEM(28, __u16, poll_events); + BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); + BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 92c22699a5a7..8d033961cb78 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -31,7 +31,8 @@ struct io_uring_sqe { union { __kernel_rwf_t rw_flags; __u32 fsync_flags; - __u16 poll_events; + __u16 poll_events; /* compatibility */ + __u32 poll32_events; /* word-reversed for BE */ __u32 sync_range_flags; __u32 msg_flags; __u32 timeout_flags; @@ -248,6 +249,7 @@ struct io_uring_params { #define IORING_FEAT_RW_CUR_POS (1U << 3) #define IORING_FEAT_CUR_PERSONALITY (1U << 4) #define IORING_FEAT_FAST_POLL (1U << 5) +#define IORING_FEAT_POLL_32BITS (1U << 6) /* * io_uring_register(2) opcodes and arguments diff --git a/tools/io_uring/liburing.h b/tools/io_uring/liburing.h index 5f305c86b892..28a837b6069d 100644 --- a/tools/io_uring/liburing.h +++ b/tools/io_uring/liburing.h @@ -10,6 +10,7 @@ extern "C" { #include #include "../../include/uapi/linux/io_uring.h" #include +#include #include "barrier.h" /* @@ -145,11 +146,14 @@ static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd, } static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd, - short poll_mask) + unsigned poll_mask) { memset(sqe, 0, sizeof(*sqe)); sqe->opcode = IORING_OP_POLL_ADD; sqe->fd = fd; +#if __BYTE_ORDER == __BIG_ENDIAN + poll_mask = __swahw32(poll_mask); +#endif sqe->poll_events = poll_mask; } -- cgit v1.2.3-59-g8ed1b From 5a473e8311b582a40c10409a0f4bb39f42aa8123 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 4 Jun 2020 11:23:39 -0600 Subject: block: provide plug 
based way of signaling forced no-wait semantics Provide a way for the caller to specify that IO should be marked with REQ_NOWAIT to avoid blocking on allocation. Signed-off-by: Jens Axboe --- block/blk-core.c | 6 ++++++ include/linux/blkdev.h | 1 + 2 files changed, 7 insertions(+) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index 03252af8c82c..62a4904db921 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -958,6 +958,7 @@ generic_make_request_checks(struct bio *bio) struct request_queue *q; int nr_sectors = bio_sectors(bio); blk_status_t status = BLK_STS_IOERR; + struct blk_plug *plug; char b[BDEVNAME_SIZE]; might_sleep(); @@ -971,6 +972,10 @@ generic_make_request_checks(struct bio *bio) goto end_io; } + plug = blk_mq_plug(q, bio); + if (plug && plug->nowait) + bio->bi_opf |= REQ_NOWAIT; + /* * For a REQ_NOWAIT based request, return -EOPNOTSUPP * if queue is not a request based queue. @@ -1800,6 +1805,7 @@ void blk_start_plug(struct blk_plug *plug) INIT_LIST_HEAD(&plug->cb_list); plug->rq_count = 0; plug->multiple_queues = false; + plug->nowait = false; /* * Store ordering should not be needed here, since a potential diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8fd900998b4e..6e067dca94cf 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1189,6 +1189,7 @@ struct blk_plug { struct list_head cb_list; /* md requires an unplug callback */ unsigned short rq_count; bool multiple_queues; + bool nowait; }; #define BLK_MAX_REQUEST_COUNT 16 #define BLK_PLUG_FLUSH_SIZE (128 * 1024) -- cgit v1.2.3-59-g8ed1b From c7510ab2cf5ccd997fe7f194edfe09cc511abf99 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 23 May 2020 08:22:14 -0600 Subject: mm: abstract out wake_page_match() from wake_page_function() No functional changes in this patch, just in preparation for allowing more callers. Acked-by: Johannes Weiner Signed-off-by: Jens Axboe --- include/linux/pagemap.h | 37 +++++++++++++++++++++++++++++++++++++ mm/filemap.c | 35 ++++------------------------------- 2 files changed, 41 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index cf2468da68e9..2f18221bb5c8 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -496,6 +496,43 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, return pgoff; } +/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +static inline int wake_page_match(struct wait_page_queue *wait_page, + struct wait_page_key *key) +{ + if (wait_page->page != key->page) + return 0; + key->page_match = 1; + + if (wait_page->bit_nr != key->bit_nr) + return 0; + + /* + * Stop walking if it's locked. + * Is this safe if put_and_wait_on_page_locked() is in use? + * Yes: the waker must hold a reference to this page, and if PG_locked + * has now already been set by another task, that task must also hold + * a reference to the *same usage* of this page; so there is no need + * to walk on to wake even the put_and_wait_on_page_locked() callers. 
+ */ + if (test_bit(key->bit_nr, &key->page->flags)) + return -1; + + return 1; +} + extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, diff --git a/mm/filemap.c b/mm/filemap.c index 3378d4fca883..c3175dbd8fba 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -987,43 +987,16 @@ void __init pagecache_init(void) page_writeback_init(); } -/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ -struct wait_page_key { - struct page *page; - int bit_nr; - int page_match; -}; - -struct wait_page_queue { - struct page *page; - int bit_nr; - wait_queue_entry_t wait; -}; - static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { struct wait_page_key *key = arg; struct wait_page_queue *wait_page = container_of(wait, struct wait_page_queue, wait); + int ret; - if (wait_page->page != key->page) - return 0; - key->page_match = 1; - - if (wait_page->bit_nr != key->bit_nr) - return 0; - - /* - * Stop walking if it's locked. - * Is this safe if put_and_wait_on_page_locked() is in use? - * Yes: the waker must hold a reference to this page, and if PG_locked - * has now already been set by another task, that task must also hold - * a reference to the *same usage* of this page; so there is no need - * to walk on to wake even the put_and_wait_on_page_locked() callers. - */ - if (test_bit(key->bit_nr, &key->page->flags)) - return -1; - + ret = wake_page_match(wait_page, key); + if (ret != 1) + return ret; return autoremove_wake_function(wait, mode, sync, key); } -- cgit v1.2.3-59-g8ed1b From dd3e6d5039de1cbff4e20e2b34390ff44cdb182f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 May 2020 09:12:09 -0600 Subject: mm: add support for async page locking Normally waiting for a page to become unlocked, or locking the page, requires waiting for IO to complete. Add support for lock_page_async() and wait_on_page_locked_async(), which are callback based instead. This allows a caller to get notified when a page becomes unlocked, rather than wait for it. We add a new iocb field, ki_waitq, to pass in the necessary data for this to happen. We can unionize this with ki_cookie, since that is only used for polled IO. Polled IO can never co-exist with async callbacks, as it is (by definition) polled completions. struct wait_page_key is made public, and we define struct wait_page_async as the interface between the caller and the core. 
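For illustration only, a buffered read path is then expected to choose between the two locking flavours along these lines, mirroring the filemap.c hunk below (the function name is made up):

  /* caller retries the read later when this returns -EIOCBQUEUED */
  static int my_lock_page_for_read(struct kiocb *iocb, struct page *page)
  {
          if (iocb->ki_flags & IOCB_WAITQ)
                  return lock_page_async(page, iocb->ki_waitq);
          return lock_page_killable(page);
  }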
Acked-by: Johannes Weiner Signed-off-by: Jens Axboe --- include/linux/fs.h | 7 ++++++- include/linux/pagemap.h | 17 +++++++++++++++++ mm/filemap.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 67 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f881a892ea7..2a5cf6080e68 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -315,6 +315,8 @@ enum rw_hint { #define IOCB_SYNC (1 << 5) #define IOCB_WRITE (1 << 6) #define IOCB_NOWAIT (1 << 7) +/* iocb->ki_waitq is valid */ +#define IOCB_WAITQ (1 << 8) struct kiocb { struct file *ki_filp; @@ -328,7 +330,10 @@ struct kiocb { int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ - unsigned int ki_cookie; /* for ->iopoll */ + union { + unsigned int ki_cookie; /* for ->iopoll */ + struct wait_page_queue *ki_waitq; /* for async buffered IO */ + }; randomized_struct_fields_end }; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 2f18221bb5c8..e053e1d9a4d7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -535,6 +535,7 @@ static inline int wake_page_match(struct wait_page_queue *wait_page, extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); +extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); @@ -571,6 +572,22 @@ static inline int lock_page_killable(struct page *page) return 0; } +/* + * lock_page_async - Lock the page, unless this would block. If the page + * is already locked, then queue a callback when the page becomes unlocked. + * This callback can then retry the operation. + * + * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page + * was already locked and the callback defined in 'wait' was queued. + */ +static inline int lock_page_async(struct page *page, + struct wait_page_queue *wait) +{ + if (!trylock_page(page)) + return __lock_page_async(page, wait); + return 0; +} + /* * lock_page_or_retry - Lock the page, unless this would block and the * caller indicated that it can handle a retry. diff --git a/mm/filemap.c b/mm/filemap.c index c3175dbd8fba..e8aaf43bee9f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1180,6 +1180,36 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr) } EXPORT_SYMBOL(wait_on_page_bit_killable); +static int __wait_on_page_locked_async(struct page *page, + struct wait_page_queue *wait, bool set) +{ + struct wait_queue_head *q = page_waitqueue(page); + int ret = 0; + + wait->page = page; + wait->bit_nr = PG_locked; + + spin_lock_irq(&q->lock); + __add_wait_queue_entry_tail(q, &wait->wait); + SetPageWaiters(page); + if (set) + ret = !trylock_page(page); + else + ret = PageLocked(page); + /* + * If we were succesful now, we know we're still on the + * waitqueue as we're still under the lock. This means it's + * safe to remove and return success, we know the callback + * isn't going to trigger. + */ + if (!ret) + __remove_wait_queue(q, &wait->wait); + else + ret = -EIOCBQUEUED; + spin_unlock_irq(&q->lock); + return ret; +} + /** * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked * @page: The page to wait for. 
@@ -1342,6 +1372,11 @@ int __lock_page_killable(struct page *__page) } EXPORT_SYMBOL_GPL(__lock_page_killable); +int __lock_page_async(struct page *page, struct wait_page_queue *wait) +{ + return __wait_on_page_locked_async(page, wait, true); +} + /* * Return values: * 1 - page is locked; mmap_lock is still held. @@ -2131,6 +2166,11 @@ page_not_up_to_date_locked: } readpage: + if (iocb->ki_flags & IOCB_NOWAIT) { + unlock_page(page); + put_page(page); + goto would_block; + } /* * A previous I/O error may have been due to temporary * failures, eg. multipath errors. @@ -2150,7 +2190,10 @@ readpage: } if (!PageUptodate(page)) { - error = lock_page_killable(page); + if (iocb->ki_flags & IOCB_WAITQ) + error = lock_page_async(page, iocb->ki_waitq); + else + error = lock_page_killable(page); if (unlikely(error)) goto readpage_error; if (!PageUptodate(page)) { -- cgit v1.2.3-59-g8ed1b From c2a25ec0f1005dde004cd671484f578a9c8ca7de Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 May 2020 09:12:51 -0600 Subject: fs: add FMODE_BUF_RASYNC If set, this indicates that the file system supports IOCB_WAITQ for buffered reads. Signed-off-by: Jens Axboe --- include/linux/fs.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 2a5cf6080e68..4090320360f4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -175,6 +175,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File does not contribute to nr_files count */ #define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) +/* File supports async buffered reads */ +#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) + /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector * that indicates that they should check the contents of the iovec are -- cgit v1.2.3-59-g8ed1b From d1932dc3dc268f8dd5201c64971324d06ba977cc Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 May 2020 10:18:23 -0600 Subject: mm: add kiocb_wait_page_queue_init() helper Checks if the file supports it, and initializes the values that we need. Caller passes in 'data' pointer, if any, and the callback function to be used. 
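A rough sketch of the intended caller (not part of this patch; my_req, my_page_wake and the wpq member are made-up names):

  struct my_req {                         /* hypothetical submitter state */
          struct kiocb            iocb;
          struct wait_page_queue  wpq;
  };

  static int my_page_wake(struct wait_queue_entry *wait, unsigned mode,
                          int sync, void *key)
  {
          struct my_req *req = wait->private;

          /* typically: check wake_page_match(&req->wpq, key), then requeue req */
          return 1;
  }

  static int my_arm_async_read(struct my_req *req)
  {
          int ret = kiocb_wait_page_queue_init(&req->iocb, &req->wpq,
                                               my_page_wake, req);

          if (ret)
                  return ret;     /* -EINVAL (polled IO) or -EOPNOTSUPP */
          /* IOCB_WAITQ is now set and ki_waitq points at req->wpq */
          return 0;
  }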
Acked-by: Johannes Weiner Signed-off-by: Jens Axboe --- include/linux/pagemap.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'include') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e053e1d9a4d7..7386bc67cc5a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -533,6 +533,27 @@ static inline int wake_page_match(struct wait_page_queue *wait_page, return 1; } +static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb, + struct wait_page_queue *wait, + wait_queue_func_t func, + void *data) +{ + /* Can't support async wakeup with polled IO */ + if (kiocb->ki_flags & IOCB_HIPRI) + return -EINVAL; + if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) { + wait->wait.func = func; + wait->wait.private = data; + wait->wait.flags = 0; + INIT_LIST_HEAD(&wait->wait.entry); + kiocb->ki_flags |= IOCB_WAITQ; + kiocb->ki_waitq = wait; + return 0; + } + + return -EOPNOTSUPP; +} + extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); -- cgit v1.2.3-59-g8ed1b From 49bcaef86eba1a8097980f341e243ba01177a685 Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 22 Jun 2020 09:58:11 +0530 Subject: clk: qcom: Add DT bindings for ipq6018 apss clock controller Add dt-binding for ipq6018 apss clock controller Acked-by: Rob Herring Signed-off-by: Sivaprakash Murugesan Link: https://lore.kernel.org/r/1592800092-20533-4-git-send-email-sivaprak@codeaurora.org Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/qcom,apss-ipq.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 include/dt-bindings/clock/qcom,apss-ipq.h (limited to 'include') diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h new file mode 100644 index 000000000000..77b6e05492e2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,apss-ipq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H +#define _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H + +#define APCS_ALIAS0_CLK_SRC 0 +#define APCS_ALIAS0_CORE_CLK 1 + +#endif -- cgit v1.2.3-59-g8ed1b From b608013ac5b55a2e42d8734f29f9757b75d26165 Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Mon, 22 Jun 2020 11:02:52 +0200 Subject: clk: qcom: smd: Add support for SDM660 rpm clocks Add rpm smd clocks, PMIC and bus clocks which are required on SDM630/660 (and APQ variants) for clients to vote on. 
Signed-off-by: Konrad Dybcio Link: https://lore.kernel.org/r/20200622090252.36568-1-konradybcio@gmail.com Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,rpmcc.txt | 1 + drivers/clk/qcom/clk-smd-rpm.c | 76 ++++++++++++++++++++++ include/dt-bindings/clock/qcom,rpmcc.h | 10 +++ 3 files changed, 87 insertions(+) (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt index 90a1349bc713..86190acc71bc 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -20,6 +20,7 @@ Required properties : "qcom,rpmcc-msm8996", "qcom,rpmcc" "qcom,rpmcc-msm8998", "qcom,rpmcc" "qcom,rpmcc-qcs404", "qcom,rpmcc" + "qcom,rpmcc-sdm660", "qcom,rpmcc" - #clock-cells : shall contain 1 diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 52f63ad787ba..643bc355df5c 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -766,6 +766,81 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = { .num_clks = ARRAY_SIZE(msm8998_clks), }; +/* sdm660 */ +DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, + 19200000); +DEFINE_CLK_SMD_RPM(sdm660, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(sdm660, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(sdm660, cnoc_periph_clk, cnoc_periph_a_clk, + QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, mmssnoc_axi_clk, mmssnoc_axi_a_clk, + QCOM_SMD_RPM_MMAXI_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, aggre2_noc_clk, aggre2_noc_a_clk, + QCOM_SMD_RPM_AGGR_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(sdm660, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk1, ln_bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk2, ln_bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3); + +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk1_pin, + ln_bb_clk1_pin_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk2_pin, + ln_bb_clk2_pin_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, + ln_bb_clk3_pin_a, 3); +static struct clk_smd_rpm *sdm660_clks[] = { + [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo, + [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a, + [RPM_SMD_SNOC_CLK] = &sdm660_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &sdm660_snoc_a_clk, + [RPM_SMD_CNOC_CLK] = &sdm660_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &sdm660_cnoc_a_clk, + [RPM_SMD_CNOC_PERIPH_CLK] = &sdm660_cnoc_periph_clk, + [RPM_SMD_CNOC_PERIPH_A_CLK] = &sdm660_cnoc_periph_a_clk, + [RPM_SMD_BIMC_CLK] = &sdm660_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &sdm660_bimc_a_clk, + [RPM_SMD_MMSSNOC_AXI_CLK] = &sdm660_mmssnoc_axi_clk, + [RPM_SMD_MMSSNOC_AXI_CLK_A] = &sdm660_mmssnoc_axi_a_clk, + [RPM_SMD_IPA_CLK] = &sdm660_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &sdm660_ipa_a_clk, + [RPM_SMD_CE1_CLK] = &sdm660_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &sdm660_ce1_a_clk, + [RPM_SMD_AGGR2_NOC_CLK] = &sdm660_aggre2_noc_clk, + [RPM_SMD_AGGR2_NOC_A_CLK] = 
&sdm660_aggre2_noc_a_clk, + [RPM_SMD_QDSS_CLK] = &sdm660_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &sdm660_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &sdm660_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &sdm660_rf_clk1_a, + [RPM_SMD_DIV_CLK1] = &sdm660_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &sdm660_div_clk1_a, + [RPM_SMD_LN_BB_CLK] = &sdm660_ln_bb_clk1, + [RPM_SMD_LN_BB_A_CLK] = &sdm660_ln_bb_clk1_a, + [RPM_SMD_LN_BB_CLK2] = &sdm660_ln_bb_clk2, + [RPM_SMD_LN_BB_CLK2_A] = &sdm660_ln_bb_clk2_a, + [RPM_SMD_LN_BB_CLK3] = &sdm660_ln_bb_clk3, + [RPM_SMD_LN_BB_CLK3_A] = &sdm660_ln_bb_clk3_a, + [RPM_SMD_RF_CLK1_PIN] = &sdm660_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &sdm660_rf_clk1_a_pin, + [RPM_SMD_LN_BB_CLK1_PIN] = &sdm660_ln_bb_clk1_pin, + [RPM_SMD_LN_BB_CLK1_A_PIN] = &sdm660_ln_bb_clk1_pin_a, + [RPM_SMD_LN_BB_CLK2_PIN] = &sdm660_ln_bb_clk2_pin, + [RPM_SMD_LN_BB_CLK2_A_PIN] = &sdm660_ln_bb_clk2_pin_a, + [RPM_SMD_LN_BB_CLK3_PIN] = &sdm660_ln_bb_clk3_pin, + [RPM_SMD_LN_BB_CLK3_A_PIN] = &sdm660_ln_bb_clk3_pin_a, +}; + +static const struct rpm_smd_clk_desc rpm_clk_sdm660 = { + .clks = sdm660_clks, + .num_clks = ARRAY_SIZE(sdm660_clks), +}; + static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, @@ -773,6 +848,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 }, { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, + { .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 }, { } }; MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index ae74c43c485d..d1afa634b58d 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -133,5 +133,15 @@ #define RPM_SMD_RF_CLK3_A 87 #define RPM_SMD_RF_CLK3_PIN 88 #define RPM_SMD_RF_CLK3_A_PIN 89 +#define RPM_SMD_MMSSNOC_AXI_CLK 90 +#define RPM_SMD_MMSSNOC_AXI_CLK_A 91 +#define RPM_SMD_CNOC_PERIPH_CLK 92 +#define RPM_SMD_CNOC_PERIPH_A_CLK 93 +#define RPM_SMD_LN_BB_CLK3 94 +#define RPM_SMD_LN_BB_CLK3_A 95 +#define RPM_SMD_LN_BB_CLK1_PIN 96 +#define RPM_SMD_LN_BB_CLK1_A_PIN 97 +#define RPM_SMD_LN_BB_CLK2_PIN 98 +#define RPM_SMD_LN_BB_CLK2_A_PIN 99 #endif -- cgit v1.2.3-59-g8ed1b From 613c2e2c7e69e574411e2a3609459e18e91ae3fb Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 21 Jun 2020 22:18:26 +0800 Subject: soc: mediatek: cmdq: add assign function Add assign function in cmdq helper which assign constant value into internal register by index. 
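Illustrative use only (the register index and constant are arbitrary; packet lifetime is handled by the caller as usual):

  static struct cmdq_pkt *my_build_pkt(struct cmdq_client *client)
  {
          struct cmdq_pkt *pkt = cmdq_pkt_create(client, PAGE_SIZE);
          int err;

          if (IS_ERR(pkt))
                  return pkt;

          /* GCE internal register 0 <- 0x1234; a later read/write in the
           * same packet can use it as value, mask or address */
          err = cmdq_pkt_assign(pkt, 0, 0x1234);
          if (err) {
                  cmdq_pkt_destroy(pkt);
                  return ERR_PTR(err);
          }
          return pkt;     /* caller appends more commands, then flushes */
  }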
Signed-off-by: Dennis YC Hsieh Link: https://lore.kernel.org/r/1592749115-24158-3-git-send-email-dennis-yc.hsieh@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-cmdq-helper.c | 24 +++++++++++++++++++++++- include/linux/mailbox/mtk-cmdq-mailbox.h | 1 + include/linux/soc/mediatek/mtk-cmdq.h | 14 ++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 87ee9f767b7a..b9e5e4eea876 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -12,6 +12,7 @@ #define CMDQ_WRITE_ENABLE_MASK BIT(0) #define CMDQ_POLL_ENABLE_MASK BIT(0) #define CMDQ_EOC_IRQ_EN BIT(0) +#define CMDQ_REG_TYPE 1 struct cmdq_instruction { union { @@ -21,8 +22,17 @@ struct cmdq_instruction { union { u16 offset; u16 event; + u16 reg_dst; + }; + union { + u8 subsys; + struct { + u8 sop:5; + u8 arg_c_t:1; + u8 src_t:1; + u8 dst_t:1; + }; }; - u8 subsys; u8 op; }; @@ -278,6 +288,18 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, } EXPORT_SYMBOL(cmdq_pkt_poll_mask); +int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) +{ + struct cmdq_instruction inst = {}; + + inst.op = CMDQ_CODE_LOGIC; + inst.dst_t = CMDQ_REG_TYPE; + inst.reg_dst = reg_idx; + inst.value = value; + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_assign); + static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { struct cmdq_instruction inst = { {0} }; diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index a4dc45fbec0a..70b740a552c2 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -59,6 +59,7 @@ enum cmdq_code { CMDQ_CODE_JUMP = 0x10, CMDQ_CODE_WFE = 0x20, CMDQ_CODE_EOC = 0x40, + CMDQ_CODE_LOGIC = 0xa0, }; enum cmdq_cb_status { diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index a74c1d5acdf3..83340211e1d3 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -152,6 +152,20 @@ int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, */ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask); + +/** + * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE + * to execute an instruction that set a constant value into + * internal register and use as value, mask or address in + * read/write instruction. + * @pkt: the CMDQ packet + * @reg_idx: the CMDQ internal register ID + * @value: the specified value + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); + /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet -- cgit v1.2.3-59-g8ed1b From 995818588b6deedbed6b2b9d4c67e59b9afd6c60 Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 21 Jun 2020 22:18:32 +0800 Subject: soc: mediatek: cmdq: export finalize function Export finalize function to client which helps append eoc and jump command to pkt. Let client decide call finalize or not. 
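The expected client flow after this change looks roughly like this (illustration only; my_done_cb is a made-up completion callback), matching the mtk_drm_crtc hunk below:

  static void my_submit(struct cmdq_pkt *pkt, u16 event,
                        cmdq_async_flush_cb my_done_cb, void *data)
  {
          cmdq_pkt_clear_event(pkt, event);
          cmdq_pkt_wfe(pkt, event);       /* wait for the HW trigger event */
          /* ... append the actual register writes here ... */
          cmdq_pkt_finalize(pkt);         /* EOC + JUMP, no longer implicit */
          cmdq_pkt_flush_async(pkt, my_done_cb, data);
  }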
Signed-off-by: Dennis YC Hsieh Reviewed-by: CK Hu Acked-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/1592749115-24158-9-git-send-email-dennis-yc.hsieh@mediatek.com Signed-off-by: Matthias Brugger --- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 1 + drivers/soc/mediatek/mtk-cmdq-helper.c | 7 ++----- include/linux/soc/mediatek/mtk-cmdq.h | 8 ++++++++ 3 files changed, 11 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index fe46c4bac64d..ec6c9ffbf35e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -492,6 +492,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_finalize(cmdq_handle); cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); } #endif diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index b9e5e4eea876..2eda03fc7662 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -300,7 +300,7 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) } EXPORT_SYMBOL(cmdq_pkt_assign); -static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) +int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { struct cmdq_instruction inst = { {0} }; int err; @@ -319,6 +319,7 @@ static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) return err; } +EXPORT_SYMBOL(cmdq_pkt_finalize); static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data) { @@ -353,10 +354,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, unsigned long flags = 0; struct cmdq_client *client = (struct cmdq_client *)pkt->cl; - err = cmdq_pkt_finalize(pkt); - if (err < 0) - return err; - pkt->cb.cb = cb; pkt->cb.data = data; pkt->async_cb.cb = cmdq_pkt_flush_async_cb; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 83340211e1d3..c331255eacd1 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -166,6 +166,14 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, */ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); +/** + * cmdq_pkt_finalize() - Append EOC and jump command to pkt. + * @pkt: the CMDQ packet + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_finalize(struct cmdq_pkt *pkt); + /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet -- cgit v1.2.3-59-g8ed1b From 7de796cac48c0f55974cfe9cff683dfb3b7c71b6 Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 21 Jun 2020 22:18:35 +0800 Subject: soc: mediatek: cmdq: add set event function Add set event function in cmdq helper functions to set specific event. 
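Illustration only (the event ID is made up, it just has to be below CMDQ_MAX_EVENT): one packet can now raise an event that another packet waits on:

  static void my_sync_two_packets(struct cmdq_pkt *waiter_pkt,
                                  struct cmdq_pkt *signaller_pkt)
  {
          /* 600 is an arbitrary, otherwise unused GCE event ID */
          cmdq_pkt_clear_event(waiter_pkt, 600);
          cmdq_pkt_wfe(waiter_pkt, 600);          /* blocks until set */

          cmdq_pkt_set_event(signaller_pkt, 600); /* wakes the waiter */
  }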
Signed-off-by: Dennis YC Hsieh Link: https://lore.kernel.org/r/1592749115-24158-12-git-send-email-dennis-yc.hsieh@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-cmdq-helper.c | 15 +++++++++++++++ include/linux/mailbox/mtk-cmdq-mailbox.h | 1 + include/linux/soc/mediatek/mtk-cmdq.h | 9 +++++++++ 3 files changed, 25 insertions(+) (limited to 'include') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 2eda03fc7662..dc644cfb6419 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -253,6 +253,21 @@ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) } EXPORT_SYMBOL(cmdq_pkt_clear_event); +int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event) +{ + struct cmdq_instruction inst = {}; + + if (event >= CMDQ_MAX_EVENT) + return -EINVAL; + + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE; + inst.event = event; + + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_set_event); + int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) { diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index 70b740a552c2..a96e8c252bac 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -17,6 +17,7 @@ #define CMDQ_JUMP_PASS CMDQ_INST_SIZE #define CMDQ_WFE_UPDATE BIT(31) +#define CMDQ_WFE_UPDATE_VALUE BIT(16) #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index c331255eacd1..2249ecaf77e4 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -120,6 +120,15 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); */ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); +/** + * cmdq_pkt_set_event() - append set event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event to be set + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event); + /** * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to * execute an instruction that wait for a specified -- cgit v1.2.3-59-g8ed1b From 3af8588c77186bf08e55e7281da83d88373481d7 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 8 Jun 2020 17:28:50 +0200 Subject: fork: fold legacy_clone_args_valid() into _do_fork() This separate helper only existed to guarantee the mutual exclusivity of CLONE_PIDFD and CLONE_PARENT_SETTID for legacy clone since CLONE_PIDFD abuses the parent_tid field to return the pidfd. But we can actually handle this uniformely thus removing the helper. For legacy clone we can detect that CLONE_PIDFD is specified in conjunction with CLONE_PARENT_SETTID because they will share the same memory which is invalid and for clone3() setting the separate pidfd and parent_tid fields to the same memory is bogus as well. So fold that helper directly into _do_fork() by detecting this case. 
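For illustration (not part of this patch), a clone3() call that aliases the two fields is now rejected early in _do_fork(); availability of __NR_clone3 and struct clone_args in the installed headers is assumed:

  #include <signal.h>
  #include <stdint.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/sched.h>        /* struct clone_args, CLONE_* flags */

  static long clone3_aliased_fields(void)
  {
          static int slot;
          struct clone_args args = {
                  .flags       = CLONE_PIDFD | CLONE_PARENT_SETTID,
                  .pidfd       = (uintptr_t)&slot,
                  .parent_tid  = (uintptr_t)&slot,        /* same memory */
                  .exit_signal = SIGCHLD,
          };

          /* returns -1 with errno == EINVAL after this change */
          return syscall(__NR_clone3, &args, sizeof(args));
  }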
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Al Viro Cc: Geert Uytterhoeven Cc: "Matthew Wilcox (Oracle)" Cc: "Peter Zijlstra (Intel)" Cc: linux-m68k@lists.linux-m68k.org Cc: x86@kernel.org Signed-off-by: Christian Brauner --- arch/m68k/kernel/process.c | 3 --- arch/x86/kernel/sys_ia32.c | 3 --- include/linux/sched/task.h | 1 - kernel/fork.c | 30 ++++++++++++++---------------- 4 files changed, 14 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 90ae376b7ab1..0608439ba452 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -125,9 +125,6 @@ asmlinkage int m68k_clone(struct pt_regs *regs) .tls = regs->d5, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c index f8d65c99feb8..720cde885042 100644 --- a/arch/x86/kernel/sys_ia32.c +++ b/arch/x86/kernel/sys_ia32.c @@ -251,9 +251,6 @@ COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags, .tls = tls_val, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } #endif /* CONFIG_IA32_EMULATION */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 38359071236a..ddce0ea515d1 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -96,7 +96,6 @@ extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); extern long _do_fork(struct kernel_clone_args *kargs); -extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); diff --git a/kernel/fork.c b/kernel/fork.c index 142b23645d82..9875aeb2ba41 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2422,6 +2422,20 @@ long _do_fork(struct kernel_clone_args *args) int trace = 0; long nr; + /* + * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument + * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are + * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate + * field in struct clone_args and it still doesn't make sense to have + * them both point at the same memory location. Performing this check + * here has the advantage that we don't need to have a separate helper + * to check for legacy clone(). + */ + if ((args->flags & CLONE_PIDFD) && + (args->flags & CLONE_PARENT_SETTID) && + (args->pidfd == args->parent_tid)) + return -EINVAL; + /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly @@ -2479,16 +2493,6 @@ long _do_fork(struct kernel_clone_args *args) return nr; } -bool legacy_clone_args_valid(const struct kernel_clone_args *kargs) -{ - /* clone(CLONE_PIDFD) uses parent_tidptr to return a pidfd */ - if ((kargs->flags & CLONE_PIDFD) && - (kargs->flags & CLONE_PARENT_SETTID)) - return false; - - return true; -} - #ifndef CONFIG_HAVE_COPY_THREAD_TLS /* For compatibility with architectures that call do_fork directly rather than * using the syscall entry points below. 
*/ @@ -2508,9 +2512,6 @@ long do_fork(unsigned long clone_flags, .stack_size = stack_size, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } #endif @@ -2593,9 +2594,6 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, .tls = tls, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } #endif -- cgit v1.2.3-59-g8ed1b From 5cbd3ebde859bd43bd0584c146060638b1a3abb4 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 22 Jun 2020 13:30:28 +0000 Subject: Bluetooth: use configured params for ext adv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the extended advertisement feature is enabled, a hardcoded min and max interval of 0x8000 is used. This patch fixes this issue by using the configured min/max value. This was validated by setting min/max in main.conf and making sure the right setting is applied: < HCI Command: LE Set Extended Advertising Parameters (0x08|0x0036) plen 25 #93 [hci0] 10.953011 … Min advertising interval: 181.250 msec (0x0122) Max advertising interval: 181.250 msec (0x0122) … Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Daniel Winkler Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 8 ++++++++ net/bluetooth/hci_request.c | 7 +++---- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 16ab6ce87883..1f18f71363e9 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2516,4 +2516,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) #define hci_iso_data_len(h) ((h) & 0x3fff) #define hci_iso_data_flags(h) ((h) >> 14) +/* le24 support */ +static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3]) +{ + dst[0] = val & 0xff; + dst[1] = (val & 0xff00) >> 8; + dst[2] = (val & 0xff0000) >> 16; +} + #endif /* __HCI_H */ diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 29decd7e8051..86ae4b953a01 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1799,8 +1799,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) int err; struct adv_info *adv_instance; bool secondary_adv; - /* In ext adv set param interval is 3 octets */ - const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); @@ -1833,8 +1831,9 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); - memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); + /* In ext adv set param interval is 3 octets */ + hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); + hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); -- cgit v1.2.3-59-g8ed1b From cf6e26c71bfdff823fd40945b07666d75f1e1412 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 16 Jun 2020 14:19:41 +0900 Subject: ASoC: soc-component: merge snd_soc_component_read() and snd_soc_component_read32() We had read/write function for Codec, Platform, etc, but these has been merged into snd_soc_component_read/write(). Internally, it is using regmap or driver function. 
In read case, each styles are like below regmap ret = regmap_read(..., reg, &val); driver function val = xxx->read(..., reg); Because of this kind of different style, to keep same read style, when we merged each read function into snd_soc_component_read(), we created snd_soc_component_read32(), like below. commit 738b49efe6c6 ("ASoC: add snd_soc_component_read32") (1) val = snd_soc_component_read32(component, reg); (2) ret = snd_soc_component_read(component, reg, &val); Many drivers are using snd_soc_component_read32(), and some drivers are using snd_soc_component_read() today. In generally, we don't check read function successes, because, we will have many other issues at initial timing if read function didn't work. Now we can use soc_component_err() when error case. This means, it is easy to notice if error occurred. This patch aggressively merge snd_soc_component_read() and _read32(), and makes snd_soc_component_read/write() as generally style. This patch do 1) merge snd_soc_component_read() and snd_soc_component_read32() 2) it uses soc_component_err() when error case (easy to notice) 3) keeps read32 for now by #define 4) update snd_soc_component_read() for all drivers Because _read() user drivers are not too many, this patch changes all user drivers. Signed-off-by: Kuninori Morimoto Reviewed-by: Kai Vehmanen Link: https://lore.kernel.org/r/87sgev4mfl.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 5 ++-- sound/soc/codecs/ak4613.c | 4 ++-- sound/soc/codecs/cs47l35.c | 10 +------- sound/soc/codecs/cs47l85.c | 10 +------- sound/soc/codecs/inno_rk3036.c | 6 ++--- sound/soc/codecs/madera.c | 49 ++++++++------------------------------- sound/soc/codecs/nau8822.c | 2 +- sound/soc/codecs/rt1305.c | 2 +- sound/soc/codecs/rt5682.c | 3 +-- sound/soc/codecs/tas5720.c | 4 ++-- sound/soc/codecs/tda7419.c | 9 ++----- sound/soc/codecs/tscs454.c | 24 ++++--------------- sound/soc/fsl/fsl_audmix.c | 10 ++------ sound/soc/fsl/fsl_easrc.c | 5 +--- sound/soc/meson/aiu-encoder-i2s.c | 3 +-- sound/soc/meson/aiu-fifo-i2s.c | 3 +-- sound/soc/meson/aiu-fifo.c | 3 +-- sound/soc/soc-ac97.c | 7 +++--- sound/soc/soc-component.c | 40 ++++++++++---------------------- sound/soc/soc-dapm.c | 31 +++++++++---------------- sound/soc/soc-ops.c | 43 +++++++--------------------------- 21 files changed, 69 insertions(+), 204 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 4a4bb723ca9f..f64cffa12967 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -333,9 +333,8 @@ void snd_soc_component_set_aux(struct snd_soc_component *component, int snd_soc_component_init(struct snd_soc_component *component); /* component IO */ -int snd_soc_component_read(struct snd_soc_component *component, - unsigned int reg, unsigned int *val); -unsigned int snd_soc_component_read32(struct snd_soc_component *component, +#define snd_soc_component_read32 snd_soc_component_read +unsigned int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg); int snd_soc_component_write(struct snd_soc_component *component, unsigned int reg, unsigned int val); diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c index c1181a20714d..d4d2f0d9231a 100644 --- a/sound/soc/codecs/ak4613.c +++ b/sound/soc/codecs/ak4613.c @@ -490,8 +490,8 @@ static void ak4613_dummy_write(struct work_struct *work) */ udelay(5000000 / priv->rate); - snd_soc_component_read(component, PW_MGMT1, &mgmt1); - 
snd_soc_component_read(component, PW_MGMT3, &mgmt3); + mgmt1 = snd_soc_component_read(component, PW_MGMT1); + mgmt3 = snd_soc_component_read(component, PW_MGMT3); snd_soc_component_write(component, PW_MGMT1, mgmt1); snd_soc_component_write(component, PW_MGMT3, mgmt3); diff --git a/sound/soc/codecs/cs47l35.c b/sound/soc/codecs/cs47l35.c index d7538d50bbd3..e9b1fc4c7580 100644 --- a/sound/soc/codecs/cs47l35.c +++ b/sound/soc/codecs/cs47l35.c @@ -129,19 +129,11 @@ static void cs47l35_hp_post_enable(struct snd_soc_dapm_widget *w) struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); unsigned int val; - int ret; switch (w->shift) { case MADERA_OUT1L_ENA_SHIFT: case MADERA_OUT1R_ENA_SHIFT: - ret = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1, - &val); - if (ret) { - dev_err(component->dev, - "Failed to check output enables: %d\n", ret); - return; - } - + val = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1); val &= (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA); if (val != (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA)) diff --git a/sound/soc/codecs/cs47l85.c b/sound/soc/codecs/cs47l85.c index 9de991adad74..64db07a99408 100644 --- a/sound/soc/codecs/cs47l85.c +++ b/sound/soc/codecs/cs47l85.c @@ -191,19 +191,11 @@ static void cs47l85_hp_post_enable(struct snd_soc_dapm_widget *w) struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); unsigned int val; - int ret; switch (w->shift) { case MADERA_OUT1L_ENA_SHIFT: case MADERA_OUT1R_ENA_SHIFT: - ret = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1, - &val); - if (ret) { - dev_err(component->dev, - "Failed to check output enables: %d\n", ret); - return; - } - + val = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1); val &= (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA); if (val != (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA)) diff --git a/sound/soc/codecs/inno_rk3036.c b/sound/soc/codecs/inno_rk3036.c index 14d8fe1c28a4..d0e8f0d2fbc1 100644 --- a/sound/soc/codecs/inno_rk3036.c +++ b/sound/soc/codecs/inno_rk3036.c @@ -48,11 +48,9 @@ static int rk3036_codec_antipop_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - int val, ret, regval; + int val, regval; - ret = snd_soc_component_read(component, INNO_R09, ®val); - if (ret) - return ret; + regval = snd_soc_component_read(component, INNO_R09); val = ((regval >> INNO_R09_HPL_ANITPOP_SHIFT) & INNO_R09_HP_ANTIPOP_MSK) == INNO_R09_HP_ANTIPOP_ON; ucontrol->value.integer.value[0] = val; diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index ec380b0b2d4e..680f31a6493a 100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -628,12 +628,8 @@ int madera_out1_demux_get(struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol); unsigned int val; - int ret; - - ret = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1, &val); - if (ret) - return ret; + val = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1); val &= MADERA_EP_SEL_MASK; val >>= MADERA_EP_SEL_SHIFT; ucontrol->value.enumerated.item[0] = val; @@ -1068,12 +1064,7 @@ int madera_rate_put(struct snd_kcontrol *kcontrol, */ mutex_lock(&priv->rate_lock); - ret = snd_soc_component_read(component, e->reg, &val); - if (ret < 0) { - dev_warn(priv->madera->dev, "Failed to read 0x%x (%d)\n", - e->reg, ret); - goto out; - } + val = snd_soc_component_read(component, e->reg); val >>= e->shift_l; val &= e->mask; if 
(snd_soc_enum_item_to_val(e, item) == val) { @@ -2178,10 +2169,7 @@ int madera_dfc_put(struct snd_kcontrol *kcontrol, snd_soc_dapm_mutex_lock(dapm); - ret = snd_soc_component_read(component, reg, &val); - if (ret) - goto exit; - + val = snd_soc_component_read(component, reg); if (val & MADERA_DFC1_ENA) { ret = -EBUSY; dev_err(component->dev, "Can't change mode on an active DFC\n"); @@ -2211,9 +2199,7 @@ int madera_lp_mode_put(struct snd_kcontrol *kcontrol, snd_soc_dapm_mutex_lock(dapm); /* Cannot change lp mode on an active input */ - ret = snd_soc_component_read(component, MADERA_INPUT_ENABLES, &val); - if (ret) - goto exit; + val = snd_soc_component_read(component, MADERA_INPUT_ENABLES); mask = (mc->reg - MADERA_ADC_DIGITAL_VOLUME_1L) / 4; mask ^= 0x1; /* Flip bottom bit for channel order */ @@ -2276,7 +2262,6 @@ int madera_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct madera_priv *priv = snd_soc_component_get_drvdata(component); unsigned int reg, val; - int ret; if (w->shift % 2) reg = MADERA_ADC_DIGITAL_VOLUME_1L + ((w->shift / 2) * 8); @@ -2305,9 +2290,8 @@ int madera_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, break; case SND_SOC_DAPM_POST_PMD: /* Disable volume updates if no inputs are enabled */ - ret = snd_soc_component_read(component, MADERA_INPUT_ENABLES, - &val); - if (!ret && !val) + val = snd_soc_component_read(component, MADERA_INPUT_ENABLES); + if (!val) madera_in_set_vu(priv, false); break; default: @@ -3087,26 +3071,16 @@ static int madera_aif_cfg_changed(struct snd_soc_component *component, int base, int bclk, int lrclk, int frame) { unsigned int val; - int ret; - ret = snd_soc_component_read(component, base + MADERA_AIF_BCLK_CTRL, - &val); - if (ret) - return ret; + val = snd_soc_component_read(component, base + MADERA_AIF_BCLK_CTRL); if (bclk != (val & MADERA_AIF1_BCLK_FREQ_MASK)) return 1; - ret = snd_soc_component_read(component, base + MADERA_AIF_RX_BCLK_RATE, - &val); - if (ret) - return ret; + val = snd_soc_component_read(component, base + MADERA_AIF_RX_BCLK_RATE); if (lrclk != (val & MADERA_AIF1RX_BCPF_MASK)) return 1; - ret = snd_soc_component_read(component, base + MADERA_AIF_FRAME_CTRL_1, - &val); - if (ret) - return ret; + val = snd_soc_component_read(component, base + MADERA_AIF_FRAME_CTRL_1); if (frame != (val & (MADERA_AIF1TX_WL_MASK | MADERA_AIF1TX_SLOT_LEN_MASK))) return 1; @@ -3162,10 +3136,7 @@ static int madera_hw_params(struct snd_pcm_substream *substream, } /* Force multiple of 2 channels for I2S mode */ - ret = snd_soc_component_read(component, base + MADERA_AIF_FORMAT, &val); - if (ret) - return ret; - + val = snd_soc_component_read(component, base + MADERA_AIF_FORMAT); val &= MADERA_AIF1_FMT_MASK; if ((channels & 1) && val == MADERA_FMT_I2S_MODE) { madera_aif_dbg(dai, "Forcing stereo mode\n"); diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c index 78db3bd0b3bc..a4f661335c57 100644 --- a/sound/soc/codecs/nau8822.c +++ b/sound/soc/codecs/nau8822.c @@ -831,7 +831,7 @@ static int nau8822_hw_params(struct snd_pcm_substream *substream, unsigned int ctrl_val, bclk_fs, bclk_div; /* make BCLK and LRC divide configuration if the codec as master. 
*/ - snd_soc_component_read(component, NAU8822_REG_CLOCKING, &ctrl_val); + ctrl_val = snd_soc_component_read(component, NAU8822_REG_CLOCKING); if (ctrl_val & NAU8822_CLK_MASTER) { /* get the bclk and fs ratio */ bclk_fs = snd_soc_params_to_bclk(params) / params_rate(params); diff --git a/sound/soc/codecs/rt1305.c b/sound/soc/codecs/rt1305.c index e27742abfa76..4e9dfd235e59 100644 --- a/sound/soc/codecs/rt1305.c +++ b/sound/soc/codecs/rt1305.c @@ -411,7 +411,7 @@ static int rt1305_is_rc_clk_from_pll(struct snd_soc_dapm_widget *source, struct rt1305_priv *rt1305 = snd_soc_component_get_drvdata(component); unsigned int val; - snd_soc_component_read(component, RT1305_CLK_1, &val); + val = snd_soc_component_read(component, RT1305_CLK_1); if (rt1305->sysclk_src == RT1305_FS_SYS_PRE_S_PLL1 && (val & RT1305_SEL_PLL_SRC_2_RCCLK)) diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 36cfd10f8b04..8b592069a7e2 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -2640,8 +2640,7 @@ static unsigned long rt5682_bclk_recalc_rate(struct clk_hw *hw, struct snd_soc_component *component = rt5682->component; unsigned int bclks_per_wclk; - snd_soc_component_read(component, RT5682_TDM_TCON_CTRL, - &bclks_per_wclk); + bclks_per_wclk = snd_soc_component_read(component, RT5682_TDM_TCON_CTRL); switch (bclks_per_wclk & RT5682_TDM_BCLK_MS1_MASK) { case RT5682_TDM_BCLK_MS1_256: diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c index 37fab8f22800..e159f839d928 100644 --- a/sound/soc/codecs/tas5720.c +++ b/sound/soc/codecs/tas5720.c @@ -508,10 +508,10 @@ static int tas5722_volume_get(struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); unsigned int val; - snd_soc_component_read(component, TAS5720_VOLUME_CTRL_REG, &val); + val = snd_soc_component_read(component, TAS5720_VOLUME_CTRL_REG); ucontrol->value.integer.value[0] = val << 1; - snd_soc_component_read(component, TAS5722_DIGITAL_CTRL2_REG, &val); + val = snd_soc_component_read(component, TAS5722_DIGITAL_CTRL2_REG); ucontrol->value.integer.value[0] |= val & TAS5722_VOL_CONTROL_LSB; return 0; diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c index 2bf4f5e8af27..83d220054c96 100644 --- a/sound/soc/codecs/tda7419.c +++ b/sound/soc/codecs/tda7419.c @@ -187,18 +187,13 @@ static int tda7419_vol_get(struct snd_kcontrol *kcontrol, int thresh = tvc->thresh; unsigned int invert = tvc->invert; int val; - int ret; - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = tda7419_vol_get_value(val, mask, min, thresh, invert); if (tda7419_vol_is_stereo(tvc)) { - ret = snd_soc_component_read(component, rreg, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, rreg); ucontrol->value.integer.value[1] = tda7419_vol_get_value(val, mask, min, thresh, invert); } diff --git a/sound/soc/codecs/tscs454.c b/sound/soc/codecs/tscs454.c index c3587af9985c..d0af16b4db2f 100644 --- a/sound/soc/codecs/tscs454.c +++ b/sound/soc/codecs/tscs454.c @@ -353,12 +353,7 @@ static int write_coeff_ram(struct snd_soc_component *component, u8 *coeff_ram, for (cnt = 0; cnt < coeff_cnt; cnt++, coeff_addr++) { for (trys = 0; trys < DACCRSTAT_MAX_TRYS; trys++) { - ret = snd_soc_component_read(component, r_stat, &val); - if (ret < 0) { - dev_err(component->dev, - "Failed to read stat (%d)\n", ret); - return ret; - } + val = 
snd_soc_component_read(component, r_stat); if (!val) break; } @@ -444,12 +439,7 @@ static int coeff_ram_put(struct snd_kcontrol *kcontrol, mutex_lock(&tscs454->pll1.lock); mutex_lock(&tscs454->pll2.lock); - ret = snd_soc_component_read(component, R_PLLSTAT, &val); - if (ret < 0) { - dev_err(component->dev, "Failed to read PLL status (%d)\n", - ret); - goto exit; - } + val = snd_soc_component_read(component, R_PLLSTAT); if (val) { /* PLLs locked */ ret = write_coeff_ram(component, coeff_ram, r_stat, r_addr, r_wr, @@ -2642,13 +2632,10 @@ static int tscs454_set_sysclk(struct snd_soc_dai *dai, struct tscs454 *tscs454 = snd_soc_component_get_drvdata(component); unsigned int val; int bclk_dai; - int ret; dev_dbg(component->dev, "%s(): freq = %u\n", __func__, freq); - ret = snd_soc_component_read(component, R_PLLCTL, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, R_PLLCTL); bclk_dai = (val & FM_PLLCTL_BCLKSEL) >> FB_PLLCTL_BCLKSEL; if (bclk_dai != dai->id) @@ -3204,10 +3191,7 @@ static int tscs454_hw_params(struct snd_pcm_substream *substream, } if (!aifs_active(&tscs454->aifs_status)) { /* First active aif */ - ret = snd_soc_component_read(component, R_ISRC, &val); - if (ret < 0) - goto exit; - + val = snd_soc_component_read(component, R_ISRC); if ((val & FM_ISRC_IBR) == FV_IBR_48) tscs454->internal_rate.pll = &tscs454->pll1; else diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c index 8b9027f76d8a..a447bafa00d2 100644 --- a/sound/soc/fsl/fsl_audmix.c +++ b/sound/soc/fsl/fsl_audmix.c @@ -116,13 +116,9 @@ static int fsl_audmix_put_mix_clk_src(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int *item = ucontrol->value.enumerated.item; unsigned int reg_val, val, mix_clk; - int ret; /* Get current state */ - ret = snd_soc_component_read(comp, FSL_AUDMIX_CTR, ®_val); - if (ret) - return ret; - + reg_val = snd_soc_component_read(comp, FSL_AUDMIX_CTR); mix_clk = ((reg_val & FSL_AUDMIX_CTR_MIXCLK_MASK) >> FSL_AUDMIX_CTR_MIXCLK_SHIFT); val = snd_soc_enum_item_to_val(e, item[0]); @@ -162,9 +158,7 @@ static int fsl_audmix_put_out_src(struct snd_kcontrol *kcontrol, int ret; /* Get current state */ - ret = snd_soc_component_read(comp, FSL_AUDMIX_CTR, ®_val); - if (ret) - return ret; + reg_val = snd_soc_component_read(comp, FSL_AUDMIX_CTR); /* "From" state */ out_src = ((reg_val & FSL_AUDMIX_CTR_OUTSRC_MASK) diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c index 2f6b3d8bfcfc..58cc093ad741 100644 --- a/sound/soc/fsl/fsl_easrc.c +++ b/sound/soc/fsl/fsl_easrc.c @@ -79,11 +79,8 @@ static int fsl_easrc_get_reg(struct snd_kcontrol *kcontrol, struct soc_mreg_control *mc = (struct soc_mreg_control *)kcontrol->private_value; unsigned int regval; - int ret; - ret = snd_soc_component_read(component, mc->regbase, ®val); - if (ret < 0) - return ret; + regval = snd_soc_component_read(component, mc->regbase); ucontrol->value.integer.value[0] = regval; diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c index 832e22d275fe..932224552146 100644 --- a/sound/soc/meson/aiu-encoder-i2s.c +++ b/sound/soc/meson/aiu-encoder-i2s.c @@ -72,11 +72,10 @@ static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component, { /* Always operate in split (classic interleaved) mode */ unsigned int desc = AIU_I2S_SOURCE_DESC_MODE_SPLIT; - unsigned int val; /* Reset required to update the pipeline */ snd_soc_component_write(component, AIU_RST_SOFT, AIU_RST_SOFT_I2S_FAST); - 
snd_soc_component_read(component, AIU_I2S_SYNC, &val); + snd_soc_component_read(component, AIU_I2S_SYNC); switch (params_physical_width(params)) { case 16: /* Nothing to do */ diff --git a/sound/soc/meson/aiu-fifo-i2s.c b/sound/soc/meson/aiu-fifo-i2s.c index 9a5271ce80fe..d91b0d874342 100644 --- a/sound/soc/meson/aiu-fifo-i2s.c +++ b/sound/soc/meson/aiu-fifo-i2s.c @@ -46,7 +46,6 @@ static int aiu_fifo_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; - unsigned int val; switch (cmd) { case SNDRV_PCM_TRIGGER_START: @@ -54,7 +53,7 @@ static int aiu_fifo_i2s_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: snd_soc_component_write(component, AIU_RST_SOFT, AIU_RST_SOFT_I2S_FAST); - snd_soc_component_read(component, AIU_I2S_SYNC, &val); + snd_soc_component_read(component, AIU_I2S_SYNC); break; } diff --git a/sound/soc/meson/aiu-fifo.c b/sound/soc/meson/aiu-fifo.c index d9cede4c33ff..aa88aae8e517 100644 --- a/sound/soc/meson/aiu-fifo.c +++ b/sound/soc/meson/aiu-fifo.c @@ -37,8 +37,7 @@ snd_pcm_uframes_t aiu_fifo_pointer(struct snd_soc_component *component, struct snd_pcm_runtime *runtime = substream->runtime; unsigned int addr; - snd_soc_component_read(component, fifo->mem_offset + AIU_MEM_RD, - &addr); + addr = snd_soc_component_read(component, fifo->mem_offset + AIU_MEM_RD); return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr); } diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c index c086786e4471..906106ed8ca1 100644 --- a/sound/soc/soc-ac97.c +++ b/sound/soc/soc-ac97.c @@ -82,13 +82,12 @@ static int snd_soc_ac97_gpio_get(struct gpio_chip *chip, unsigned offset) struct snd_soc_component *component = gpio_to_component(chip); int ret; - if (snd_soc_component_read(component, AC97_GPIO_STATUS, &ret) < 0) - ret = -1; + ret = snd_soc_component_read(component, AC97_GPIO_STATUS); dev_dbg(component->dev, "get gpio %d : %d\n", offset, - ret < 0 ? ret : ret & (1 << offset)); + ret & (1 << offset)); - return ret < 0 ? ret : !!(ret & (1 << offset)); + return !!(ret & (1 << offset)); } static void snd_soc_ac97_gpio_set(struct gpio_chip *chip, unsigned offset, diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index d121f5f7633c..428f88decfdb 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -407,41 +407,30 @@ EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); * snd_soc_component_read() - Read register value * @component: Component to read from * @reg: Register to read - * @val: Pointer to where the read value is stored * - * Return: 0 on success, a negative error code otherwise. 
+ * Return: read value */ -int snd_soc_component_read(struct snd_soc_component *component, - unsigned int reg, unsigned int *val) +unsigned int snd_soc_component_read(struct snd_soc_component *component, + unsigned int reg) { int ret; + unsigned int val = 0; if (component->regmap) - ret = regmap_read(component->regmap, reg, val); + ret = regmap_read(component->regmap, reg, &val); else if (component->driver->read) { - *val = component->driver->read(component, reg); ret = 0; + val = component->driver->read(component, reg); } else ret = -EIO; - return soc_component_ret(component, ret); -} -EXPORT_SYMBOL_GPL(snd_soc_component_read); - -unsigned int snd_soc_component_read32(struct snd_soc_component *component, - unsigned int reg) -{ - unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); if (ret < 0) - return soc_component_ret(component, -1); + soc_component_ret(component, ret); return val; } -EXPORT_SYMBOL_GPL(snd_soc_component_read32); +EXPORT_SYMBOL_GPL(snd_soc_component_read); /** * snd_soc_component_write() - Write register value @@ -470,19 +459,17 @@ static int snd_soc_component_update_bits_legacy( unsigned int mask, unsigned int val, bool *change) { unsigned int old, new; - int ret; + int ret = 0; mutex_lock(&component->io_mutex); - ret = snd_soc_component_read(component, reg, &old); - if (ret < 0) - goto out_unlock; + old = snd_soc_component_read(component, reg); new = (old & ~mask) | (val & mask); *change = old != new; if (*change) ret = snd_soc_component_write(component, reg, new); -out_unlock: + mutex_unlock(&component->io_mutex); return soc_component_ret(component, ret); @@ -584,11 +571,8 @@ int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value) { unsigned int old, new; - int ret; - ret = snd_soc_component_read(component, reg, &old); - if (ret < 0) - return soc_component_ret(component, ret); + old = snd_soc_component_read(component, reg); new = (old & ~mask) | value; return old != new; } diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2491e1ce16d3..e51aa2efc65c 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -616,12 +616,11 @@ static const char *soc_dapm_prefix(struct snd_soc_dapm_context *dapm) return dapm->component->name_prefix; } -static int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg, - unsigned int *value) +static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg) { if (!dapm->component) return -EIO; - return snd_soc_component_read(dapm->component, reg, value); + return snd_soc_component_read(dapm->component, reg); } static int soc_dapm_update_bits(struct snd_soc_dapm_context *dapm, @@ -753,7 +752,7 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm, int i; if (e->reg != SND_SOC_NOPM) { - soc_dapm_read(dapm, e->reg, &val); + val = soc_dapm_read(dapm, e->reg); val = (val >> e->shift_l) & e->mask; item = snd_soc_enum_val_to_item(e, val); } else { @@ -790,7 +789,7 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, unsigned int val; if (reg != SND_SOC_NOPM) { - soc_dapm_read(p->sink->dapm, reg, &val); + val = soc_dapm_read(p->sink->dapm, reg); /* * The nth_path argument allows this function to know * which path of a kcontrol it is setting the initial @@ -805,7 +804,7 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, */ if (snd_soc_volsw_is_stereo(mc) && nth_path > 0) { if (reg != mc->rreg) - soc_dapm_read(p->sink->dapm, mc->rreg, &val); + val = 
soc_dapm_read(p->sink->dapm, mc->rreg); val = (val >> mc->rshift) & mask; } else { val = (val >> shift) & mask; @@ -3246,7 +3245,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_card *card) /* Read the initial power state from the device */ if (w->reg >= 0) { - soc_dapm_read(w->dapm, w->reg, &val); + val = soc_dapm_read(w->dapm, w->reg); val = val >> w->shift; val &= w->mask; if (val == w->on_val) @@ -3288,15 +3287,14 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int reg_val, val, rval = 0; - int ret = 0; mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM) { - ret = soc_dapm_read(dapm, reg, ®_val); + reg_val = soc_dapm_read(dapm, reg); val = (reg_val >> shift) & mask; - if (ret == 0 && reg != mc->rreg) - ret = soc_dapm_read(dapm, mc->rreg, ®_val); + if (reg != mc->rreg) + reg_val = soc_dapm_read(dapm, mc->rreg); if (snd_soc_volsw_is_stereo(mc)) rval = (reg_val >> mc->rshift) & mask; @@ -3309,9 +3307,6 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, } mutex_unlock(&card->dapm_mutex); - if (ret) - return ret; - if (invert) ucontrol->value.integer.value[0] = max - val; else @@ -3324,7 +3319,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, ucontrol->value.integer.value[1] = rval; } - return ret; + return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw); @@ -3439,11 +3434,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol, mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); if (e->reg != SND_SOC_NOPM && dapm_kcontrol_is_powered(kcontrol)) { - int ret = soc_dapm_read(dapm, e->reg, ®_val); - if (ret) { - mutex_unlock(&card->dapm_mutex); - return ret; - } + reg_val = soc_dapm_read(dapm, e->reg); } else { reg_val = dapm_kcontrol_get_value(kcontrol); } diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 55ffb34be95e..10f48827bb0e 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -63,11 +63,8 @@ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val, item; unsigned int reg_val; - int ret; - ret = snd_soc_component_read(component, e->reg, ®_val); - if (ret) - return ret; + reg_val = snd_soc_component_read(component, e->reg); val = (reg_val >> e->shift_l) & e->mask; item = snd_soc_enum_val_to_item(e, val); ucontrol->value.enumerated.item[0] = item; @@ -136,10 +133,7 @@ static int snd_soc_read_signed(struct snd_soc_component *component, int ret; unsigned int val; - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; - + val = snd_soc_component_read(component, reg); val = (val >> shift) & mask; if (!sign_bit) { @@ -375,19 +369,12 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol, int min = mc->min; unsigned int mask = (1U << (fls(min + max) - 1)) - 1; unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask; if (snd_soc_volsw_is_stereo(mc)) { - ret = snd_soc_component_read(component, reg2, &val); - if (ret < 0) - return ret; - + val = snd_soc_component_read(component, reg2); val = ((val >> rshift) - min) & mask; ucontrol->value.integer.value[1] = val; } @@ -548,12 +535,8 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 
1; unsigned int invert = mc->invert; unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; + val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = @@ -563,10 +546,7 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, ucontrol->value.integer.value[0] - min; if (snd_soc_volsw_is_stereo(mc)) { - ret = snd_soc_component_read(component, rreg, &val); - if (ret) - return ret; - + val = snd_soc_component_read(component, rreg); ucontrol->value.integer.value[1] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[1] = @@ -833,12 +813,9 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, long val = 0; unsigned int regval; unsigned int i; - int ret; for (i = 0; i < regcount; i++) { - ret = snd_soc_component_read(component, regbase+i, ®val); - if (ret) - return ret; + regval = snd_soc_component_read(component, regbase+i); val |= (regval & regwmask) << (regwshift*(regcount-i-1)); } val &= mask; @@ -918,12 +895,8 @@ int snd_soc_get_strobe(struct snd_kcontrol *kcontrol, unsigned int mask = 1 << shift; unsigned int invert = mc->invert != 0; unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; + val = snd_soc_component_read(component, reg); val &= mask; if (shift != 0 && val != 0) -- cgit v1.2.3-59-g8ed1b From 5b554b0a29ce9610e3c237c77a1f76db87454b72 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 16 Jun 2020 14:21:55 +0900 Subject: ASoC: remove snd_soc_component_read32() No driver is using snd_soc_component_read32() anymore. This patch removes it. Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/877dw74mbv.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index f64cffa12967..8917b15eccae 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -333,7 +333,6 @@ void snd_soc_component_set_aux(struct snd_soc_component *component, int snd_soc_component_init(struct snd_soc_component *component); /* component IO */ -#define snd_soc_component_read32 snd_soc_component_read unsigned int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg); int snd_soc_component_write(struct snd_soc_component *component, -- cgit v1.2.3-59-g8ed1b From 8746f135bb01872ff412d408ea1aa9ebd328c1f5 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Wed, 20 May 2020 14:20:14 -0700 Subject: Bluetooth: Disconnect if E0 is used for Level 4 E0 is not allowed with Level 4: BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C page 1319: '128-bit equivalent strength for link and encryption keys required using FIPS approved algorithms (E0 not allowed, SAFER+ not allowed, and P-192 not allowed; encryption key not shortened' SC enabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x0b 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple Pairing (Host Support) LE Supported (Host) Secure Connections (Host Support) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with AES-CCM (0x02) SC disabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x03 0x00 0x00 0x00 0x00 0x00 0x00 
0x00 Secure Simple Pairing (Host Support) LE Supported (Host) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with E0 (0x01) [May 8 20:23] Bluetooth: hci0: Invalid security: expect AES but E0 was used < HCI Command: Disconnect (0x01|0x0006) plen 3 Handle: 256 Reason: Authentication Failure (0x05) Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 10 ++++++---- net/bluetooth/hci_conn.c | 17 +++++++++++++++++ net/bluetooth/hci_event.c | 20 ++++++++------------ 3 files changed, 31 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 77d29341b064..836dc997ff94 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1481,11 +1481,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) else encrypt = 0x01; - if (conn->sec_level == BT_SECURITY_SDP) - conn->sec_level = BT_SECURITY_LOW; + if (!status) { + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; - if (conn->pending_sec_level > conn->sec_level) - conn->sec_level = conn->pending_sec_level; + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 47f3a45d7dcb..8805d68e65f2 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1322,6 +1322,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 0; } + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e08d4dd9a24e..e060fc9ebb18 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3065,27 +3065,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = HCI_ERROR_AUTH_FAILURE; + if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + /* Notify upper layers so they can cleanup before + * disconnecting. + */ + hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - /* In Secure Connections Only mode, do not allow any connections - * that are not encrypted with AES-CCM using a P-256 authenticated - * combination key. 
- */ - if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && - (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || - conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_drop(conn); - goto unlock; - } - /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; -- cgit v1.2.3-59-g8ed1b From 53f13319d13197dd1f4c8ce5fc1ef4c32509b4e2 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 9 Jun 2020 18:10:39 +0300 Subject: thunderbolt: Get rid of E2E workaround The end-to-end (E2E) workaround is needed for Falcon Ridge (TBT 2) controller when E2E is enabled for both ends of the host-to-host connection. However, we never supported full E2E in the first place so this code is not necessary at the moment. Further this allows us to use all available rings for data except ring 0 which is reserved for the control path. The complete E2E flow control is explained in the USB4 spec so we may add it back later if needed but at least the networking driver seems to work fine without, and the higher level stack, like TCP will retransmit lost packets anyway. Signed-off-by: Mika Westerberg --- drivers/net/thunderbolt.c | 4 ++-- drivers/thunderbolt/nhi.c | 26 ++------------------------ include/linux/thunderbolt.h | 2 -- 3 files changed, 4 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index dacb4f680fd4..a812726703a4 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -866,8 +866,8 @@ static int tbnet_open(struct net_device *dev) eof_mask = BIT(TBIP_PDF_FRAME_END); ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, - RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask, - eof_mask, tbnet_start_poll, net); + RING_FLAG_FRAME, sof_mask, eof_mask, + tbnet_start_poll, net); if (!ring) { netdev_err(dev, "failed to allocate Rx ring\n"); tb_ring_free(net->tx_ring.ring); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index b617922b5b0a..5f7489fa1327 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -24,12 +24,7 @@ #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") -/* - * Used to enable end-to-end workaround for missing RX packets. Do not - * use this ring for anything else. - */ -#define RING_E2E_UNUSED_HOPID 2 -#define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID +#define RING_FIRST_USABLE_HOPID 1 /* * Minimal number of vectors when we use MSI-X. Two for control channel @@ -440,7 +435,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) /* * Automatically allocate HopID from the non-reserved - * range 8 .. hop_count - 1. + * range 1 .. hop_count - 1. */ for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { if (ring->is_tx) { @@ -496,10 +491,6 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", transmit ? "TX" : "RX", hop, size); - /* Tx Ring 2 is reserved for E2E workaround */ - if (transmit && hop == RING_E2E_UNUSED_HOPID) - return NULL; - ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) return NULL; @@ -614,19 +605,6 @@ void tb_ring_start(struct tb_ring *ring) flags = RING_FLAG_ENABLE | RING_FLAG_RAW; } - if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { - u32 hop; - - /* - * In order not to lose Rx packets we enable end-to-end - * workaround which transfers Rx credits to an unused Tx - * HopID. 
- */ - hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT; - hop &= REG_RX_OPTIONS_E2E_HOP_MASK; - flags |= hop | RING_FLAG_E2E_FLOW_CONTROL; - } - ring_iowrite64desc(ring, ring->descriptors_dma, 0); if (ring->is_tx) { ring_iowrite32desc(ring, ring->size, 12); diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index ff397c0d5c07..5db2b11ab085 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -504,8 +504,6 @@ struct tb_ring { #define RING_FLAG_NO_SUSPEND BIT(0) /* Configure the ring to be in frame mode */ #define RING_FLAG_FRAME BIT(1) -/* Enable end-to-end flow control */ -#define RING_FLAG_E2E BIT(2) struct ring_frame; typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled); -- cgit v1.2.3-59-g8ed1b From 41c48f3a98231738c5ce79f6f2aa6e40ba924d18 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:43 -0700 Subject: bpf: Support access to bpf map fields There are multiple use-cases when it's convenient to have access to bpf map fields, both `struct bpf_map` and map type specific struct-s such as `struct bpf_array`, `struct bpf_htab`, etc. For example while working with sock arrays it can be necessary to calculate the key based on map->max_entries (some_hash % max_entries). Currently this is solved by communicating max_entries via "out-of-band" channel, e.g. via additional map with known key to get info about target map. That works, but is not very convenient and error-prone while working with many maps. In other cases necessary data is dynamic (i.e. unknown at loading time) and it's impossible to get it at all. For example while working with a hash table it can be convenient to know how much capacity is already used (bpf_htab.count.counter for BPF_F_NO_PREALLOC case). At the same time kernel knows this info and can provide it to bpf program. Fill this gap by adding support to access bpf map fields from bpf program for both `struct bpf_map` and map type specific fields. Support is implemented via btf_struct_access() so that a user can define their own `struct bpf_map` or map type specific struct in their program with only necessary fields and preserve_access_index attribute, cast a map to this struct and use a field. For example: struct bpf_map { __u32 max_entries; } __attribute__((preserve_access_index)); struct bpf_array { struct bpf_map map; __u32 elem_size; } __attribute__((preserve_access_index)); struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 4); __type(key, __u32); __type(value, __u32); } m_array SEC(".maps"); SEC("cgroup_skb/egress") int cg_skb(void *ctx) { struct bpf_array *array = (struct bpf_array *)&m_array; struct bpf_map *map = (struct bpf_map *)&m_array; /* .. use map->max_entries or array->map.max_entries .. */ } Similarly to other btf_struct_access() use-cases (e.g. struct tcp_sock in net/ipv4/bpf_tcp_ca.c) the patch allows access to any fields of corresponding struct. Only reading from map fields is supported. For btf_struct_access() to work there should be a way to know btf id of a struct that corresponds to a map type. To get btf id there should be a way to get a stringified name of map-specific struct, such as "bpf_array", "bpf_htab", etc for a map type. Two new fields are added to `struct bpf_map_ops` to handle it: * .map_btf_name keeps a btf name of a struct returned by map_alloc(); * .map_btf_id is used to cache btf id of that struct. 
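For illustration only (the real hookups for arrays and hash tables are in the diff below), a map type opts in by naming the struct its map_alloc() returns and providing storage for the cached id. The map type, ops name and alloc/free helpers here are hypothetical placeholders, not code from this patch:

    static int example_map_btf_id;

    const struct bpf_map_ops example_map_ops = {
    	.map_alloc	= example_map_alloc,	/* returns a struct example_map */
    	.map_free	= example_map_free,
    	/* must match the struct name returned by map_alloc() */
    	.map_btf_name	= "example_map",
    	/* storage for the BTF id resolved while parsing btf_vmlinux */
    	.map_btf_id	= &example_map_btf_id,
    };
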
To make btf ids calculation cheaper they're calculated once while preparing btf_vmlinux and cached same way as it's done for btf_id field of `struct bpf_func_proto` While calculating btf ids, struct names are NOT checked for collision. Collisions will be checked as a part of the work to prepare btf ids used in verifier in compile time that should land soon. The only known collision for `struct bpf_htab` (kernel/bpf/hashtab.c vs net/core/sock_map.c) was fixed earlier. Both new fields .map_btf_name and .map_btf_id must be set for a map type for the feature to work. If neither is set for a map type, verifier will return ENOTSUPP on a try to access map_ptr of corresponding type. If just one of them set, it's verifier misconfiguration. Only `struct bpf_array` for BPF_MAP_TYPE_ARRAY and `struct bpf_htab` for BPF_MAP_TYPE_HASH are supported by this patch. Other map types will be supported separately. The feature is available only for CONFIG_DEBUG_INFO_BTF=y and gated by perfmon_capable() so that unpriv programs won't have access to bpf map fields. Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/6479686a0cd1e9067993df57b4c3eef0e276fec9.1592600985.git.rdna@fb.com --- include/linux/bpf.h | 9 +++ include/linux/bpf_verifier.h | 1 + kernel/bpf/arraymap.c | 3 + kernel/bpf/btf.c | 40 +++++++++++ kernel/bpf/hashtab.c | 3 + kernel/bpf/verifier.c | 82 +++++++++++++++++++--- .../selftests/bpf/verifier/map_ptr_mixing.c | 2 +- 7 files changed, 131 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 07052d44bca1..1e1501ee53ce 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -92,6 +92,10 @@ struct bpf_map_ops { int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, struct poll_table_struct *pts); + + /* BTF name and id of struct allocated by map_alloc */ + const char * const map_btf_name; + int *map_btf_id; }; struct bpf_map_memory { @@ -1109,6 +1113,11 @@ static inline bool bpf_allow_ptr_leaks(void) return perfmon_capable(); } +static inline bool bpf_allow_ptr_to_map_access(void) +{ + return perfmon_capable(); +} + static inline bool bpf_bypass_spec_v1(void) { return perfmon_capable(); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index ca08db4ffb5f..53c7bd568c5d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -379,6 +379,7 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; + bool allow_ptr_to_map_access; bool bpf_capable; bool bypass_spec_v1; bool bypass_spec_v4; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 11584618e861..e7caa48812fb 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -494,6 +494,7 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) vma->vm_pgoff + pgoff); } +static int array_map_btf_id; const struct bpf_map_ops array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, @@ -510,6 +511,8 @@ const struct bpf_map_ops array_map_ops = { .map_check_btf = array_map_check_btf, .map_lookup_batch = generic_map_lookup_batch, .map_update_batch = generic_map_update_batch, + .map_btf_name = "bpf_array", + .map_btf_id = &array_map_btf_id, }; const struct bpf_map_ops percpu_array_map_ops = { diff --git a/kernel/bpf/btf.c 
b/kernel/bpf/btf.c index 3eb804618a53..e377d1981730 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3571,6 +3571,41 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, return ctx_type; } +static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) +#define BPF_LINK_TYPE(_id, _name) +#define BPF_MAP_TYPE(_id, _ops) \ + [_id] = &_ops, +#include +#undef BPF_PROG_TYPE +#undef BPF_LINK_TYPE +#undef BPF_MAP_TYPE +}; + +static int btf_vmlinux_map_ids_init(const struct btf *btf, + struct bpf_verifier_log *log) +{ + const struct bpf_map_ops *ops; + int i, btf_id; + + for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) { + ops = btf_vmlinux_map_ops[i]; + if (!ops || (!ops->map_btf_name && !ops->map_btf_id)) + continue; + if (!ops->map_btf_name || !ops->map_btf_id) { + bpf_log(log, "map type %d is misconfigured\n", i); + return -EINVAL; + } + btf_id = btf_find_by_name_kind(btf, ops->map_btf_name, + BTF_KIND_STRUCT); + if (btf_id < 0) + return btf_id; + *ops->map_btf_id = btf_id; + } + + return 0; +} + static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, struct btf *btf, const struct btf_type *t, @@ -3633,6 +3668,11 @@ struct btf *btf_parse_vmlinux(void) /* btf_parse_vmlinux() runs under bpf_verifier_lock */ bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); + /* find bpf map structs for map_ptr access checking */ + err = btf_vmlinux_map_ids_init(btf, log); + if (err < 0) + goto errout; + bpf_struct_ops_init(btf, log); btf_verifier_env_free(env); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b4b288a3c3c9..2c5999e02060 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1614,6 +1614,7 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, true, false); } +static int htab_map_btf_id; const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1625,6 +1626,8 @@ const struct bpf_map_ops htab_map_ops = { .map_gen_lookup = htab_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, BATCH_OPS(htab), + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_map_btf_id, }; const struct bpf_map_ops htab_lru_map_ops = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a1857c4ffaaf..7460f967cb75 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1351,6 +1351,19 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, __mark_reg_not_init(env, regs + regno); } +static void mark_btf_ld_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, u32 regno, + enum bpf_reg_type reg_type, u32 btf_id) +{ + if (reg_type == SCALAR_VALUE) { + mark_reg_unknown(env, regs, regno); + return; + } + mark_reg_known_zero(env, regs, regno); + regs[regno].type = PTR_TO_BTF_ID; + regs[regno].btf_id = btf_id; +} + #define DEF_NOT_SUBREG (0) static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) @@ -3182,19 +3195,68 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, if (ret < 0) return ret; - if (atype == BPF_READ && value_regno >= 0) { - if (ret == SCALAR_VALUE) { - mark_reg_unknown(env, regs, value_regno); - return 0; - } - mark_reg_known_zero(env, regs, value_regno); - regs[value_regno].type = PTR_TO_BTF_ID; - regs[value_regno].btf_id = btf_id; + if (atype == BPF_READ && value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + + return 0; +} + +static int check_ptr_to_map_access(struct bpf_verifier_env *env, + struct 
bpf_reg_state *regs, + int regno, int off, int size, + enum bpf_access_type atype, + int value_regno) +{ + struct bpf_reg_state *reg = regs + regno; + struct bpf_map *map = reg->map_ptr; + const struct btf_type *t; + const char *tname; + u32 btf_id; + int ret; + + if (!btf_vmlinux) { + verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); + return -ENOTSUPP; + } + + if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { + verbose(env, "map_ptr access not supported for map type %d\n", + map->map_type); + return -ENOTSUPP; + } + + t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); + tname = btf_name_by_offset(btf_vmlinux, t->name_off); + + if (!env->allow_ptr_to_map_access) { + verbose(env, + "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", + tname); + return -EPERM; } + if (off < 0) { + verbose(env, "R%d is %s invalid negative access: off=%d\n", + regno, tname, off); + return -EACCES; + } + + if (atype != BPF_READ) { + verbose(env, "only read from %s is supported\n", tname); + return -EACCES; + } + + ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); + if (ret < 0) + return ret; + + if (value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + return 0; } + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -3363,6 +3425,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (reg->type == PTR_TO_BTF_ID) { err = check_ptr_to_btf_access(env, regs, regno, off, size, t, value_regno); + } else if (reg->type == CONST_PTR_TO_MAP) { + err = check_ptr_to_map_access(env, regs, regno, off, size, t, + value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -10951,6 +11016,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->strict_alignment = false; env->allow_ptr_leaks = bpf_allow_ptr_leaks(); + env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); env->bypass_spec_v1 = bpf_bypass_spec_v1(); env->bypass_spec_v4 = bpf_bypass_spec_v4(); env->bpf_capable = bpf_capable(); diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c index cd26ee6b7b1d..1f2b8c4cb26d 100644 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c @@ -56,7 +56,7 @@ .fixup_map_in_map = { 16 }, .fixup_map_array_48b = { 13 }, .result = REJECT, - .errstr = "R0 invalid mem access 'map_ptr'", + .errstr = "only read from bpf_array is supported", }, { "cond: two branches returning different map pointers for lookup (tail, tail)", -- cgit v1.2.3-59-g8ed1b From 73edcd38d7720bb6a761966ea14c0bc64e95dc26 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Thu, 18 Jun 2020 18:35:53 +0530 Subject: soc: qcom: rpmh: Update rpmh_invalidate function to return void Currently rpmh_invalidate() always returns success. Update its return type to void. 
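Callers simply drop their error handling; a minimal sketch of an updated caller (the function name and surrounding driver are hypothetical, the rpmh calls are real):

    static void example_flush_sleep_wake_votes(const struct device *dev)
    {
    	/* rpmh_invalidate() can no longer fail, so there is nothing to check */
    	rpmh_invalidate(dev);

    	/* ... re-populate the cache, e.g. via rpmh_write_batch() ... */
    }
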
Reviewed-by: Lina Iyer Reviewed-by: Stephen Boyd Suggested-by: Stephen Boyd Signed-off-by: Maulik Shah Link: https://lore.kernel.org/r/1592485553-29163-1-git-send-email-mkshah@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/interconnect/qcom/bcm-voter.c | 6 +----- drivers/soc/qcom/rpmh.c | 4 +--- include/soc/qcom/rpmh.h | 7 ++++--- 3 files changed, 6 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index 2a11a63e7217..a3d2ef1d9903 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -266,11 +266,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter) if (!commit_idx[0]) goto out; - ret = rpmh_invalidate(voter->dev); - if (ret) { - pr_err("Error invalidating RPMH client (%d)\n", ret); - goto out; - } + rpmh_invalidate(voter->dev); ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE, cmds, commit_idx); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index f2b5b46ccd1f..b61e183ede69 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -497,7 +497,7 @@ exit: * * Invalidate the sleep and wake values in batch_cache. */ -int rpmh_invalidate(const struct device *dev) +void rpmh_invalidate(const struct device *dev) { struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); struct batch_cache_req *req, *tmp; @@ -509,7 +509,5 @@ int rpmh_invalidate(const struct device *dev) INIT_LIST_HEAD(&ctrlr->batch_cache); ctrlr->dirty = true; spin_unlock_irqrestore(&ctrlr->cache_lock, flags); - - return 0; } EXPORT_SYMBOL(rpmh_invalidate); diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h index f9ec353d24a5..bdbee1a97d36 100644 --- a/include/soc/qcom/rpmh.h +++ b/include/soc/qcom/rpmh.h @@ -20,7 +20,7 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state, int rpmh_write_batch(const struct device *dev, enum rpmh_state state, const struct tcs_cmd *cmd, u32 *n); -int rpmh_invalidate(const struct device *dev); +void rpmh_invalidate(const struct device *dev); #else @@ -38,8 +38,9 @@ static inline int rpmh_write_batch(const struct device *dev, const struct tcs_cmd *cmd, u32 *n) { return -ENODEV; } -static inline int rpmh_invalidate(const struct device *dev) -{ return -ENODEV; } +static inline void rpmh_invalidate(const struct device *dev) +{ +} #endif /* CONFIG_QCOM_RPMH */ -- cgit v1.2.3-59-g8ed1b From 2a916ecc405686c1d86f632281bc06aa75ebae4e Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:48 +0000 Subject: net/devlink: Support querying hardware address of port function PCI PF and VF devlink port can manage the function represented by a devlink port. Enable users to query port function's hardware address. Example of a PCI VF port which supports a port function: $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:11:22:33:44:66 $ devlink port show pci/0000:06:00.0/2 -jp { "port": { "pci/0000:06:00.0/2": { "type": "eth", "netdev": "enp6s0pf0vf1", "flavour": "pcivf", "pfnum": 0, "vfnum": 1, "function": { "hw_addr": "00:11:22:33:44:66" } } } } Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 12 ++++++++++++ include/uapi/linux/devlink.h | 10 ++++++++++ net/core/devlink.c | 45 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 1df6dfec26c2..56fc9cdb189d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1107,6 +1107,18 @@ struct devlink_ops { int (*trap_policer_counter_get)(struct devlink *devlink, const struct devlink_trap_policer *policer, u64 *p_drops); + /** + * @port_function_hw_addr_get: Port function's hardware address get function. + * + * Should be used by device drivers to report the hardware address of a function managed + * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port + * function handling for a particular port. + * + * Note: @extack can be NULL when port notifier queries the port function. + */ + int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 08563e6a424d..07d0af8f5923 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -451,6 +451,8 @@ enum devlink_attr { DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */ DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */ + DEVLINK_ATTR_PORT_FUNCTION, /* nested */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, @@ -497,4 +499,12 @@ enum devlink_resource_unit { DEVLINK_RESOURCE_UNIT_ENTRY, }; +enum devlink_port_function_attr { + DEVLINK_PORT_FUNCTION_ATTR_UNSPEC, + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */ + + __DEVLINK_PORT_FUNCTION_ATTR_MAX, + DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 +}; + #endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 05197631d52a..b6848b607e9c 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -563,6 +563,49 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; } +static int +devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = port->devlink; + const struct devlink_ops *ops; + struct nlattr *function_attr; + bool empty_nest = true; + int err = 0; + + function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION); + if (!function_attr) + return -EMSGSIZE; + + ops = devlink->ops; + if (ops->port_function_hw_addr_get) { + int uninitialized_var(hw_addr_len); + u8 hw_addr[MAX_ADDR_LEN]; + + err = ops->port_function_hw_addr_get(devlink, port, hw_addr, &hw_addr_len, extack); + if (err == -EOPNOTSUPP) { + /* Port function attributes are optional for a port. If port doesn't + * support function attribute, returning -EOPNOTSUPP is not an error. 
+ */ + err = 0; + goto out; + } else if (err) { + goto out; + } + err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr); + if (err) + goto out; + empty_nest = false; + } + +out: + if (err || empty_nest) + nla_nest_cancel(msg, function_attr); + else + nla_nest_end(msg, function_attr); + return err; +} + static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_port *devlink_port, enum devlink_command cmd, u32 portid, @@ -608,6 +651,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, spin_unlock_bh(&devlink_port->type_lock); if (devlink_nl_port_attrs_put(msg, devlink_port)) goto nla_put_failure; + if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack)) + goto nla_put_failure; genlmsg_end(msg, hdr); return 0; -- cgit v1.2.3-59-g8ed1b From a1e8ae907c8d67f57432190bb742802a76516b00 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:49 +0000 Subject: net/devlink: Support setting hardware address of port function PCI PF and VF devlink port can manage the function represented by a devlink port. Allow users to set port function's hardware address. Example of a PCI VF port which supports a port function: $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:00:00:00:00:00 $ devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55 $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:11:22:33:44:55 Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 10 +++++++ net/core/devlink.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 56fc9cdb189d..7007f93585a5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1119,6 +1119,16 @@ struct devlink_ops { int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); + /** + * @port_function_hw_addr_set: Port function's hardware address set function. + * + * Should be used by device drivers to set the hardware address of a function managed + * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port + * function handling for a particular port. 
+ */ + int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/net/core/devlink.c b/net/core/devlink.c index b6848b607e9c..baa45eca6b5a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -85,6 +85,10 @@ EXPORT_SYMBOL(devlink_dpipe_header_ipv6); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr); +static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = { + [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY }, +}; + static LIST_HEAD(devlink_list); /* devlink_mutex @@ -827,6 +831,67 @@ static int devlink_port_type_set(struct devlink *devlink, return -EOPNOTSUPP; } +static int +devlink_port_function_hw_addr_set(struct devlink *devlink, struct devlink_port *port, + const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + const struct devlink_ops *ops; + const u8 *hw_addr; + int hw_addr_len; + int err; + + hw_addr = nla_data(attr); + hw_addr_len = nla_len(attr); + if (hw_addr_len > MAX_ADDR_LEN) { + NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long"); + return -EINVAL; + } + if (port->type == DEVLINK_PORT_TYPE_ETH) { + if (hw_addr_len != ETH_ALEN) { + NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device"); + return -EINVAL; + } + if (!is_unicast_ether_addr(hw_addr)) { + NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported"); + return -EINVAL; + } + } + + ops = devlink->ops; + if (!ops->port_function_hw_addr_set) { + NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes"); + return -EOPNOTSUPP; + } + + err = ops->port_function_hw_addr_set(devlink, port, hw_addr, hw_addr_len, extack); + if (err) + return err; + + devlink_port_notify(port, DEVLINK_CMD_PORT_NEW); + return 0; +} + +static int +devlink_port_function_set(struct devlink *devlink, struct devlink_port *port, + const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr, + devlink_function_nl_policy, extack); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes"); + return err; + } + + attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR]; + if (attr) + err = devlink_port_function_hw_addr_set(devlink, port, attr, extack); + + return err; +} + static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info) { @@ -842,6 +907,16 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, if (err) return err; } + + if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) { + struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION]; + struct netlink_ext_ack *extack = info->extack; + + err = devlink_port_function_set(devlink, devlink_port, attr, extack); + if (err) + return err; + } + return 0; } @@ -6758,6 +6833,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 }, [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 }, [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 }, + [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED }, }; static const struct genl_ops devlink_nl_ops[] = { -- cgit v1.2.3-59-g8ed1b From fa997825ebeca820f4001a9e6d285345d3a535ba Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:50 +0000 Subject: 
net/mlx5: Constify mac address pointer Since none of the functions need to modify the input mac address, constify them. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 +- include/linux/mlx5/vport.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1116ab9bea6c..d6a585a143dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1127,7 +1127,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); } -static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) +static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) { ((u8 *)node_guid)[7] = mac[0]; ((u8 *)node_guid)[6] = mac[1]; @@ -1779,7 +1779,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) /* Vport Administration */ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]) + u16 vport, const u8 *mac) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); u64 node_guid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5175e98c0b3..165a23efc608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -311,7 +311,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]); + u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state); int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index c107d92dc118..88cdb9bb4c4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -173,7 +173,7 @@ int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr) EXPORT_SYMBOL_GPL(mlx5_query_mac_address); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, - u16 vport, u8 *addr) + u16 vport, const u8 *addr) { void *in; int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 8170da1e9f70..4db87bcfce7b 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -75,7 +75,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, - u16 vport, u8 *addr); + u16 vport, const u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, -- cgit v1.2.3-59-g8ed1b From 272c2330adc9c68284cb0066719160c24bfe605f Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 
2020 10:31:52 -0400 Subject: xfrm: bail early on slave pass over skb This is prep work for initial support of bonding hardware encryption pass-through support. The bonding driver will fill in the slave_dev pointer, and we use that to know not to skb_push() again on a given skb that was already processed on the bond device. CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- include/net/xfrm.h | 1 + net/xfrm/xfrm_device.c | 34 +++++++++++++++++----------------- 2 files changed, 18 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 094fe682f5d7..e20b2b27ec48 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -127,6 +127,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; + struct net_device *slave_dev; unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index f50d1f97cf8e..b8918fc5248b 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -106,6 +106,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur struct sk_buff *skb2, *nskb, *pskb = NULL; netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); + struct net_device *dev = skb->dev; struct sec_path *sp; if (!xo) @@ -119,6 +120,10 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) return skb; + /* This skb was already validated on the master dev */ + if ((x->xso.dev != dev) && (x->xso.slave_dev == dev)) + return skb; + local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); err = !skb_queue_empty(&sd->xfrm_backlog); @@ -129,25 +134,20 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb)) { - struct net_device *dev = skb->dev; - - if (unlikely(x->xso.dev != dev)) { - struct sk_buff *segs; + if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) { + struct sk_buff *segs; - /* Packet got rerouted, fixup features and segment it. */ - esp_features = esp_features & ~(NETIF_F_HW_ESP - | NETIF_F_GSO_ESP); + /* Packet got rerouted, fixup features and segment it. */ + esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP); - segs = skb_gso_segment(skb, esp_features); - if (IS_ERR(segs)) { - kfree_skb(skb); - atomic_long_inc(&dev->tx_dropped); - return NULL; - } else { - consume_skb(skb); - skb = segs; - } + segs = skb_gso_segment(skb, esp_features); + if (IS_ERR(segs)) { + kfree_skb(skb); + atomic_long_inc(&dev->tx_dropped); + return NULL; + } else { + consume_skb(skb); + skb = segs; } } -- cgit v1.2.3-59-g8ed1b From 18cb261afd7bf50134e5ccacc5ec91ea16efadd4 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:55 -0400 Subject: bonding: support hardware encryption offload to slaves Currently, this support is limited to active-backup mode, as I'm not sure about the feasilibity of mapping an xfrm_state's offload handle to multiple hardware devices simultaneously, and we rely on being able to pass some hints to both the xfrm and NIC driver about whether or not they're operating on a slave device. 
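Roughly, the hint is the xso.slave_dev pointer added in the previous patch: the bond installs the SA with xso.dev pointing at the bond device and xso.slave_dev pointing at the current active slave, so lower layers can tell the two apart. A minimal sketch of such a check (hypothetical helper, not code from this patch):

    static bool example_skb_uses_bond_sa(const struct sk_buff *skb,
    				     const struct xfrm_state *xs)
    {
    	/* SA was installed through the bond, packet is now leaving on the slave */
    	return xs->xso.dev != skb->dev && xs->xso.slave_dev == skb->dev;
    }
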
I've tested this atop an Intel x520 device (ixgbe) using libreswan in transport mode, succesfully achieving ~4.3Gbps throughput with netperf (more or less identical to throughput on a bare NIC in this system), as well as successful failover and recovery mid-netperf. v2: just use CONFIG_XFRM_OFFLOAD for wrapping, isolate more code with it CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 127 +++++++++++++++++++++++++++++++++++++++- include/net/bonding.h | 3 + 2 files changed, 128 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 004919aea5fb..90939ccf2a94 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -278,8 +279,6 @@ const char *bond_mode_name(int mode) return names[mode]; } -/*---------------------------------- VLAN -----------------------------------*/ - /** * bond_dev_queue_xmit - Prepare skb for xmit. * @@ -302,6 +301,8 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, return dev_queue_xmit(skb); } +/*---------------------------------- VLAN -----------------------------------*/ + /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. This operation is performed in IOCTL context, @@ -372,6 +373,84 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, return 0; } +/*---------------------------------- XFRM -----------------------------------*/ + +#ifdef CONFIG_XFRM_OFFLOAD +/** + * bond_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int bond_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = rtnl_dereference(bond->curr_active_slave); + + xs->xso.slave_dev = slave->dev; + bond->xs = xs; + + if (!(slave->dev->xfrmdev_ops + && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + return -EINVAL; + } + + return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); +} + +/** + * bond_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void bond_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = rtnl_dereference(bond->curr_active_slave); + + if (!slave) + return; + + xs->xso.slave_dev = slave->dev; + + if (!(slave->dev->xfrmdev_ops + && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); + return; + } + + slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +} + +/** + * bond_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *curr_active = 
rtnl_dereference(bond->curr_active_slave); + struct net_device *slave_dev = curr_active->dev; + + if (!(slave_dev->xfrmdev_ops + && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { + slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); + return false; + } + + xs->xso.slave_dev = slave_dev; + return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +} + +static const struct xfrmdev_ops bond_xfrmdev_ops = { + .xdo_dev_state_add = bond_ipsec_add_sa, + .xdo_dev_state_delete = bond_ipsec_del_sa, + .xdo_dev_offload_ok = bond_ipsec_offload_ok, +}; +#endif /* CONFIG_XFRM_OFFLOAD */ + /*------------------------------- Link status -------------------------------*/ /* Set the carrier state for the master according to the state of its @@ -879,6 +958,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return; if (new_active) { +#ifdef CONFIG_XFRM_OFFLOAD + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) && bond->xs) + bond_ipsec_del_sa(bond->xs); +#endif /* CONFIG_XFRM_OFFLOAD */ + new_active->last_link_up = jiffies; if (new_active->link == BOND_LINK_BACK) { @@ -941,6 +1025,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) bond_should_notify_peers(bond); } +#ifdef CONFIG_XFRM_OFFLOAD + if (old_active && bond->xs) { + xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); + bond_ipsec_add_sa(bond->xs); + } +#endif /* CONFIG_XFRM_OFFLOAD */ + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) { bond->send_peer_notif--; @@ -1127,15 +1218,24 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) +#ifdef CONFIG_XFRM_OFFLOAD +#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) +#endif /* CONFIG_XFRM_OFFLOAD */ + #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_ALL_TSO) + static void bond_compute_features(struct bonding *bond) { unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; netdev_features_t vlan_features = BOND_VLAN_FEATURES; netdev_features_t enc_features = BOND_ENC_FEATURES; +#ifdef CONFIG_XFRM_OFFLOAD + netdev_features_t xfrm_features = BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ netdev_features_t mpls_features = BOND_MPLS_FEATURES; struct net_device *bond_dev = bond->dev; struct list_head *iter; @@ -1157,6 +1257,12 @@ static void bond_compute_features(struct bonding *bond) slave->dev->hw_enc_features, BOND_ENC_FEATURES); +#ifdef CONFIG_XFRM_OFFLOAD + xfrm_features = netdev_increment_features(xfrm_features, + slave->dev->hw_enc_features, + BOND_XFRM_FEATURES); +#endif /* CONFIG_XFRM_OFFLOAD */ + mpls_features = netdev_increment_features(mpls_features, slave->dev->mpls_features, BOND_MPLS_FEATURES); @@ -1176,6 +1282,9 @@ done: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; +#ifdef CONFIG_XFRM_OFFLOAD + bond_dev->hw_enc_features |= xfrm_features; +#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->mpls_features = mpls_features; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -1464,6 +1573,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n"); } + if (slave_dev->features & NETIF_F_HW_ESP) + slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n"); + /* Old ifenslave binaries are no longer supported. 
These can * be identified with moderate accuracy by the state of the slave: * the current ifenslave will set the interface down prior to @@ -4540,6 +4652,13 @@ void bond_setup(struct net_device *bond_dev) bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); +#ifdef CONFIG_XFRM_OFFLOAD + /* set up xfrm device ops (only supported in active-backup right now) */ + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; + bond->xs = NULL; +#endif /* CONFIG_XFRM_OFFLOAD */ + /* don't acquire bond device's netif_tx_lock when transmitting */ bond_dev->features |= NETIF_F_LLTX; @@ -4558,6 +4677,10 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; +#ifdef CONFIG_XFRM_OFFLOAD + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + bond_dev->hw_features |= BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } diff --git a/include/net/bonding.h b/include/net/bonding.h index aa854a9c01e2..a00e1764e9b1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -238,6 +238,9 @@ struct bonding { struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; +#ifdef CONFIG_XFRM_OFFLOAD + struct xfrm_state *xs; +#endif /* CONFIG_XFRM_OFFLOAD */ }; #define bond_slave_get_rcu(dev) \ -- cgit v1.2.3-59-g8ed1b From b5872cd0e823e4cb50b3a75cd9522167eeb676a2 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sat, 20 Jun 2020 22:01:56 +0530 Subject: devlink: Add support for board.serial_number to info_get cb. Board serial number is a serial number, often available in PCI *Vital Product Data*. Also, update devlink-info.rst documentation file. Cc: Jiri Pirko Cc: Jakub Kicinski Signed-off-by: Vasundhara Volam Reviewed-by: Michael Chan Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/devlink/devlink-info.rst | 12 +++++------- include/net/devlink.h | 2 ++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 8 ++++++++ 4 files changed, 17 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst index 3fe11401b838..7572bf6de5c1 100644 --- a/Documentation/networking/devlink/devlink-info.rst +++ b/Documentation/networking/devlink/devlink-info.rst @@ -44,9 +44,11 @@ versions is generally discouraged - here, and via any other Linux API. reported for two ports of the same device or on two hosts of a multi-host device should be identical. - .. note:: ``devlink-info`` API should be extended with a new field - if devices want to report board/product serial number (often - reported in PCI *Vital Product Data* capability). + * - ``board.serial_number`` + - Board serial number of the device. + + This is usually the serial number of the board, often available in + PCI *Vital Product Data*. 
* - ``fixed`` - Group for hardware identifiers, and versions of components @@ -201,10 +203,6 @@ Future work The following extensions could be useful: - - product serial number - NIC boards often get labeled with a board serial - number rather than ASIC serial number; it'd be useful to add board serial - numbers to the API if they can be retrieved from the device; - - on-disk firmware file names - drivers list the file names of firmware they may need to load onto devices via the ``MODULE_FIRMWARE()`` macro. These, however, are per module, rather than per device. It'd be useful to list diff --git a/include/net/devlink.h b/include/net/devlink.h index 7007f93585a5..428f55f8197c 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1284,6 +1284,8 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn); int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name); +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn); int devlink_info_version_fixed_put(struct devlink_info_req *req, const char *version_name, const char *version_value); diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 07d0af8f5923..87c83a82991b 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -453,6 +453,8 @@ enum devlink_attr { DEVLINK_ATTR_PORT_FUNCTION, /* nested */ + DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index baa45eca6b5a..455998a57671 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4502,6 +4502,14 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) } EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn) +{ + return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, + bsn); +} +EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); + static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, const char *version_value) -- cgit v1.2.3-59-g8ed1b From a602ea86e9f0d82f5c7ba1d3f7487d4097380b96 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:51 +0300 Subject: net: phy: marvell: Add Marvell 88E1340S support Add support for this new phy ID. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 23 +++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index ee9c352f67ab..0842deb33085 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2943,6 +2943,28 @@ static struct phy_driver marvell_drivers[] = { .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, .cable_test_get_status = marvell_vct7_cable_test_get_status, }, + { + .phy_id = MARVELL_PHY_ID_88E1340S, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1340S", + .probe = m88e1510_probe, + /* PHY_GBIT_FEATURES */ + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, }; module_phy_driver(marvell_drivers); @@ -2963,6 +2985,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index af6b11d4d673..c4390e9cbf15 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -15,6 +15,7 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1340S 0x01410dc0 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 -- cgit v1.2.3-59-g8ed1b From f59babf95ef969a18744082ee16e4dfd17743c0b Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:52 +0300 Subject: net: phy: marvell: Add Marvell 88E1548P support Add support for this new phy ID. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 23 +++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0842deb33085..bb86ac0bd092 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2965,6 +2965,28 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1540_get_tunable, .set_tunable = m88e1540_set_tunable, }, + { + .phy_id = MARVELL_PHY_ID_88E1548P, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1548P", + .probe = m88e1510_probe, + .features = PHY_GBIT_FIBRE_FEATURES, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, }; module_phy_driver(marvell_drivers); @@ -2986,6 +3008,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index c4390e9cbf15..ff7b7607c8cf 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -20,6 +20,7 @@ #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 #define MARVELL_PHY_ID_88E1545 0x01410ea0 +#define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 -- cgit v1.2.3-59-g8ed1b From 59390282b7542c6050c1deaca0b2949180903175 Mon Sep 17 00:00:00 2001 From: Vincent Knecht Date: Sat, 13 Jun 2020 09:27:42 +0200 Subject: clk: qcom: smd: Add support for MSM8936 rpm clocks Add missing definition of rpm clk for msm8936 soc (also used by msm8939) Signed-off-by: Vincent Knecht Link: https://lore.kernel.org/r/20200613072745.1249003-2-vincent.knecht@mailoo.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/clk-smd-rpm.c | 50 ++++++++++++++++++++++++++++++++++ include/dt-bindings/clock/qcom,rpmcc.h | 2 ++ 2 files changed, 52 insertions(+) (limited to 'include') diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 643bc355df5c..083399affc8e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -452,6 +452,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { .num_clks = ARRAY_SIZE(msm8916_clks), }; +/* msm8936 */ +DEFINE_CLK_SMD_RPM(msm8936, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8936, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8936, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(msm8936, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, rf_clk1, rf_clk1_a, 
4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, bb_clk2_pin, bb_clk2_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, rf_clk2_pin, rf_clk2_a_pin, 5); + +static struct clk_smd_rpm *msm8936_clks[] = { + [RPM_SMD_PCNOC_CLK] = &msm8936_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8936_pcnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8936_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8936_snoc_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8936_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8936_bimc_a_clk, + [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk, + [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8936_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8936_qdss_a_clk, + [RPM_SMD_BB_CLK1] = &msm8936_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8936_bb_clk1_a, + [RPM_SMD_BB_CLK2] = &msm8936_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8936_bb_clk2_a, + [RPM_SMD_RF_CLK1] = &msm8936_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8936_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8936_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8936_rf_clk2_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8936_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8936_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2_PIN] = &msm8936_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8936_bb_clk2_a_pin, + [RPM_SMD_RF_CLK1_PIN] = &msm8936_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8936_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8936_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8936_rf_clk2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8936 = { + .clks = msm8936_clks, + .num_clks = ARRAY_SIZE(msm8936_clks), +}; + /* msm8974 */ DEFINE_CLK_SMD_RPM(msm8974, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8974, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); @@ -843,6 +892,7 @@ static const struct rpm_smd_clk_desc rpm_clk_sdm660 = { static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, + { .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 }, { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index d1afa634b58d..e98ed70d91b3 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -143,5 +143,7 @@ #define RPM_SMD_LN_BB_CLK1_A_PIN 97 #define RPM_SMD_LN_BB_CLK2_PIN 98 #define RPM_SMD_LN_BB_CLK2_A_PIN 99 +#define RPM_SMD_SYSMMNOC_CLK 100 +#define RPM_SMD_SYSMMNOC_A_CLK 101 #endif -- cgit v1.2.3-59-g8ed1b From 34662f6e30846ae0f82bbc9605deff67781f6616 Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Wed, 3 Jun 2020 10:43:28 -0500 Subject: dt: Add additional option bindings for IDT VersaClock The VersaClock driver now supports some additional bindings to support child nodes which can configure optional settings like mode, voltage and slew. This patch updates the binding document to describe what is available in the driver. 
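As a rough illustration only (the function below is hypothetical and not taken from the clk-versaclock5 driver), the optional per-output child nodes could be consumed on the driver side roughly like this, assuming the "idt," vendor prefix for all three properties; the binding document itself carries a DT example for an OUT1 node.

#include <linux/of.h>
#include <linux/printk.h>

/* Hypothetical sketch: read the optional per-output properties
 * from an OUTx child node of the VersaClock device node.
 */
static void vc5_parse_output_node(const struct device_node *np)
{
	u32 mode, microvolts, slew;

	if (!of_property_read_u32(np, "idt,mode", &mode))
		pr_debug("%pOF: output mode %u\n", np, mode);
	if (!of_property_read_u32(np, "idt,voltage-microvolts", &microvolts))
		pr_debug("%pOF: output voltage %u uV\n", np, microvolts);
	if (!of_property_read_u32(np, "idt,slew-percent", &slew))
		pr_debug("%pOF: slew %u%%\n", np, slew);
}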
Signed-off-by: Adam Ford Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200603154329.31579-2-aford173@gmail.com Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/idt,versaclock5.txt | 33 ++++++++++++++++++++++ include/dt-bindings/clk/versaclock.h | 13 +++++++++ 2 files changed, 46 insertions(+) create mode 100644 include/dt-bindings/clk/versaclock.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt index bcff681a4bd0..6165b6ddb1a9 100644 --- a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt @@ -31,6 +31,29 @@ Required properties: - 5p49v5933 and - 5p49v5935: (optional) property not present or "clkin". +For all output ports, a corresponding, optional child node named OUT1, +OUT2, etc. can represent a each output, and the node can be used to +specify the following: + +- itd,mode: can be one of the following: + - VC5_LVPECL + - VC5_CMOS + - VC5_HCSL33 + - VC5_LVDS + - VC5_CMOS2 + - VC5_CMOSD + - VC5_HCSL25 + +- idt,voltage-microvolts: can be one of the following + - 1800000 + - 2500000 + - 3300000 +- idt,slew-percent: Percent of normal, can be one of + - 80 + - 85 + - 90 + - 100 + ==Mapping between clock specifier and physical pins== When referencing the provided clock in the DT using phandle and @@ -81,6 +104,16 @@ i2c-master-node { /* Connect XIN input to 25MHz reference */ clocks = <&ref25m>; clock-names = "xin"; + + OUT1 { + itd,mode = ; + idt,voltage-microvolts = <1800000>; + idt,slew-percent = <80>; + }; + OUT2 { + ... + }; + ... }; }; diff --git a/include/dt-bindings/clk/versaclock.h b/include/dt-bindings/clk/versaclock.h new file mode 100644 index 000000000000..c6a6a0946564 --- /dev/null +++ b/include/dt-bindings/clk/versaclock.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* This file defines field values used by the versaclock 6 family + * for defining output type + */ + +#define VC5_LVPECL 0 +#define VC5_CMOS 1 +#define VC5_HCSL33 2 +#define VC5_LVDS 3 +#define VC5_CMOS2 4 +#define VC5_CMOSD 5 +#define VC5_HCSL25 6 -- cgit v1.2.3-59-g8ed1b From 209edf95da63a0ad19750769f473f4ea1553d21d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:01 +0300 Subject: net: dsa: felix: call port mdb operations from ocelot This adds the mdb hooks in felix and exports the mdb functions from ocelot. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 26 ++++++++++++++++++++++++++ drivers/net/ethernet/mscc/ocelot.c | 23 ++++++++--------------- drivers/net/ethernet/mscc/ocelot.h | 5 ----- drivers/net/ethernet/mscc/ocelot_net.c | 26 ++++++++++++++++++++++++++ include/soc/mscc/ocelot.h | 4 ++++ 5 files changed, 64 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 66648986e6e3..25046777c993 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -59,6 +59,29 @@ static int felix_fdb_del(struct dsa_switch *ds, int port, return ocelot_fdb_del(ocelot, port, addr, vid); } +/* This callback needs to be present */ +static int felix_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + return 0; +} + +static void felix_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_mdb_add(ocelot, port, mdb); +} + +static int felix_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_mdb_del(ocelot, port, mdb); +} + static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, u8 state) { @@ -771,6 +794,9 @@ static const struct dsa_switch_ops felix_switch_ops = { .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, + .port_mdb_prepare = felix_mdb_prepare, + .port_mdb_add = felix_mdb_add, + .port_mdb_del = felix_mdb_del, .port_bridge_join = felix_bridge_join, .port_bridge_leave = felix_bridge_leave, .port_stp_state_set = felix_bridge_stp_state_set, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4aadb65a6af8..468eaf5916e5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -944,16 +944,12 @@ static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, return NULL; } -int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; - int port = priv->chip_port; u16 vid = mdb->vid; bool new = false; @@ -991,17 +987,14 @@ int ocelot_port_obj_add_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } -EXPORT_SYMBOL(ocelot_port_obj_add_mdb); +EXPORT_SYMBOL(ocelot_port_mdb_add); -int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb) +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; - int port = priv->chip_port; u16 vid = mdb->vid; if (port == ocelot->npi) @@ -1032,7 +1025,7 @@ int ocelot_port_obj_del_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } 
-EXPORT_SYMBOL(ocelot_port_obj_del_mdb); +EXPORT_SYMBOL(ocelot_port_mdb_del); int ocelot_port_bridge_join(struct ocelot *ocelot, int port, struct net_device *bridge) diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 0c23734a87be..be4a41646e5e 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -97,11 +97,6 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond); -int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb); -int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 80cb1873e9d9..1bad146a0105 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -795,6 +795,32 @@ static int ocelot_port_vlan_del_vlan(struct net_device *dev, return 0; } +static int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return ocelot_port_mdb_add(ocelot, port, mdb); +} + +static int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_port_mdb_del(ocelot, port, mdb); +} + static int ocelot_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans, diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index fa2c3904049e..80415b63ccfa 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -641,5 +641,9 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb); +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb); #endif -- cgit v1.2.3-59-g8ed1b From 96b029b004942ecdb50e40d3e45cdc8a3aec9135 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:02 +0300 Subject: net: mscc: ocelot: introduce macros for iterating over PGIDs The current iterators are impossible to understand at first glance without switching back and forth between the definitions and their actual use in the for loops. So introduce some convenience names to help readability. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot.c | 8 ++++---- drivers/net/ethernet/mscc/ocelot_net.c | 2 +- include/soc/mscc/ocelot.h | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 468eaf5916e5..b6254c20f2f0 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1064,10 +1064,10 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) int i, port, lag; /* Reset destination and aggregation PGIDS */ - for (port = 0; port < ocelot->num_phys_ports; port++) + for_each_unicast_dest_pgid(ocelot, port) ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); - for (i = PGID_AGGR; i < PGID_SRC; i++) + for_each_aggr_pgid(ocelot, i) ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), ANA_PGID_PGID, i); @@ -1089,7 +1089,7 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) aggr_count++; } - for (i = PGID_AGGR; i < PGID_SRC; i++) { + for_each_aggr_pgid(ocelot, i) { u32 ac; ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); @@ -1451,7 +1451,7 @@ int ocelot_init(struct ocelot *ocelot) } /* Allow broadcast MAC frames. */ - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) { + for_each_nonreserved_multicast_dest_pgid(ocelot, i) { u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 1bad146a0105..702b42543fb7 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -422,7 +422,7 @@ static void ocelot_set_rx_mode(struct net_device *dev) * forwarded to the CPU port. */ val = GENMASK(ocelot->num_phys_ports - 1, 0); - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) + for_each_nonreserved_multicast_dest_pgid(ocelot, i) ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 80415b63ccfa..e050f8121ba2 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -65,6 +65,21 @@ #define PGID_MCIPV4 62 #define PGID_MCIPV6 63 +#define for_each_unicast_dest_pgid(ocelot, pgid) \ + for ((pgid) = 0; \ + (pgid) < (ocelot)->num_phys_ports; \ + (pgid)++) + +#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \ + for ((pgid) = (ocelot)->num_phys_ports + 1; \ + (pgid) < PGID_CPU; \ + (pgid)++) + +#define for_each_aggr_pgid(ocelot, pgid) \ + for ((pgid) = PGID_AGGR; \ + (pgid) < PGID_SRC; \ + (pgid)++) + /* Aggregation PGIDs, one per Link Aggregation Code */ #define PGID_AGGR 64 -- cgit v1.2.3-59-g8ed1b From 018e4308349dbf32f4d971cbe72f4f3d6b1c217a Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Mon, 1 Jun 2020 16:06:07 -0700 Subject: clk: imx: vf610: add CAAM clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to Vybrid Security RM, CCM_CCGR11[CG176] can be used to gate CAAM ipg clock. 
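For context, a minimal consumer-side sketch (hypothetical function name and clock wiring, not part of this patch) showing how the newly exposed gate would typically be picked up through the common clock framework:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: assumes the board DT routes
 * clocks = <&clks VF610_CLK_CAAM> to the CAAM node with
 * clock-names = "ipg".
 */
static int caam_ipg_clk_enable(struct device *dev)
{
	struct clk *ipg = devm_clk_get(dev, "ipg");

	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	return clk_prepare_enable(ipg);
}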
Signed-off-by: Horia Geantă Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Shawn Guo Cc: Fabio Estevam Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: linux-imx@nxp.com Tested-by: Chris Healy Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-vf610.c | 1 + include/dt-bindings/clock/vf610-clock.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index cd04e7dc1878..5129ef8e1d6e 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c @@ -438,6 +438,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_SNVS] = imx_clk_gate2("snvs-rtc", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(7)); clk[VF610_CLK_DAP] = imx_clk_gate("dap", "platform_bus", CCM_CCSR, 24); clk[VF610_CLK_OCOTP] = imx_clk_gate("ocotp", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(5)); + clk[VF610_CLK_CAAM] = imx_clk_gate2("caam", "ipg_bus", CCM_CCGR11, CCM_CCGRx_CGn(0)); imx_check_clocks(clk, ARRAY_SIZE(clk)); diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 95394f35a74a..0f2d60e884dc 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -195,6 +195,7 @@ #define VF610_CLK_WKPU 186 #define VF610_CLK_TCON0 187 #define VF610_CLK_TCON1 188 -#define VF610_CLK_END 189 +#define VF610_CLK_CAAM 189 +#define VF610_CLK_END 190 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ -- cgit v1.2.3-59-g8ed1b From 169caf692567897da35382503a5caeb64ab4b8c7 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Fri, 5 Jun 2020 09:59:31 +0800 Subject: firmware: imx: add resource management api Add resource management API, when we have multiple partition running together, resources not owned to current partition should not be used. Reviewed-by: Leonard Crestez Reviewed-by: Dong Aisheng Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/firmware/imx/Makefile | 2 +- drivers/firmware/imx/rm.c | 45 ++++++++++++++++++++++++ include/linux/firmware/imx/sci.h | 1 + include/linux/firmware/imx/svc/rm.h | 69 +++++++++++++++++++++++++++++++++++++ 4 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/imx/rm.c create mode 100644 include/linux/firmware/imx/svc/rm.h (limited to 'include') diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 08bc9ddfbdfb..17ea3613e142 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IMX_DSP) += imx-dsp.o -obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o +obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c new file mode 100644 index 000000000000..a12db6ff323b --- /dev/null +++ b/drivers/firmware/imx/rm.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2020 NXP + * + * File containing client-side RPC functions for the RM service. These + * function are ported to clients that communicate to the SC. + */ + +#include + +struct imx_sc_msg_rm_rsrc_owned { + struct imx_sc_rpc_msg hdr; + u16 resource; +} __packed __aligned(4); + +/* + * This function check @resource is owned by current partition or not + * + * @param[in] ipc IPC handle + * @param[in] resource resource the control is associated with + * + * @return Returns 0 for not owned and 1 for owned. 
+ */ +bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) +{ + struct imx_sc_msg_rm_rsrc_owned msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_RM; + hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED; + hdr->size = 2; + + msg.resource = resource; + + /* + * SCU firmware only returns value 0 or 1 + * for resource owned check which means not owned or owned. + * So it is always successful. + */ + imx_scu_call_rpc(ipc, &msg, true); + + return hdr->func; +} +EXPORT_SYMBOL(imx_sc_rm_is_resource_owned); diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 3fa418a4ca67..3c459f54a88f 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -14,6 +14,7 @@ #include #include +#include int imx_scu_enable_general_irq_channel(struct device *dev); int imx_scu_irq_register_notifier(struct notifier_block *nb); diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h new file mode 100644 index 000000000000..456b6a59d29b --- /dev/null +++ b/include/linux/firmware/imx/svc/rm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2020 NXP + * + * Header file containing the public API for the System Controller (SC) + * Resource Management (RM) function. This includes functions for + * partitioning resources, pads, and memory regions. + * + * RM_SVC (SVC) Resource Management Service + * + * Module for the Resource Management (RM) service. + */ + +#ifndef _SC_RM_API_H +#define _SC_RM_API_H + +#include + +/* + * This type is used to indicate RPC RM function calls. + */ +enum imx_sc_rm_func { + IMX_SC_RM_FUNC_UNKNOWN = 0, + IMX_SC_RM_FUNC_PARTITION_ALLOC = 1, + IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31, + IMX_SC_RM_FUNC_PARTITION_FREE = 2, + IMX_SC_RM_FUNC_GET_DID = 26, + IMX_SC_RM_FUNC_PARTITION_STATIC = 3, + IMX_SC_RM_FUNC_PARTITION_LOCK = 4, + IMX_SC_RM_FUNC_GET_PARTITION = 5, + IMX_SC_RM_FUNC_SET_PARENT = 6, + IMX_SC_RM_FUNC_MOVE_ALL = 7, + IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8, + IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9, + IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28, + IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10, + IMX_SC_RM_FUNC_SET_MASTER_SID = 11, + IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12, + IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13, + IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33, + IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14, + IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15, + IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16, + IMX_SC_RM_FUNC_MEMREG_ALLOC = 17, + IMX_SC_RM_FUNC_MEMREG_SPLIT = 29, + IMX_SC_RM_FUNC_MEMREG_FRAG = 32, + IMX_SC_RM_FUNC_MEMREG_FREE = 18, + IMX_SC_RM_FUNC_FIND_MEMREG = 30, + IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19, + IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20, + IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21, + IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22, + IMX_SC_RM_FUNC_ASSIGN_PAD = 23, + IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24, + IMX_SC_RM_FUNC_IS_PAD_OWNED = 25, + IMX_SC_RM_FUNC_DUMP = 27, +}; + +#if IS_ENABLED(CONFIG_IMX_SCU) +bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource); +#else +static inline bool +imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) +{ + return true; +} +#endif +#endif -- cgit v1.2.3-59-g8ed1b From 23a60f834406c8e3805328b630d09d5546b460c1 Mon Sep 17 00:00:00 2001 From: Collin Walling Date: Mon, 22 Jun 2020 11:46:36 -0400 Subject: s390/kvm: diagnose 0x318 sync and reset DIAGNOSE 0x318 (diag318) sets information regarding the environment 
the VM is running in (Linux, z/VM, etc) and is observed via firmware/service events. This is a privileged s390x instruction that must be intercepted by SIE. Userspace handles the instruction as well as migration. Data is communicated via VCPU register synchronization. The Control Program Name Code (CPNC) is stored in the SIE block. The CPNC along with the Control Program Version Code (CPVC) are stored in the kvm_vcpu_arch struct. This data is reset on load normal and clear resets. Signed-off-by: Collin Walling Reviewed-by: Janosch Frank Acked-by: Cornelia Huck Reviewed-by: David Hildenbrand Link: https://lore.kernel.org/r/20200622154636.5499-3-walling@linux.ibm.com [borntraeger@de.ibm.com: fix sync_reg position] Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 4 +++- arch/s390/include/uapi/asm/kvm.h | 7 +++++-- arch/s390/kvm/kvm-s390.c | 11 ++++++++++- arch/s390/kvm/vsie.c | 1 + include/uapi/linux/kvm.h | 1 + 5 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index cee3cb6455a2..371ec6beb618 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -260,7 +260,8 @@ struct kvm_s390_sie_block { __u32 scaol; /* 0x0064 */ __u8 sdf; /* 0x0068 */ __u8 epdx; /* 0x0069 */ - __u8 reserved6a[2]; /* 0x006a */ + __u8 cpnc; /* 0x006a */ + __u8 reserved6b; /* 0x006b */ __u32 todpr; /* 0x006c */ #define GISA_FORMAT1 0x00000001 __u32 gd; /* 0x0070 */ @@ -745,6 +746,7 @@ struct kvm_vcpu_arch { bool gs_enabled; bool skey_enabled; struct kvm_s390_pv_vcpu pv; + union diag318_info diag318_info; }; struct kvm_vm_stat { diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 436ec7636927..7a6b14874d65 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -231,11 +231,13 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_GSCB (1UL << 9) #define KVM_SYNC_BPBC (1UL << 10) #define KVM_SYNC_ETOKEN (1UL << 11) +#define KVM_SYNC_DIAG318 (1UL << 12) #define KVM_SYNC_S390_VALID_FIELDS \ (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \ KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \ - KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN) + KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN | \ + KVM_SYNC_DIAG318) /* length and alignment of the sdnx as a power of two */ #define SDNXC 8 @@ -264,7 +266,8 @@ struct kvm_sync_regs { __u8 reserved2 : 7; __u8 padding1[51]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ - __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ + __u64 diag318; /* diagnose 0x318 info */ + __u8 padding2[184]; /* sdnx needs to be 256byte aligned */ union { __u8 sdnx[SDNXL]; /* state description annex */ struct { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d47c19718615..08e6cf6cb454 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -545,6 +545,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_AIS_MIGRATION: case KVM_CAP_S390_VCPU_RESETS: case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_S390_DIAG318: r = 1; break; case KVM_CAP_S390_HPAGE_1M: @@ -3267,7 +3268,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_ARCH0 | - KVM_SYNC_PFAULT; + KVM_SYNC_PFAULT | + KVM_SYNC_DIAG318; kvm_s390_set_prefix(vcpu, 0); if (test_kvm_facility(vcpu->kvm, 64)) 
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; @@ -3562,6 +3564,7 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->pp = 0; vcpu->arch.sie_block->fpf &= ~FPF_BPBC; vcpu->arch.sie_block->todpr = 0; + vcpu->arch.sie_block->cpnc = 0; } } @@ -3579,6 +3582,7 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu) regs->etoken = 0; regs->etoken_extension = 0; + regs->diag318 = 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) @@ -4196,6 +4200,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) kvm_clear_async_pf_completion_queue(vcpu); } + if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { + vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; + vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; + } /* * If userspace sets the riccb (e.g. after migration) to a valid state, * we should enable RI here instead of doing the lazy enablement. @@ -4297,6 +4305,7 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; + kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; if (MACHINE_HAS_GS) { __ctl_set_bit(2, 4); if (vcpu->arch.gs_enabled) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 9e9056cebfcf..4f3cbf6003a9 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -548,6 +548,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->ecd |= scb_o->ecd & ECD_ETOKENF; scb_s->hpid = HPID_VSIE; + scb_s->cpnc = scb_o->cpnc; prepare_ibc(vcpu, vsie_page); rc = shadow_crycb(vcpu, vsie_page); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4fdf30316582..35cdb4307904 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1031,6 +1031,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_SECURE_GUEST 181 #define KVM_CAP_HALT_POLL 182 #define KVM_CAP_ASYNC_PF_INT 183 +#define KVM_CAP_S390_DIAG318 184 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-59-g8ed1b From 830e87ed15f85765bb7e57f310f95dfd87f11dfa Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:40 +0200 Subject: media: videobuf2: use explicit unsigned int in vb2_queue Switch from 'unsigned' to 'unsigned int' so that checkpatch doesn't complain. 
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/videobuf2-core.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index f11b96514cf7..9e522bd2acc7 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -558,15 +558,15 @@ struct vb2_queue { unsigned int io_modes; struct device *dev; unsigned long dma_attrs; - unsigned bidirectional:1; - unsigned fileio_read_once:1; - unsigned fileio_write_immediately:1; - unsigned allow_zero_bytesused:1; - unsigned quirk_poll_must_check_waiting_for_buffers:1; - unsigned supports_requests:1; - unsigned requires_requests:1; - unsigned uses_qbuf:1; - unsigned uses_requests:1; + unsigned int bidirectional:1; + unsigned int fileio_read_once:1; + unsigned int fileio_write_immediately:1; + unsigned int allow_zero_bytesused:1; + unsigned int quirk_poll_must_check_waiting_for_buffers:1; + unsigned int supports_requests:1; + unsigned int requires_requests:1; + unsigned int uses_qbuf:1; + unsigned int uses_requests:1; struct mutex *lock; void *owner; -- cgit v1.2.3-59-g8ed1b From 6d2199868a9aede70a4ee5fa32e6ae2800b8b25a Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:41 +0200 Subject: media: videobuf2: add cache management members Extend vb2_buffer and vb2_queue structs with cache management members. V4L2 UAPI already contains two buffer flags which user-space, supposedly, can use to control buffer cache sync: - V4L2_BUF_FLAG_NO_CACHE_INVALIDATE - V4L2_BUF_FLAG_NO_CACHE_CLEAN None of these, however, do anything at the moment. This patch set is intended to change it. Since user-space cache management hints are supposed to be implemented on a per-buffer basis we need to extend vb2_buffer struct with two new members ->need_cache_sync_on_prepare and ->need_cache_sync_on_finish, which will store corresponding user-space hints. In order to preserve the existing behaviour, user-space cache managements flags will be handled only by those drivers that permit user-space cache hints. That's the purpose of vb2_queue ->allow_cache_hints member. Driver must set ->allow_cache_hints during queue initialisation to enable cache management hints mechanism. Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/videobuf2-core.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 9e522bd2acc7..7f39d9fffc8c 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -263,6 +263,10 @@ struct vb2_buffer { * after the 'buf_finish' op is called. * copied_timestamp: the timestamp of this capture buffer was copied * from an output buffer. + * need_cache_sync_on_prepare: when set buffer's ->prepare() function + * performs cache sync/invalidation. + * need_cache_sync_on_finish: when set buffer's ->finish() function + * performs cache sync/invalidation. 
* queued_entry: entry on the queued buffers list, which holds * all buffers queued from userspace * done_entry: entry on the list that stores all buffers ready @@ -273,6 +277,8 @@ struct vb2_buffer { unsigned int synced:1; unsigned int prepared:1; unsigned int copied_timestamp:1; + unsigned int need_cache_sync_on_prepare:1; + unsigned int need_cache_sync_on_finish:1; struct vb2_plane planes[VB2_MAX_PLANES]; struct list_head queued_entry; @@ -491,6 +497,9 @@ struct vb2_buf_ops { * @uses_requests: requests are used for this queue. Set to 1 the first time * a request is queued. Set to 0 when the queue is canceled. * If this is 1, then you cannot queue buffers directly. + * @allow_cache_hints: when set user-space can pass cache management hints in + * order to skip cache flush/invalidation on ->prepare() or/and + * ->finish(). * @lock: pointer to a mutex that protects the &struct vb2_queue. The * driver can set this to a mutex to let the v4l2 core serialize * the queuing ioctls. If the driver wants to handle locking @@ -567,6 +576,7 @@ struct vb2_queue { unsigned int requires_requests:1; unsigned int uses_qbuf:1; unsigned int uses_requests:1; + unsigned int allow_cache_hints:1; struct mutex *lock; void *owner; -- cgit v1.2.3-59-g8ed1b From f5f5fa73fbfb9f346f1b5f37ebf343bae6ef6361 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:42 +0200 Subject: media: videobuf2: handle V4L2 buffer cache flags Set video buffer cache management flags corresponding to V4L2 cache flags. Both ->prepare() and ->finish() cache management hints should be passed during this stage (buffer preparation), because there is no other way for user-space to tell V4L2 to avoid ->finish() cache flush. Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-v4l2.c | 48 +++++++++++++++++++++++++ include/media/videobuf2-core.h | 11 ++++++ 2 files changed, 59 insertions(+) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index eb5d5db96552..f13851212cc8 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -337,6 +337,53 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b return 0; } +static void set_buffer_cache_hints(struct vb2_queue *q, + struct vb2_buffer *vb, + struct v4l2_buffer *b) +{ + /* + * DMA exporter should take care of cache syncs, so we can avoid + * explicit ->prepare()/->finish() syncs. For other ->memory types + * we always need ->prepare() or/and ->finish() cache sync. + */ + if (q->memory == VB2_MEMORY_DMABUF) { + vb->need_cache_sync_on_finish = 0; + vb->need_cache_sync_on_prepare = 0; + return; + } + + /* + * Cache sync/invalidation flags are set by default in order to + * preserve existing behaviour for old apps/drivers. + */ + vb->need_cache_sync_on_prepare = 1; + vb->need_cache_sync_on_finish = 1; + + if (!vb2_queue_allows_cache_hints(q)) { + /* + * Clear buffer cache flags if queue does not support user + * space hints. That's to indicate to userspace that these + * flags won't work. + */ + b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE; + b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN; + return; + } + + /* + * ->finish() cache sync can be avoided when queue direction is + * TO_DEVICE. 
+ */ + if (q->dma_dir == DMA_TO_DEVICE) + vb->need_cache_sync_on_finish = 0; + + if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE) + vb->need_cache_sync_on_finish = 0; + + if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN) + vb->need_cache_sync_on_prepare = 0; +} + static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, struct v4l2_buffer *b, bool is_prepare, struct media_request **p_req) @@ -381,6 +428,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md } if (!vb->prepared) { + set_buffer_cache_hints(q, vb, b); /* Copy relevant information provided by the userspace */ memset(vbuf->planes, 0, sizeof(vbuf->planes[0]) * vb->num_planes); diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 7f39d9fffc8c..ccc5c498d3e3 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -635,6 +635,17 @@ struct vb2_queue { #endif }; +/** + * vb2_queue_allows_cache_hints() - Return true if the queue allows cache + * and memory consistency hints. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + */ +static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q) +{ + return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP; +} + /** * vb2_plane_vaddr() - Return a kernel virtual address of a given plane. * @vb: pointer to &struct vb2_buffer to which the plane in -- cgit v1.2.3-59-g8ed1b From 21a00fb33790f828a34b9ce50ab9f9130bc1ffb4 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Mon, 22 Jun 2020 10:42:37 -0500 Subject: ASoC: soc-link: introduce exit() callback Some machine drivers allocate or request resources with snd_soc_link_init() phase of the card probe. These resources need to be properly released when removing a card, and this patch suggests a dual exit() callback. The exit() is invoked in soc_remove_pcm_runtime(), which is not completely symmetric with the init() invoked in soc_init_pcm_runtime(). Alternate solutions were considered, e.g. adding a .remove() callback for the platform driver, but that's not symmetrical at all and would be difficult to handle if there are more than one dailink implementing an .init(). We looked also into using .remove_dai_link() callback, but that would also be imbalanced. Note that because of the error handling in snd_soc_bind_card(), which jumps to probe_end, there is no way to guarantee the exit() is invoked with resources allocated in the init(). Prior to releasing those resources, implementations of the exit() callback shall check the resources are valid. 
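A hypothetical machine-driver pairing (all names and the allocated resource are made up) showing the intended init()/exit() symmetry and the validity check described above:

#include <linux/slab.h>
#include <sound/soc.h>

/* Illustrative machine driver state; not from any in-tree driver. */
static void *foo_mach_priv;

static int foo_link_init(struct snd_soc_pcm_runtime *rtd)
{
	foo_mach_priv = kzalloc(64, GFP_KERNEL);
	return foo_mach_priv ? 0 : -ENOMEM;
}

static void foo_link_exit(struct snd_soc_pcm_runtime *rtd)
{
	/* init() may never have run if the card failed to bind earlier */
	if (!foo_mach_priv)
		return;
	kfree(foo_mach_priv);
	foo_mach_priv = NULL;
}

static struct snd_soc_dai_link foo_dai_link = {
	.name = "foo-link",
	.init = foo_link_init,
	.exit = foo_link_exit,
	/* cpus/codecs/platforms omitted for brevity */
};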
Suggested-by: Andy Shevchenko Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Curtis Malainey Link: https://lore.kernel.org/r/20200622154241.29053-2-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- include/sound/soc-link.h | 1 + include/sound/soc.h | 3 +++ sound/soc/soc-core.c | 3 +++ sound/soc/soc-link.c | 6 ++++++ 4 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h index 3dd6e33e94ec..337ac5666757 100644 --- a/include/sound/soc-link.h +++ b/include/sound/soc-link.h @@ -9,6 +9,7 @@ #define __SOC_LINK_H int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd); +void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd); int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); diff --git a/include/sound/soc.h b/include/sound/soc.h index 2756f9bcac3e..33aceadebd03 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -799,6 +799,9 @@ struct snd_soc_dai_link { /* codec/machine specific init - e.g. add machine controls */ int (*init)(struct snd_soc_pcm_runtime *rtd); + /* codec/machine specific exit - dual of init() */ + void (*exit)(struct snd_soc_pcm_runtime *rtd); + /* optional hw_params re-writing for BE and FE sync */ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 62c0c9482018..adedadcb0efb 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -945,6 +945,9 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card, { lockdep_assert_held(&client_mutex); + /* release machine specific resources */ + snd_soc_link_exit(rtd); + /* * Notify the machine driver for extra destruction */ diff --git a/sound/soc/soc-link.c b/sound/soc/soc-link.c index f849278beba0..1c3bf2118718 100644 --- a/sound/soc/soc-link.c +++ b/sound/soc/soc-link.c @@ -40,6 +40,12 @@ int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd) return soc_link_ret(rtd, ret); } +void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd) +{ + if (rtd->dai_link->exit) + rtd->dai_link->exit(rtd); +} + int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { -- cgit v1.2.3-59-g8ed1b From ac53503ee38a1ffbc47c7cca6cbfc48ba9c65c5e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:43 +0200 Subject: media: videobuf2: add V4L2_FLAG_MEMORY_NON_CONSISTENT flag By setting or clearing V4L2_FLAG_MEMORY_NON_CONSISTENT flag user-space should be able to set or clear queue's NON_CONSISTENT ->dma_attrs. Queue's ->dma_attrs are passed to the underlying allocator in __vb2_buf_mem_alloc(), so thus user-space is able to request vb2 buffer's memory to be either consistent (coherent) or non-consistent. The patch set also adds a corresponding capability flag: fill_buf_caps() reports V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS when queue supports user-space cache management hints. Note, however, that MMAP_CACHE_HINTS capability only valid when the queue is used for memory MMAP-ed streaming I/O. 
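A rough user-space sketch, with device setup and error handling trimmed, of probing the new capability before relying on the cache-hint buffer flags:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical helper; fd is an already-open V4L2 capture node. */
static int queue_supports_mmap_cache_hints(int fd)
{
	struct v4l2_requestbuffers req = {
		.count  = 4,
		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return 0;

	/* The capability is only reported for MMAP queues. */
	return !!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS);
}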
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/userspace-api/media/v4l/buffer.rst | 40 ++++++++++++++++++++-- .../userspace-api/media/v4l/vidioc-reqbufs.rst | 10 ++++++ drivers/media/common/videobuf2/videobuf2-v4l2.c | 2 ++ include/uapi/linux/videodev2.h | 3 ++ 4 files changed, 53 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst index 951ae1ed485f..5088393b5a5c 100644 --- a/Documentation/userspace-api/media/v4l/buffer.rst +++ b/Documentation/userspace-api/media/v4l/buffer.rst @@ -577,7 +577,10 @@ Buffer Flags applications shall use this flag if the data captured in the buffer is not going to be touched by the CPU, instead the buffer will, probably, be passed on to a DMA-capable hardware unit for - further processing or output. + further processing or output. This flag is ignored unless the + queue is used for :ref:`memory mapping ` streaming I/O and + reports :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. * .. _`V4L2-BUF-FLAG-NO-CACHE-CLEAN`: - ``V4L2_BUF_FLAG_NO_CACHE_CLEAN`` @@ -585,7 +588,10 @@ Buffer Flags - Caches do not have to be cleaned for this buffer. Typically applications shall use this flag for output buffers if the data in this buffer has not been created by the CPU but by some - DMA-capable unit, in which case caches have not been used. + DMA-capable unit, in which case caches have not been used. This flag + is ignored unless the queue is used for :ref:`memory mapping ` + streaming I/O and reports :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. * .. _`V4L2-BUF-FLAG-M2M-HOLD-CAPTURE-BUF`: - ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` @@ -681,6 +687,36 @@ Buffer Flags \normalsize +.. _memory-flags: + +Memory Consistency Flags +======================== + +.. tabularcolumns:: |p{7.0cm}|p{2.2cm}|p{8.3cm}| + +.. cssclass:: longtable + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + :widths: 3 1 4 + + * .. _`V4L2-FLAG-MEMORY-NON-CONSISTENT`: + + - ``V4L2_FLAG_MEMORY_NON_CONSISTENT`` + - 0x00000001 + - A buffer is allocated either in consistent (it will be automatically + coherent between the CPU and the bus) or non-consistent memory. The + latter can provide performance gains, for instance the CPU cache + sync/flush operations can be avoided if the buffer is accessed by the + corresponding device only and the CPU does not read/write to/from that + buffer. However, this requires extra care from the driver -- it must + guarantee memory consistency by issuing a cache flush/sync when + consistency is needed. If this flag is set V4L2 will attempt to + allocate the buffer in non-consistent memory. The flag takes effect + only if the buffer is used for :ref:`memory mapping ` I/O and the + queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. .. c:type:: v4l2_memory diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst index b6d52083707b..96a59793d857 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst @@ -126,6 +126,7 @@ aborting or finishing any DMA in progress, an implicit .. _V4L2-BUF-CAP-SUPPORTS-REQUESTS: .. _V4L2-BUF-CAP-SUPPORTS-ORPHANED-BUFS: .. _V4L2-BUF-CAP-SUPPORTS-M2M-HOLD-CAPTURE-BUF: +.. _V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS: .. 
cssclass:: longtable @@ -156,6 +157,15 @@ aborting or finishing any DMA in progress, an implicit - Only valid for stateless decoders. If set, then userspace can set the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag to hold off on returning the capture buffer until the OUTPUT timestamp changes. + * - ``V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS`` + - 0x00000040 + - This capability is set by the driver to indicate that the queue supports + cache and memory management hints. However, it's only valid when the + queue is used for :ref:`memory mapping ` streaming I/O. See + :ref:`V4L2_FLAG_MEMORY_NON_CONSISTENT `, + :ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE ` and + :ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN `. + Return Value ============ diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index f13851212cc8..e4b4354b42b8 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -710,6 +710,8 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps) *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF) *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF; + if (q->allow_cache_hints && q->io_modes & VB2_MMAP) + *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS; #ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API if (q->supports_requests) *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index c3a1cf1c507f..34ba1017b89b 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -189,6 +189,8 @@ enum v4l2_memory { V4L2_MEMORY_DMABUF = 4, }; +#define V4L2_FLAG_MEMORY_NON_CONSISTENT (1 << 0) + /* see also http://vektor.theorem.ca/graphics/ycbcr/ */ enum v4l2_colorspace { /* @@ -954,6 +956,7 @@ struct v4l2_requestbuffers { #define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3) #define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4) #define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5) +#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6) /** * struct v4l2_plane - plane info for multi-planar buffers -- cgit v1.2.3-59-g8ed1b From 7b4b45555c79db03dad8192e6ef85cb30236827b Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:44 +0200 Subject: media: videobuf2: add queue memory consistency parameter Preparations for future V4L2_FLAG_MEMORY_NON_CONSISTENT support. Extend vb2_core_reqbufs() parameters list to accept requests' ->flags, which will be used for memory consistency configuration. An attempt to allocate a buffer with consistency requirements which don't match queue's consistency model will fail. 
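For reference, a minimal sketch of a caller using the extended signature; passing 0 for the new flags argument keeps the previous consistent-memory behaviour, which is what every call site converted by this patch does (the wrapper below is hypothetical):

#include <media/videobuf2-core.h>

/* Hypothetical wrapper, shown only to illustrate the extended signature. */
static int foo_request_mmap_buffers(struct vb2_queue *q, unsigned int *count)
{
	/* flags == 0: keep the default, consistent-memory behaviour */
	return vb2_core_reqbufs(q, VB2_MEMORY_MMAP, 0, count);
}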
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-core.c | 51 ++++++++++++++++++++----- drivers/media/common/videobuf2/videobuf2-v4l2.c | 6 +-- drivers/media/dvb-core/dvb_vb2.c | 2 +- include/media/videobuf2-core.h | 8 +++- 4 files changed, 51 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 44d65f5be845..0fdcf90330df 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -664,11 +664,33 @@ int vb2_verify_memory_type(struct vb2_queue *q, } EXPORT_SYMBOL(vb2_verify_memory_type); +static void set_queue_consistency(struct vb2_queue *q, bool consistent_mem) +{ + q->dma_attrs &= ~DMA_ATTR_NON_CONSISTENT; + + if (!vb2_queue_allows_cache_hints(q)) + return; + if (!consistent_mem) + q->dma_attrs |= DMA_ATTR_NON_CONSISTENT; +} + +static bool verify_consistency_attr(struct vb2_queue *q, bool consistent_mem) +{ + bool queue_is_consistent = !(q->dma_attrs & DMA_ATTR_NON_CONSISTENT); + + if (consistent_mem != queue_is_consistent) { + dprintk(1, "memory consistency model mismatch\n"); + return false; + } + return true; +} + int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count) + unsigned int flags, unsigned int *count) { unsigned int num_buffers, allocated_buffers, num_planes = 0; unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool consistent_mem = true; unsigned int i; int ret; @@ -683,7 +705,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, } if (*count == 0 || q->num_buffers != 0 || - (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) { + (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || + !verify_consistency_attr(q, consistent_mem)) { /* * We already have buffers allocated, so first check if they * are not in use and can be freed. @@ -720,6 +743,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; + set_queue_consistency(q, consistent_mem); /* * Ask the driver how many buffers and planes per buffer it requires. 
@@ -804,11 +828,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, EXPORT_SYMBOL_GPL(vb2_core_reqbufs); int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count, unsigned requested_planes, - const unsigned requested_sizes[]) + unsigned int flags, unsigned int *count, + unsigned int requested_planes, + const unsigned int requested_sizes[]) { unsigned int num_planes = 0, num_buffers, allocated_buffers; unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool consistent_mem = true; int ret; if (q->num_buffers == VB2_MAX_FRAME) { @@ -823,10 +849,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; + set_queue_consistency(q, consistent_mem); q->waiting_for_buffers = !q->is_output; - } else if (q->memory != memory) { - dprintk(1, "memory model mismatch\n"); - return -EINVAL; + } else { + if (q->memory != memory) { + dprintk(1, "memory model mismatch\n"); + return -EINVAL; + } + if (!verify_consistency_attr(q, consistent_mem)) + return -EINVAL; } num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); @@ -2498,7 +2529,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) fileio->memory = VB2_MEMORY_MMAP; fileio->type = q->type; q->fileio = fileio; - ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count); + ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); if (ret) goto err_kfree; @@ -2555,7 +2586,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) err_reqbufs: fileio->count = 0; - vb2_core_reqbufs(q, fileio->memory, &fileio->count); + vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); err_kfree: q->fileio = NULL; @@ -2575,7 +2606,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q) vb2_core_streamoff(q, q->type); q->fileio = NULL; fileio->count = 0; - vb2_core_reqbufs(q, fileio->memory, &fileio->count); + vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); kfree(fileio); dprintk(3, "file io emulator closed\n"); } diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index e4b4354b42b8..26a3ec333bb7 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -723,7 +723,7 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) int ret = vb2_verify_memory_type(q, req->memory, req->type); fill_buf_caps(q, &req->capabilities); - return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count); + return ret ? ret : vb2_core_reqbufs(q, req->memory, 0, &req->count); } EXPORT_SYMBOL_GPL(vb2_reqbufs); @@ -797,7 +797,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) for (i = 0; i < requested_planes; i++) if (requested_sizes[i] == 0) return -EINVAL; - return ret ? ret : vb2_core_create_bufs(q, create->memory, + return ret ? ret : vb2_core_create_bufs(q, create->memory, 0, &create->count, requested_planes, requested_sizes); } EXPORT_SYMBOL_GPL(vb2_create_bufs); @@ -973,7 +973,7 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv, return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count); + res = vb2_core_reqbufs(vdev->queue, p->memory, 0, &p->count); /* If count == 0, then the owner has released all buffers and he is no longer owner of the queue. Otherwise we have a new owner. 
*/ if (res == 0) diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 6974f1731529..959d110407a4 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -342,7 +342,7 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req) ctx->buf_siz = req->size; ctx->buf_cnt = req->count; - ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count); + ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count); if (ret) { ctx->state = DVB_VB2_STATE_NONE; dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name, diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index ccc5c498d3e3..9e68fe043a6c 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -740,6 +740,8 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); * vb2_core_reqbufs() - Initiate streaming. * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. Currently, the only + * used flag is %V4L2_FLAG_MEMORY_NON_CONSISTENT. * @count: requested buffer count. * * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called @@ -764,12 +766,13 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); * Return: returns zero on success; an error code otherwise. */ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count); + unsigned int flags, unsigned int *count); /** * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. * @count: requested buffer count. * @requested_planes: number of planes requested. * @requested_sizes: array with the size of the planes. @@ -787,7 +790,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, * Return: returns zero on success; an error code otherwise. */ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count, unsigned int requested_planes, + unsigned int flags, unsigned int *count, + unsigned int requested_planes, const unsigned int requested_sizes[]); /** -- cgit v1.2.3-59-g8ed1b From 1e0b2318fa75d186ee0d2be31843ce867385fcc4 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:45 +0200 Subject: media: videobuf2: handle V4L2_FLAG_MEMORY_NON_CONSISTENT flag This patch lets user-space to request a non-consistent memory allocation during CREATE_BUFS and REQBUFS ioctl calls. = CREATE_BUFS struct v4l2_create_buffers has seven 4-byte reserved areas, so reserved[0] is renamed to ->flags. The struct, thus, now has six reserved 4-byte regions. = CREATE_BUFS32 struct v4l2_create_buffers32 has seven 4-byte reserved areas, so reserved[0] is renamed to ->flags. The struct, thus, now has six reserved 4-byte regions. = REQBUFS We use one bit of a ->reserved[1] member of struct v4l2_requestbuffers, which is now renamed to ->flags. Unlike v4l2_create_buffers, struct v4l2_requestbuffers does not have enough reserved room. Therefore for backward compatibility ->reserved and ->flags were put into anonymous union. 
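A minimal user-space sketch (illustrative only, not taken from the patch) of requesting non-consistent MMAP buffers through the renamed flags field; the file descriptor fd is assumed to be an already opened video node:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    struct v4l2_requestbuffers req = {
            .count  = 4,
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory = V4L2_MEMORY_MMAP,
            .flags  = V4L2_FLAG_MEMORY_NON_CONSISTENT,
    };

    if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
            perror("VIDIOC_REQBUFS");

    /* The core clears the flag when the queue lacks
     * V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS or the memory type is not
     * MMAP, so re-check it on return. */
    if (!(req.flags & V4L2_FLAG_MEMORY_NON_CONSISTENT))
            fprintf(stderr, "non-consistent allocation was not honoured\n");
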
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/v4l/vidioc-create-bufs.rst | 7 ++++++- .../userspace-api/media/v4l/vidioc-reqbufs.rst | 11 ++++++++-- drivers/media/common/videobuf2/videobuf2-core.c | 6 ++++++ drivers/media/common/videobuf2/videobuf2-v4l2.c | 24 ++++++++++++++++++---- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 10 +++++++-- drivers/media/v4l2-core/v4l2-ioctl.c | 5 +---- include/uapi/linux/videodev2.h | 11 ++++++++-- 7 files changed, 59 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst b/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst index e1afc5b504c2..f2a702870fad 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst @@ -121,7 +121,12 @@ than the number requested. other changes, then set ``count`` to 0, ``memory`` to ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type. * - __u32 - - ``reserved``\ [7] + - ``flags`` + - Specifies additional buffer management attributes. + See :ref:`memory-flags`. + + * - __u32 + - ``reserved``\ [6] - A place holder for future extensions. Drivers and applications must set the array to zero. diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst index 96a59793d857..75d894d9c36c 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst @@ -112,10 +112,17 @@ aborting or finishing any DMA in progress, an implicit ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will free any previously allocated buffers, so this is typically something that will be done at the start of the application. + * - union { + - (anonymous) + * - __u32 + - ``flags`` + - Specifies additional buffer management attributes. + See :ref:`memory-flags`. * - __u32 - ``reserved``\ [1] - - A place holder for future extensions. Drivers and applications - must set the array to zero. + - Kept for backwards compatibility. Use ``flags`` instead. + * - } + - .. 
tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}| diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 0fdcf90330df..626c4db5134c 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -694,6 +694,9 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int i; int ret; + if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) + consistent_mem = false; + if (q->streaming) { dprintk(1, "streaming active\n"); return -EBUSY; @@ -837,6 +840,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, bool consistent_mem = true; int ret; + if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) + consistent_mem = false; + if (q->num_buffers == VB2_MAX_FRAME) { dprintk(1, "maximum number of buffers already allocated\n"); return -ENOBUFS; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 26a3ec333bb7..559a229cac41 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -718,12 +718,22 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps) #endif } +static void clear_consistency_attr(struct vb2_queue *q, + int memory, + unsigned int *flags) +{ + if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) + *flags &= ~V4L2_FLAG_MEMORY_NON_CONSISTENT; +} + int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) { int ret = vb2_verify_memory_type(q, req->memory, req->type); fill_buf_caps(q, &req->capabilities); - return ret ? ret : vb2_core_reqbufs(q, req->memory, 0, &req->count); + clear_consistency_attr(q, req->memory, &req->flags); + return ret ? ret : vb2_core_reqbufs(q, req->memory, + req->flags, &req->count); } EXPORT_SYMBOL_GPL(vb2_reqbufs); @@ -755,6 +765,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) unsigned i; fill_buf_caps(q, &create->capabilities); + clear_consistency_attr(q, create->memory, &create->flags); create->index = q->num_buffers; if (create->count == 0) return ret != -EBUSY ? ret : 0; @@ -797,8 +808,11 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) for (i = 0; i < requested_planes; i++) if (requested_sizes[i] == 0) return -EINVAL; - return ret ? ret : vb2_core_create_bufs(q, create->memory, 0, - &create->count, requested_planes, requested_sizes); + return ret ? ret : vb2_core_create_bufs(q, create->memory, + create->flags, + &create->count, + requested_planes, + requested_sizes); } EXPORT_SYMBOL_GPL(vb2_create_bufs); @@ -969,11 +983,12 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv, int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); fill_buf_caps(vdev->queue, &p->capabilities); + clear_consistency_attr(vdev->queue, p->memory, &p->flags); if (res) return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - res = vb2_core_reqbufs(vdev->queue, p->memory, 0, &p->count); + res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count); /* If count == 0, then the owner has released all buffers and he is no longer owner of the queue. Otherwise we have a new owner. */ if (res == 0) @@ -991,6 +1006,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv, p->index = vdev->queue->num_buffers; fill_buf_caps(vdev->queue, &p->capabilities); + clear_consistency_attr(vdev->queue, p->memory, &p->flags); /* * If count == 0, then just check if memory and type are valid. 
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index a99e82ec9ab6..593bcf6c3735 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -246,6 +246,9 @@ struct v4l2_format32 { * @memory: buffer memory type * @format: frame format, for which buffers are requested * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and + * configured for MMAP streaming I/O). * @reserved: future extensions */ struct v4l2_create_buffers32 { @@ -254,7 +257,8 @@ struct v4l2_create_buffers32 { __u32 memory; /* enum v4l2_memory */ struct v4l2_format32 format; __u32 capabilities; - __u32 reserved[7]; + __u32 flags; + __u32 reserved[6]; }; static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) @@ -355,7 +359,8 @@ static int get_v4l2_create32(struct v4l2_create_buffers __user *p64, { if (!access_ok(p32, sizeof(*p32)) || copy_in_user(p64, p32, - offsetof(struct v4l2_create_buffers32, format))) + offsetof(struct v4l2_create_buffers32, format)) || + assign_in_user(&p64->flags, &p32->flags)) return -EFAULT; return __get_v4l2_format32(&p64->format, &p32->format, aux_buf, aux_space); @@ -417,6 +422,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64, copy_in_user(p32, p64, offsetof(struct v4l2_create_buffers32, format)) || assign_in_user(&p32->capabilities, &p64->capabilities) || + assign_in_user(&p32->flags, &p64->flags) || copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) return -EFAULT; return __put_v4l2_format32(&p64->format, &p32->format); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 2322f08a98be..02bfef0da76d 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2038,9 +2038,6 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - - CLEAR_AFTER_FIELD(p, capabilities); - return ops->vidioc_reqbufs(file, fh, p); } @@ -2080,7 +2077,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - CLEAR_AFTER_FIELD(create, capabilities); + CLEAR_AFTER_FIELD(create, flags); v4l_sanitize_format(&create->format); diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 34ba1017b89b..fec2607a07e3 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -946,7 +946,10 @@ struct v4l2_requestbuffers { __u32 type; /* enum v4l2_buf_type */ __u32 memory; /* enum v4l2_memory */ __u32 capabilities; - __u32 reserved[1]; + union { + __u32 flags; + __u32 reserved[1]; + }; }; /* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */ @@ -2450,6 +2453,9 @@ struct v4l2_dbg_chip_info { * @memory: enum v4l2_memory; buffer memory type * @format: frame format, for which buffers are requested * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability + * and configured for MMAP streaming I/O). 
* @reserved: future extensions */ struct v4l2_create_buffers { @@ -2458,7 +2464,8 @@ struct v4l2_create_buffers { __u32 memory; struct v4l2_format format; __u32 capabilities; - __u32 reserved[7]; + __u32 flags; + __u32 reserved[6]; }; /* -- cgit v1.2.3-59-g8ed1b From 286cf7d3a99e1ca8c1d8e674b9a98f2dbe8520dc Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Tue, 26 May 2020 10:59:53 +0200 Subject: media: videodev2.h: add V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL flag Add the V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL flag to signal that the coded frame interval can be set separately from the raw frame interval for stateful encoders. Signed-off-by: Hans Verkuil Reviewed-by: Michael Tretter Acked-by: Tomasz Figa Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/v4l/vidioc-enum-fmt.rst | 30 ++++++++++++++++++---- .../userspace-api/media/videodev2.h.rst.exceptions | 1 + include/uapi/linux/videodev2.h | 1 + 3 files changed, 27 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst b/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst index a53dd3d7f7e2..05835e04c20b 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst @@ -167,17 +167,37 @@ the ``mbus_code`` field is handled differently: - The hardware decoder for this compressed bytestream format (aka coded format) is capable of parsing a continuous bytestream. Applications do not need to parse the bytestream themselves to find the boundaries - between frames/fields. This flag can only be used in combination with - the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to compressed + between frames/fields. + + This flag can only be used in combination with the + ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to compressed formats only. This flag is valid for stateful decoders only. * - ``V4L2_FMT_FLAG_DYN_RESOLUTION`` - 0x0008 - Dynamic resolution switching is supported by the device for this compressed bytestream format (aka coded format). It will notify the user via the event ``V4L2_EVENT_SOURCE_CHANGE`` when changes in the video - parameters are detected. This flag can only be used in combination - with the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to - compressed formats only. It is also only applies to stateful codecs. + parameters are detected. + + This flag can only be used in combination with the + ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to + compressed formats only. This flag is valid for stateful codecs only. + * - ``V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL`` + - 0x0010 + - The hardware encoder supports setting the ``CAPTURE`` coded frame + interval separately from the ``OUTPUT`` raw frame interval. + Setting the ``OUTPUT`` raw frame interval with :ref:`VIDIOC_S_PARM ` + also sets the ``CAPTURE`` coded frame interval to the same value. + If this flag is set, then the ``CAPTURE`` coded frame interval can be + set to a different value afterwards. This is typically used for + offline encoding where the ``OUTPUT`` raw frame interval is used as + a hint for reserving hardware encoder resources and the ``CAPTURE`` coded + frame interval is the actual frame rate embedded in the encoded video + stream. + + This flag can only be used in combination with the + ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to + compressed formats only. This flag is valid for stateful encoders only. 
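As an illustration of the intended usage (a hedged sketch, not part of the patch), a stateful encoder client could set the two intervals like this; the file descriptor fd, the multi-planar buffer types and the example frame rates are assumptions, and the usual V4L2 user-space headers are presumed included:

    struct v4l2_streamparm out_parm = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE };
    struct v4l2_streamparm cap_parm = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE };

    /* OUTPUT (raw) frame interval: a hint used to reserve encoder
     * resources, e.g. 30 fps.  This also initialises the CAPTURE interval. */
    out_parm.parm.output.timeperframe.numerator = 1;
    out_parm.parm.output.timeperframe.denominator = 30;
    ioctl(fd, VIDIOC_S_PARM, &out_parm);

    /* CAPTURE (coded) frame interval: the rate embedded in the encoded
     * stream, set separately afterwards when the format reports
     * V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL, e.g. 25 fps. */
    cap_parm.parm.capture.timeperframe.numerator = 1;
    cap_parm.parm.capture.timeperframe.denominator = 25;
    ioctl(fd, VIDIOC_S_PARM, &cap_parm);
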
Return Value diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions index a625fb90e3a9..ca05e4e126b2 100644 --- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions +++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions @@ -187,6 +187,7 @@ replace define V4L2_FMT_FLAG_COMPRESSED fmtdesc-flags replace define V4L2_FMT_FLAG_EMULATED fmtdesc-flags replace define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM fmtdesc-flags replace define V4L2_FMT_FLAG_DYN_RESOLUTION fmtdesc-flags +replace define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL fmtdesc-flags # V4L2 timecode types replace define V4L2_TC_TYPE_24FPS timecode-type diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index fec2607a07e3..303805438814 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -794,6 +794,7 @@ struct v4l2_fmtdesc { #define V4L2_FMT_FLAG_EMULATED 0x0002 #define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004 #define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008 +#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010 /* Frame Size and frame rate enumeration */ /* -- cgit v1.2.3-59-g8ed1b From 809b1b04df898b6d182069146231a3cbf5f2d9cc Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Wed, 17 Jun 2020 06:42:08 +0800 Subject: spi: introduce fallback to pio Add fallback to pio mode in case dma transfer failed with error status SPI_TRANS_FAIL_NO_START. If spi client driver want to enable this feature please set xfer->error in the proper place such as dmaengine_prep_slave_sg() failure detect(but no any data put into spi bus yet). Besides, add master->fallback checking in its can_dma() so that spi core could switch to pio next time. Please refer to spi-imx.c. Signed-off-by: Robin Gong Link: https://lore.kernel.org/r/1592347329-28363-2-git-send-email-yibin.gong@nxp.com Signed-off-by: Mark Brown --- drivers/spi/spi.c | 12 ++++++++++++ include/linux/spi/spi.h | 7 +++++++ 2 files changed, 19 insertions(+) (limited to 'include') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8158e281f354..6fa56590bba2 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -982,6 +982,8 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } + ctlr->cur_msg_mapped = false; + return 0; } #else /* !CONFIG_HAS_DMA */ @@ -1234,8 +1236,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, if (xfer->tx_buf || xfer->rx_buf) { reinit_completion(&ctlr->xfer_completion); +fallback_pio: ret = ctlr->transfer_one(ctlr, msg->spi, xfer); if (ret < 0) { + if (ctlr->cur_msg_mapped && + (xfer->error & SPI_TRANS_FAIL_NO_START)) { + __spi_unmap_msg(ctlr, msg); + ctlr->fallback = true; + xfer->error &= ~SPI_TRANS_FAIL_NO_START; + goto fallback_pio; + } + SPI_STATISTICS_INCREMENT_FIELD(statm, errors); SPI_STATISTICS_INCREMENT_FIELD(stats, @@ -1693,6 +1704,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr->cur_msg = NULL; ctlr->cur_msg_prepared = false; + ctlr->fallback = false; kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index aac57b5b7c21..b4917df79637 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -447,6 +447,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * If the driver does not set this, the 
SPI core takes the snapshot as * close to the driver hand-over as possible. * @irq_flags: Interrupt enable state during PTP system timestamping + * @fallback: fallback to pio if dma transfer return failure with + * SPI_TRANS_FAIL_NO_START. * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -602,6 +604,7 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; + bool fallback; struct completion xfer_completion; size_t max_dma_len; @@ -847,6 +850,7 @@ extern void spi_res_release(struct spi_controller *ctlr, * back unset and they need the better resolution. * @timestamped_post: See above. The reason why both exist is that these * booleans are also used to keep state in the core SPI logic. + * @error: Error status logged by spi controller driver. * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. @@ -940,6 +944,9 @@ struct spi_transfer { bool timestamped; struct list_head transfer_list; + +#define SPI_TRANS_FAIL_NO_START BIT(0) + u16 error; }; /** -- cgit v1.2.3-59-g8ed1b From 8dd65ed67e1679830dc8f94169d25ec9452da99d Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 20 Jun 2020 12:16:40 +0200 Subject: media: cec: remove unused waitq and phys_addrs fields The cec_adapter struct contained a waitq field that isn't used anywhere, so drop this. It also contained a phys_addrs array to store any reported physical addresses. However, this was never actually used, so this field is removed as well. The original idea was to let the core keep track of this information, but nothing was ever done with this. Should this be needed in the future then it is easy enough to resurrect this. 
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/cec/core/cec-adap.c | 4 ---- drivers/media/cec/core/cec-core.c | 1 - include/media/cec.h | 2 -- 3 files changed, 7 deletions(-) (limited to 'include') diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 6a04d19a96b2..4efe8014445e 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1306,7 +1306,6 @@ static int cec_config_log_addr(struct cec_adapter *adap, las->log_addr[idx] = log_addr; las->log_addr_mask |= 1 << log_addr; - adap->phys_addrs[log_addr] = adap->phys_addr; return 1; } @@ -1324,7 +1323,6 @@ static void cec_adap_unconfigure(struct cec_adapter *adap) adap->log_addrs.log_addr_mask = 0; adap->is_configuring = false; adap->is_configured = false; - memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs)); cec_flush(adap); wake_up_interruptible(&adap->kthread_waitq); cec_post_state_event(adap); @@ -1974,8 +1972,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, case CEC_MSG_REPORT_PHYSICAL_ADDR: { u16 pa = (msg->msg[2] << 8) | msg->msg[3]; - if (!from_unregistered) - adap->phys_addrs[init_laddr] = pa; dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n", cec_phys_addr_exp(pa), init_laddr); break; diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c index 0c52e1bb3910..c599cd94dd62 100644 --- a/drivers/media/cec/core/cec-core.c +++ b/drivers/media/cec/core/cec-core.c @@ -265,7 +265,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, adap->sequence = 0; adap->ops = ops; adap->priv = priv; - memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs)); mutex_init(&adap->lock); INIT_LIST_HEAD(&adap->transmit_queue); INIT_LIST_HEAD(&adap->wait_queue); diff --git a/include/media/cec.h b/include/media/cec.h index 972bc8cd4384..1de44a4fc390 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -162,7 +162,6 @@ struct cec_adapter { struct task_struct *kthread; wait_queue_head_t kthread_waitq; - wait_queue_head_t waitq; const struct cec_adap_ops *ops; void *priv; @@ -197,7 +196,6 @@ struct cec_adapter { struct dentry *status_file; struct dentry *error_inj_file; - u16 phys_addrs[15]; u32 sequence; char input_phys[32]; -- cgit v1.2.3-59-g8ed1b From e233f81cfc906e415e5784526d9a6ec7d7cf3c5c Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 20 Jun 2020 12:16:41 +0200 Subject: media: media/cec.h: document cec_adapter fields Document this core CEC structure. 
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/cec.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'include') diff --git a/include/media/cec.h b/include/media/cec.h index 1de44a4fc390..32f7c695d7b5 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -144,6 +144,55 @@ struct cec_adap_ops { */ #define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) +/** + * struct cec_adapter - cec adapter structure + * @owner: module owner + * @name: name of the CEC adapter + * @devnode: device node for the /dev/cecX device + * @lock: mutex controlling access to this structure + * @rc: remote control device + * @transmit_queue: queue of pending transmits + * @transmit_queue_sz: number of pending transmits + * @wait_queue: queue of transmits waiting for a reply + * @transmitting: CEC messages currently being transmitted + * @transmit_in_progress: true if a transmit is in progress + * @kthread_config: kthread used to configure a CEC adapter + * @config_completion: used to signal completion of the config kthread + * @kthread: main CEC processing thread + * @kthread_waitq: main CEC processing wait_queue + * @ops: cec adapter ops + * @priv: cec driver's private data + * @capabilities: cec adapter capabilities + * @available_log_addrs: maximum number of available logical addresses + * @phys_addr: the current physical address + * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high + * in order to transmit or receive CEC messages. This is usually a HW + * limitation. + * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) + * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) + * @cec_pin_is_high: if true then the CEC pin is high. Only used with the + * CEC pin framework. + * @last_initiator: the initiator of the last transmitted message. + * @monitor_all_cnt: number of filehandles monitoring all msgs + * @monitor_pin_cnt: number of filehandles monitoring pin changes + * @follower_cnt: number of filehandles in follower mode + * @cec_follower: filehandle of the exclusive follower + * @cec_initiator: filehandle of the exclusive initiator + * @passthrough: if true, then the exclusive follower is in + * passthrough mode. + * @log_addrs: current logical addresses + * @conn_info: current connector info + * @tx_timeouts: number of transmit timeouts + * @notifier: CEC notifier + * @pin: CEC pin status struct + * @cec_dir: debugfs cec directory + * @status_file: debugfs cec status file + * @error_inj_file: debugfs cec error injection file + * @sequence: transmit sequence counter + * @input_phys: remote control input_phys name + * + * This structure represents a cec adapter. + */ struct cec_adapter { struct module *owner; char name[32]; -- cgit v1.2.3-59-g8ed1b From d63cc24933c774ea464090af1998a7b63f11c166 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 8 Apr 2020 12:42:09 +0300 Subject: net/mlx5: Export resource dump interface Export some of the resource dump API. mlx5_ib driver will use it in downstream patches. 
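A hedged sketch of how a consumer such as mlx5_ib might drive the now-exported interface, written as a fragment of a hypothetical helper; the device pointer mdev, the QP number qpn, and the loop-until-zero return convention of mlx5_rsc_dump_next() are assumptions of this illustration, and error handling is simplified:

    struct mlx5_rsc_key key = {
            .rsc    = MLX5_SGMT_TYPE_FULL_QPC,
            .index1 = qpn,          /* resource index, assumed known */
    };
    struct mlx5_rsc_dump_cmd *cmd;
    struct page *page;
    int size, more;

    page = alloc_page(GFP_KERNEL);
    if (!page)
            return -ENOMEM;

    cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
    if (IS_ERR(cmd)) {
            __free_page(page);
            return PTR_ERR(cmd);
    }

    do {
            more = mlx5_rsc_dump_next(mdev, cmd, page, &size);
            if (more < 0)
                    break;
            /* consume 'size' bytes from 'page' here */
    } while (more);

    mlx5_rsc_dump_cmd_destroy(cmd);
    __free_page(page);
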
Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- .../ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 3 ++ .../ethernet/mellanox/mlx5/core/diag/rsc_dump.h | 33 +-------------- include/linux/mlx5/rsc_dump.h | 48 ++++++++++++++++++++++ 3 files changed, 52 insertions(+), 32 deletions(-) create mode 100644 include/linux/mlx5/rsc_dump.h (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 17ab7efe693d..10218c2324cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -130,11 +130,13 @@ struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, cmd->mem_size = key->size; return cmd; } +EXPORT_SYMBOL(mlx5_rsc_dump_cmd_create); void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd) { kfree(cmd); } +EXPORT_SYMBOL(mlx5_rsc_dump_cmd_destroy); int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, struct page *page, int *size) @@ -155,6 +157,7 @@ int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, return more_dump; } +EXPORT_SYMBOL(mlx5_rsc_dump_next); #define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h index 148270073e71..64c4956db6d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h @@ -4,41 +4,10 @@ #ifndef __MLX5_RSC_DUMP_H #define __MLX5_RSC_DUMP_H +#include #include #include "mlx5_core.h" -enum mlx5_sgmt_type { - MLX5_SGMT_TYPE_HW_CQPC, - MLX5_SGMT_TYPE_HW_SQPC, - MLX5_SGMT_TYPE_HW_RQPC, - MLX5_SGMT_TYPE_FULL_SRQC, - MLX5_SGMT_TYPE_FULL_CQC, - MLX5_SGMT_TYPE_FULL_EQC, - MLX5_SGMT_TYPE_FULL_QPC, - MLX5_SGMT_TYPE_SND_BUFF, - MLX5_SGMT_TYPE_RCV_BUFF, - MLX5_SGMT_TYPE_SRQ_BUFF, - MLX5_SGMT_TYPE_CQ_BUFF, - MLX5_SGMT_TYPE_EQ_BUFF, - MLX5_SGMT_TYPE_SX_SLICE, - MLX5_SGMT_TYPE_SX_SLICE_ALL, - MLX5_SGMT_TYPE_RDB, - MLX5_SGMT_TYPE_RX_SLICE_ALL, - MLX5_SGMT_TYPE_MENU, - MLX5_SGMT_TYPE_TERMINATE, - - MLX5_SGMT_TYPE_NUM, /* Keep last */ -}; - -struct mlx5_rsc_key { - enum mlx5_sgmt_type rsc; - int index1; - int index2; - int num_of_obj1; - int num_of_obj2; - int size; -}; - #define MLX5_RSC_DUMP_ALL 0xFFFF struct mlx5_rsc_dump_cmd; struct mlx5_rsc_dump; diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h new file mode 100644 index 000000000000..87415fa754fe --- /dev/null +++ b/include/linux/mlx5/rsc_dump.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies inc. 
*/ + +#include + +#ifndef __MLX5_RSC_DUMP +#define __MLX5_RSC_DUMP + +enum mlx5_sgmt_type { + MLX5_SGMT_TYPE_HW_CQPC, + MLX5_SGMT_TYPE_HW_SQPC, + MLX5_SGMT_TYPE_HW_RQPC, + MLX5_SGMT_TYPE_FULL_SRQC, + MLX5_SGMT_TYPE_FULL_CQC, + MLX5_SGMT_TYPE_FULL_EQC, + MLX5_SGMT_TYPE_FULL_QPC, + MLX5_SGMT_TYPE_SND_BUFF, + MLX5_SGMT_TYPE_RCV_BUFF, + MLX5_SGMT_TYPE_SRQ_BUFF, + MLX5_SGMT_TYPE_CQ_BUFF, + MLX5_SGMT_TYPE_EQ_BUFF, + MLX5_SGMT_TYPE_SX_SLICE, + MLX5_SGMT_TYPE_SX_SLICE_ALL, + MLX5_SGMT_TYPE_RDB, + MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_MENU, + MLX5_SGMT_TYPE_TERMINATE, + + MLX5_SGMT_TYPE_NUM, /* Keep last */ +}; + +struct mlx5_rsc_key { + enum mlx5_sgmt_type rsc; + int index1; + int index2; + int num_of_obj1; + int num_of_obj2; + int size; +}; + +struct mlx5_rsc_dump_cmd; + +struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, + struct mlx5_rsc_key *key); +void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd); +int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, + struct page *page, int *size); +#endif /* __MLX5_RSC_DUMP */ -- cgit v1.2.3-59-g8ed1b From 608ca553c9a2008908120e0e45b1cfc4aefcfd49 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 8 Apr 2020 12:36:20 +0300 Subject: net/mlx5: Add support in query QP, CQ and MKEY segments Introduce new resource dump segments - PRM_QUERY_QP, PRM_QUERY_CQ and PRM_QUERY_MKEY. These segments contains the resource dump in PRM query format. Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 3 +++ include/linux/mlx5/rsc_dump.h | 3 +++ 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 10218c2324cc..4924a5658853 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -23,6 +23,9 @@ static const char *const mlx5_rsc_sgmt_name[] = { MLX5_SGMT_STR_ASSING(SX_SLICE_ALL), MLX5_SGMT_STR_ASSING(RDB), MLX5_SGMT_STR_ASSING(RX_SLICE_ALL), + MLX5_SGMT_STR_ASSING(PRM_QUERY_QP), + MLX5_SGMT_STR_ASSING(PRM_QUERY_CQ), + MLX5_SGMT_STR_ASSING(PRM_QUERY_MKEY), }; struct mlx5_rsc_dump { diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h index 87415fa754fe..d11c0b228620 100644 --- a/include/linux/mlx5/rsc_dump.h +++ b/include/linux/mlx5/rsc_dump.h @@ -23,6 +23,9 @@ enum mlx5_sgmt_type { MLX5_SGMT_TYPE_SX_SLICE_ALL, MLX5_SGMT_TYPE_RDB, MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_PRM_QUERY_QP, + MLX5_SGMT_TYPE_PRM_QUERY_CQ, + MLX5_SGMT_TYPE_PRM_QUERY_MKEY, MLX5_SGMT_TYPE_MENU, MLX5_SGMT_TYPE_TERMINATE, -- cgit v1.2.3-59-g8ed1b From f4434529003522d72b314d26d65b18c06ea9307c Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:36 +0300 Subject: RDMA: Add dedicated MR resource tracker function In order to avoid double multiplexing of the resource when it is a MR, add a dedicated callback function. 
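The shape of the new callback can be shown with a driver-agnostic sketch (the mydrv_ names, the choice of attribute and the ops wiring are illustrative only, not taken from any in-tree driver):

    static int mydrv_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
    {
            struct nlattr *table_attr;

            table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
            if (!table_attr)
                    return -EMSGSIZE;

            /* No container_of() on a restrack entry and no type switch:
             * the core hands over the ib_mr directly. */
            if (rdma_nl_put_driver_u32(msg, "lkey", ibmr->lkey))
                    goto err;

            nla_nest_end(msg, table_attr);
            return 0;

    err:
            nla_nest_cancel(msg, table_attr);
            return -EMSGSIZE;
    }

    static const struct ib_device_ops mydrv_dev_ops = {
            /* ... */
            .fill_res_mr_entry = mydrv_fill_res_mr_entry,
    };
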
Link: https://lore.kernel.org/r/20200623113043.1228482-5-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 3 ++- drivers/infiniband/core/nldev.c | 18 ++++-------------- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 + drivers/infiniband/hw/cxgb4/provider.c | 1 + drivers/infiniband/hw/cxgb4/restrack.c | 5 +---- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 6 ++---- drivers/infiniband/hw/mlx5/restrack.c | 28 ++++------------------------ include/rdma/ib_verbs.h | 4 ++-- 9 files changed, 19 insertions(+), 51 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 905a2beaf885..ffdf9787e7f6 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2618,7 +2618,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); SET_DEVICE_OP(dev_ops, fill_res_entry); - SET_DEVICE_OP(dev_ops, fill_stat_entry); + SET_DEVICE_OP(dev_ops, fill_res_mr_entry); + SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); SET_DEVICE_OP(dev_ops, get_hw_stats); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 8548f09746ab..a4f3f838d6fe 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -454,14 +454,6 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg, return dev->ops.fill_res_entry(msg, res); } -static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (!dev->ops.fill_stat_entry) - return false; - return dev->ops.fill_stat_entry(msg, res); -} - static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { @@ -641,9 +633,8 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_mr_entry) + return dev->ops.fill_res_mr_entry(msg, mr); return 0; err: return -EMSGSIZE; @@ -786,9 +777,8 @@ static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) goto err; - if (fill_stat_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_stat_mr_entry) + return dev->ops.fill_stat_mr_entry(msg, mr); return 0; err: diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index e8e11bd95e42..5b9884ca2f5e 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1055,6 +1055,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp); typedef int c4iw_restrack_func(struct sk_buff *msg, struct rdma_restrack_entry *res); +int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index ba83d942997c..36eeb595d41c 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -486,6 +486,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, .fill_res_entry = fill_res_entry, + .fill_res_mr_entry = 
c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = c4iw_get_dma_mr, .get_hw_stats = c4iw_get_mib, diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index f82d46ed969d..9a5ca9192c1c 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -433,10 +433,8 @@ err: return -EMSGSIZE; } -static int fill_res_mr_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr) { - struct ib_mr *ibmr = container_of(res, struct ib_mr, res); struct c4iw_mr *mhp = to_c4iw_mr(ibmr); struct c4iw_dev *dev = mhp->rhp; u32 stag = mhp->attr.stag; @@ -497,5 +495,4 @@ c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = fill_res_qp_entry, [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, [RDMA_RESTRACK_CQ] = fill_res_cq_entry, - [RDMA_RESTRACK_MR] = fill_res_mr_entry, }; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 343a8b8361e7..fa9237a10ed8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6598,8 +6598,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .drain_rq = mlx5_ib_drain_rq, .drain_sq = mlx5_ib_drain_sq, .enable_driver = mlx5_ib_enable_driver, - .fill_res_entry = mlx5_ib_fill_res_entry, - .fill_stat_entry = mlx5_ib_fill_stat_entry, + .fill_res_mr_entry = mlx5_ib_fill_res_mr_entry, + .fill_stat_mr_entry = mlx5_ib_fill_stat_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = mlx5_ib_get_dma_mr, .get_link_layer = mlx5_ib_port_link_layer, diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 5dbe3eb0d9cb..37ead14eb317 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1375,10 +1375,8 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev, u8 *native_port_num); void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev, u8 port_num); -int mlx5_ib_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res); -int mlx5_ib_fill_stat_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res); +int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); +int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); extern const struct uapi_definition mlx5_ib_devx_defs[]; extern const struct uapi_definition mlx5_ib_flow_defs[]; diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c index 8f6c04f12531..598a09796d09 100644 --- a/drivers/infiniband/hw/mlx5/restrack.c +++ b/drivers/infiniband/hw/mlx5/restrack.c @@ -8,10 +8,9 @@ #include #include "mlx5_ib.h" -static int fill_stat_mr_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, + struct ib_mr *ibmr) { - struct ib_mr *ibmr = container_of(res, struct ib_mr, res); struct mlx5_ib_mr *mr = to_mmr(ibmr); struct nlattr *table_attr; @@ -41,10 +40,9 @@ err: return -EMSGSIZE; } -static int fill_res_mr_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg, + struct ib_mr *ibmr) { - struct ib_mr *ibmr = container_of(res, struct ib_mr, res); struct mlx5_ib_mr *mr = to_mmr(ibmr); struct nlattr *table_attr; @@ -70,21 +68,3 @@ err: nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } - -int mlx5_ib_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (res->type 
== RDMA_RESTRACK_MR) - return fill_res_mr_entry(msg, res); - - return 0; -} - -int mlx5_ib_fill_stat_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (res->type == RDMA_RESTRACK_MR) - return fill_stat_mr_entry(msg, res); - - return 0; -} diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ef2f3986c493..117a0e802aa1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2583,6 +2583,7 @@ struct ib_device_ops { */ int (*fill_res_entry)(struct sk_buff *msg, struct rdma_restrack_entry *entry); + int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); /* Device lifecycle callbacks */ /* @@ -2637,8 +2638,7 @@ struct ib_device_ops { * Allows rdma drivers to add their own restrack attributes * dumped via 'rdma stat' iproute2 command. */ - int (*fill_stat_entry)(struct sk_buff *msg, - struct rdma_restrack_entry *entry); + int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_cq); -- cgit v1.2.3-59-g8ed1b From 9e2a187a93c395f573ed38b888ba4bd731e70622 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:37 +0300 Subject: RDMA: Add a dedicated CQ resource tracker function In order to avoid double multiplexing of the resource when it is a CQ, add a dedicated callback function. Link: https://lore.kernel.org/r/20200623113043.1228482-6-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/nldev.c | 5 ++--- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 + drivers/infiniband/hw/cxgb4/provider.c | 1 + drivers/infiniband/hw/cxgb4/restrack.c | 5 +---- drivers/infiniband/hw/hns/hns_roce_device.h | 4 ++-- drivers/infiniband/hw/hns/hns_roce_main.c | 2 +- drivers/infiniband/hw/hns/hns_roce_restrack.c | 14 ++------------ include/rdma/ib_verbs.h | 1 + 9 files changed, 12 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index ffdf9787e7f6..9eeac8cb600e 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2617,6 +2617,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, drain_rq); SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); + SET_DEVICE_OP(dev_ops, fill_res_cq_entry); SET_DEVICE_OP(dev_ops, fill_res_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index a4f3f838d6fe..707f724db1dd 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -598,9 +598,8 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_cq_entry) + return dev->ops.fill_res_cq_entry(msg, cq); return 0; err: return -EMSGSIZE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 5b9884ca2f5e..18a2c1a44dcc 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1056,6 +1056,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp); typedef int c4iw_restrack_func(struct sk_buff *msg, struct rdma_restrack_entry *res); int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); +int 
c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq); extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 36eeb595d41c..d6b20aa314a0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -485,6 +485,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .destroy_cq = c4iw_destroy_cq, .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, + .fill_res_cq_entry = c4iw_fill_res_cq_entry, .fill_res_entry = fill_res_entry, .fill_res_mr_entry = c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index 9a5ca9192c1c..ead2cd08793d 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -372,10 +372,8 @@ err: return -EMSGSIZE; } -static int fill_res_cq_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq) { - struct ib_cq *ibcq = container_of(res, struct ib_cq, res); struct c4iw_cq *chp = to_c4iw_cq(ibcq); struct nlattr *table_attr; struct t4_cqe hwcqes[2]; @@ -494,5 +492,4 @@ err: c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = fill_res_qp_entry, [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, - [RDMA_RESTRACK_CQ] = fill_res_cq_entry, }; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a77fa6730b2d..a61f0c4d4dbb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1266,6 +1266,6 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); int hns_roce_init(struct hns_roce_dev *hr_dev); void hns_roce_exit(struct hns_roce_dev *hr_dev); -int hns_roce_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res); +int hns_roce_fill_res_cq_entry(struct sk_buff *msg, + struct ib_cq *ib_cq); #endif /* _HNS_ROCE_DEVICE_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 50763cf4fa3d..5907cfd878a6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -428,7 +428,7 @@ static const struct ib_device_ops hns_roce_dev_ops = { .destroy_ah = hns_roce_destroy_ah, .destroy_cq = hns_roce_destroy_cq, .disassociate_ucontext = hns_roce_disassociate_ucontext, - .fill_res_entry = hns_roce_fill_res_entry, + .fill_res_cq_entry = hns_roce_fill_res_cq_entry, .get_dma_mr = hns_roce_get_dma_mr, .get_link_layer = hns_roce_get_link_layer, .get_port_immutable = hns_roce_port_immutable, diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 06871731ac43..259444c0a630 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -76,10 +76,9 @@ err: return -EMSGSIZE; } -static int hns_roce_fill_res_cq_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int hns_roce_fill_res_cq_entry(struct sk_buff *msg, + struct ib_cq *ib_cq) { - struct ib_cq *ib_cq = container_of(res, struct ib_cq, res); struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); struct hns_roce_v2_cq_context *context; @@ -119,12 +118,3 @@ err: kfree(context); return ret; } - -int hns_roce_fill_res_entry(struct sk_buff *msg, - struct 
rdma_restrack_entry *res) -{ - if (res->type == RDMA_RESTRACK_CQ) - return hns_roce_fill_res_cq_entry(msg, res); - - return 0; -} diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 117a0e802aa1..097b1d497d5f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2584,6 +2584,7 @@ struct ib_device_ops { int (*fill_res_entry)(struct sk_buff *msg, struct rdma_restrack_entry *entry); int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); + int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); /* Device lifecycle callbacks */ /* -- cgit v1.2.3-59-g8ed1b From 5cc34116ccec60032dbaa92768f41e95ce2d8ec7 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:38 +0300 Subject: RDMA: Add dedicated QP resource tracker function In order to avoid double multiplexing of the resource when it is a QP, add a dedicated callback function. Link: https://lore.kernel.org/r/20200623113043.1228482-7-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/nldev.c | 5 ++--- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 + drivers/infiniband/hw/cxgb4/restrack.c | 5 +---- include/rdma/ib_verbs.h | 1 + 5 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9eeac8cb600e..f94989274df5 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2620,6 +2620,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, fill_res_cq_entry); SET_DEVICE_OP(dev_ops, fill_res_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); + SET_DEVICE_OP(dev_ops, fill_res_qp_entry); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 707f724db1dd..79d0980a75e0 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -507,9 +507,8 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_qp_entry) + return dev->ops.fill_res_qp_entry(msg, qp); return 0; err: return -EMSGSIZE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 18a2c1a44dcc..c84aa7c937f1 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1057,6 +1057,7 @@ typedef int c4iw_restrack_func(struct sk_buff *msg, struct rdma_restrack_entry *res); int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq); +int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp); extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; #endif diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index ead2cd08793d..5144d3b67293 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -134,10 +134,8 @@ err: return -EMSGSIZE; } -static int fill_res_qp_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp) { - struct ib_qp *ibqp = container_of(res, struct ib_qp, res); struct t4_swsqe 
*fsp = NULL, *lsp = NULL; struct c4iw_qp *qhp = to_c4iw_qp(ibqp); u16 first_sq_idx = 0, last_sq_idx = 0; @@ -490,6 +488,5 @@ err: } c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { - [RDMA_RESTRACK_QP] = fill_res_qp_entry, [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, }; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 097b1d497d5f..4e8519ac7363 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2585,6 +2585,7 @@ struct ib_device_ops { struct rdma_restrack_entry *entry); int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); + int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); /* Device lifecycle callbacks */ /* -- cgit v1.2.3-59-g8ed1b From 211cd9459fdabe9f556e539966f50825853bf262 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:39 +0300 Subject: RDMA: Add dedicated CM_ID resource tracker function In order to avoid double multiplexing of the resource when it is a cm id, add a dedicated callback function. In addition remove fill_res_entry which is not used anymore. Link: https://lore.kernel.org/r/20200623113043.1228482-8-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 2 +- drivers/infiniband/core/nldev.c | 13 ++----------- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 4 +--- drivers/infiniband/hw/cxgb4/provider.c | 9 +-------- drivers/infiniband/hw/cxgb4/restrack.c | 9 ++------- include/rdma/ib_verbs.h | 4 ++-- 6 files changed, 9 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f94989274df5..cbe95e729cf1 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2617,8 +2617,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, drain_rq); SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); + SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry); SET_DEVICE_OP(dev_ops, fill_res_cq_entry); - SET_DEVICE_OP(dev_ops, fill_res_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); SET_DEVICE_OP(dev_ops, fill_res_qp_entry); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 79d0980a75e0..394e307c342c 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -446,14 +446,6 @@ static int fill_res_name_pid(struct sk_buff *msg, return err ? 
-EMSGSIZE : 0; } -static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (!dev->ops.fill_res_entry) - return false; - return dev->ops.fill_res_entry(msg, res); -} - static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { @@ -559,9 +551,8 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_cm_id_entry) + return dev->ops.fill_res_cm_id_entry(msg, cm_id); return 0; err: return -EMSGSIZE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index c84aa7c937f1..27da0705c88a 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1053,11 +1053,9 @@ int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr); struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp); -typedef int c4iw_restrack_func(struct sk_buff *msg, - struct rdma_restrack_entry *res); int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq); int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp); -extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; +int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id); #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index d6b20aa314a0..1d3ff59e4060 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -458,13 +458,6 @@ static void get_dev_fw_str(struct ib_device *dev, char *str) FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers)); } -static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res) -{ - return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) && - c4iw_restrack_funcs[res->type]) ? 
- c4iw_restrack_funcs[res->type](msg, res) : 0; -} - static const struct ib_device_ops c4iw_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_CXGB4, @@ -486,7 +479,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, .fill_res_cq_entry = c4iw_fill_res_cq_entry, - .fill_res_entry = fill_res_entry, + .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry, .fill_res_mr_entry = c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = c4iw_get_dma_mr, diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index 5144d3b67293..b32e6516d65f 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -193,10 +193,9 @@ union union_ep { struct c4iw_ep ep; }; -static int fill_res_ep_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, + struct rdma_cm_id *cm_id) { - struct rdma_cm_id *cm_id = rdma_res_to_id(res); struct nlattr *table_attr; struct c4iw_ep_common *epcp; struct c4iw_listen_ep *listen_ep = NULL; @@ -486,7 +485,3 @@ err_cancel_table: err: return -EMSGSIZE; } - -c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { - [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, -}; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 4e8519ac7363..9127cffafccd 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -75,6 +75,7 @@ struct ib_umem_odp; struct ib_uqp_object; struct ib_usrq_object; struct ib_uwq_object; +struct rdma_cm_id; extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -2581,11 +2582,10 @@ struct ib_device_ops { /** * Allows rdma drivers to add their own restrack attributes. */ - int (*fill_res_entry)(struct sk_buff *msg, - struct rdma_restrack_entry *entry); int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); + int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); /* Device lifecycle callbacks */ /* -- cgit v1.2.3-59-g8ed1b From 192a3aa0e4e20e1087baa29183c5d64d48716fa9 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:47 +0300 Subject: drm: edid: Constify connector argument to infoframe functions The drm_hdmi_avi_infoframe_from_display_mode(), drm_hdmi_vendor_infoframe_from_display_mode() and drm_hdmi_avi_infoframe_quant_range() functions take a drm_connector that they don't modify. Mark it as const. 
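For illustration only (not part of this change), a caller holding only a const pointer to the connector can now build infoframes directly. The helper below is hypothetical and assumes the usual <linux/hdmi.h> declarations:

static int example_fill_avi_frame(const struct drm_connector *connector,
                                  const struct drm_display_mode *mode,
                                  void *buf, size_t len)
{
        struct hdmi_avi_infoframe frame;
        int ret;

        /* Both the connector and the mode may be const-qualified now. */
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (ret < 0)
                return ret;

        drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
                                           HDMI_QUANTIZATION_RANGE_LIMITED);

        /* Pack the frame into a buffer for the encoder hardware. */
        return hdmi_avi_infoframe_pack(&frame, buf, len);
}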
Signed-off-by: Laurent Pinchart Acked-by: Sam Ravnborg Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-10-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/drm_edid.c | 12 ++++++------ include/drm/drm_edid.h | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 57cac677269d..54b1d4f297df 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5359,7 +5359,7 @@ void drm_set_preferred_mode(struct drm_connector *connector, } EXPORT_SYMBOL(drm_set_preferred_mode); -static bool is_hdmi2_sink(struct drm_connector *connector) +static bool is_hdmi2_sink(const struct drm_connector *connector) { /* * FIXME: sil-sii8620 doesn't have a connector around when @@ -5444,7 +5444,7 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata); -static u8 drm_mode_hdmi_vic(struct drm_connector *connector, +static u8 drm_mode_hdmi_vic(const struct drm_connector *connector, const struct drm_display_mode *mode) { bool has_hdmi_infoframe = connector ? @@ -5460,7 +5460,7 @@ static u8 drm_mode_hdmi_vic(struct drm_connector *connector, return drm_match_hdmi_mode(mode); } -static u8 drm_mode_cea_vic(struct drm_connector *connector, +static u8 drm_mode_cea_vic(const struct drm_connector *connector, const struct drm_display_mode *mode) { u8 vic; @@ -5498,7 +5498,7 @@ static u8 drm_mode_cea_vic(struct drm_connector *connector, */ int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, - struct drm_connector *connector, + const struct drm_connector *connector, const struct drm_display_mode *mode) { enum hdmi_picture_aspect picture_aspect; @@ -5645,7 +5645,7 @@ EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace); */ void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, - struct drm_connector *connector, + const struct drm_connector *connector, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range) { @@ -5749,7 +5749,7 @@ s3d_structure_from_display_mode(const struct drm_display_mode *mode) */ int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, - struct drm_connector *connector, + const struct drm_connector *connector, const struct drm_display_mode *mode) { /* diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 34b15e3d070c..43254319ab19 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -361,11 +361,11 @@ drm_load_edid_firmware(struct drm_connector *connector) int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, - struct drm_connector *connector, + const struct drm_connector *connector, const struct drm_display_mode *mode); int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, - struct drm_connector *connector, + const struct drm_connector *connector, const struct drm_display_mode *mode); void @@ -378,7 +378,7 @@ drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, - struct drm_connector *connector, + const struct drm_connector *connector, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range); -- cgit v1.2.3-59-g8ed1b From 12c683e12cd8e2dcf7b7143bebceae484d17727a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:48 +0300 Subject: drm: bridge: Pass 
drm_display_info to drm_bridge_funcs .mode_valid() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When validating a mode, bridges may need to do so in the context of a display, as specified by drm_display_info. An example is the meson dw-hdmi bridge that needs to consider the YUV 4:2:0 output format to perform clock calculations. Bridges that need the display info currently retrieve it from the drm_connector created by the bridge. This gets in the way of moving connector creation out of bridge drivers. To make this possible, pass the drm_display_info to drm_bridge_funcs .mode_valid(). Changes to the bridge drivers have been performed with the following coccinelle semantic patch and have been compile-tested. @ rule1 @ identifier funcs; identifier fn; @@ struct drm_bridge_funcs funcs = { ..., .mode_valid = fn }; @ depends on rule1 @ identifier rule1.fn; identifier bridge; identifier mode; @@ enum drm_mode_status fn( struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode ) { ... } Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Reviewed-by: Boris Brezillon Reviewed-by: Guido Günther # for the nwl-dsi part: Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-11-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 1 + drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 1 + drivers/gpu/drm/bridge/cdns-dsi.c | 1 + drivers/gpu/drm/bridge/chrontel-ch7033.c | 1 + drivers/gpu/drm/bridge/nwl-dsi.c | 1 + drivers/gpu/drm/bridge/sii9234.c | 1 + drivers/gpu/drm/bridge/sil-sii8620.c | 1 + drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 1 + drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 1 + drivers/gpu/drm/bridge/tc358767.c | 1 + drivers/gpu/drm/bridge/tc358768.c | 1 + drivers/gpu/drm/bridge/thc63lvd1024.c | 1 + drivers/gpu/drm/bridge/ti-tfp410.c | 1 + drivers/gpu/drm/drm_atomic_helper.c | 3 ++- drivers/gpu/drm/drm_bridge.c | 4 +++- drivers/gpu/drm/drm_probe_helper.c | 4 +++- drivers/gpu/drm/i2c/tda998x_drv.c | 1 + drivers/gpu/drm/omapdrm/dss/dpi.c | 1 + drivers/gpu/drm/omapdrm/dss/sdi.c | 1 + drivers/gpu/drm/omapdrm/dss/venc.c | 1 + include/drm/drm_bridge.h | 3 +++ 21 files changed, 28 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 2bc6e4f85171..371f4a9f866d 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -585,6 +585,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status anx6345_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c index 0d5a5ad0c9ee..81debd02c169 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c @@ -944,6 +944,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status anx78xx_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index 
69c3892caee5..76373e31df92 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -663,6 +663,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c index f8675d82974b..486f405c2e16 100644 --- a/drivers/gpu/drm/bridge/chrontel-ch7033.c +++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c @@ -317,6 +317,7 @@ static void ch7033_bridge_detach(struct drm_bridge *bridge) } static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock > 165000) diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index b14d725bf609..77a79af70914 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -818,6 +818,7 @@ static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge, static enum drm_mode_status nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c index b1258f0ed205..15c98a7bd81c 100644 --- a/drivers/gpu/drm/bridge/sii9234.c +++ b/drivers/gpu/drm/bridge/sii9234.c @@ -873,6 +873,7 @@ static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge) } static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock > MHL1_MAX_CLK) diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 92acd336aa89..7c0c93c7e61f 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2244,6 +2244,7 @@ static int sii8620_is_packing_required(struct sii8620 *ctx, } static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct sii8620 *ctx = bridge_to_sii8620(bridge); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 30681398cfb0..b535354150db 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2767,6 +2767,7 @@ static void dw_hdmi_bridge_detach(struct drm_bridge *bridge) static enum drm_mode_status dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct dw_hdmi *hdmi = bridge->driver_private; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 5ef0f154aa7b..c223fb9a04cb 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -924,6 +924,7 @@ static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge) static enum drm_mode_status dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 
e4c0ea03ae3a..c2777b226c75 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1306,6 +1306,7 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, } static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct tc_data *tc = bridge_to_tc(bridge); diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index 6650fe4cfc20..4a463fadf743 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -529,6 +529,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status tc358768_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index 97d8129760e9..86b06975bfdd 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -51,6 +51,7 @@ static int thc63_attach(struct drm_bridge *bridge, } static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct thc63_dev *thc63 = to_thc63(bridge); diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 6467e6f2f1fa..ba3fa2a9b8a4 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -192,6 +192,7 @@ static void tfp410_disable(struct drm_bridge *bridge) } static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock < 25000) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index a1898c58ae3c..f68c69a45752 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -506,7 +506,8 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector, } bridge = drm_bridge_chain_get_first_bridge(encoder); - ret = drm_bridge_chain_mode_valid(bridge, mode); + ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info, + mode); if (ret != MODE_OK) { DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n"); return ret; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index fe1e3460b486..64f0effb52ac 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -377,6 +377,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_fixup); * drm_bridge_chain_mode_valid - validate the mode against all bridges in the * encoder chain. 
* @bridge: bridge control structure + * @info: display info against which the mode shall be validated * @mode: desired mode to be validated * * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder @@ -390,6 +391,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_fixup); */ enum drm_mode_status drm_bridge_chain_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct drm_encoder *encoder; @@ -404,7 +406,7 @@ drm_bridge_chain_mode_valid(struct drm_bridge *bridge, if (!bridge->funcs->mode_valid) continue; - ret = bridge->funcs->mode_valid(bridge, mode); + ret = bridge->funcs->mode_valid(bridge, info, mode); if (ret != MODE_OK) return ret; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 26e997f1524f..09e872e61315 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -114,7 +114,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, } bridge = drm_bridge_chain_get_first_bridge(encoder); - ret = drm_bridge_chain_mode_valid(bridge, mode); + ret = drm_bridge_chain_mode_valid(bridge, + &connector->display_info, + mode); if (ret != MODE_OK) { /* There is also no point in continuing for crtc check * here. */ diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9517f522dcb9..50fd119a5276 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1379,6 +1379,7 @@ static void tda998x_bridge_detach(struct drm_bridge *bridge) } static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { /* TDA19988 dotclock can go up to 165MHz */ diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 5110acb0c6c1..1d2992daef40 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -434,6 +434,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status dpi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct dpi_data *dpi = drm_bridge_to_dpi(bridge); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 417a8740ad0a..033fd30074b0 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -140,6 +140,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status sdi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 9701843ccf09..4406ce2a08b4 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -548,6 +548,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge, static enum drm_mode_status venc_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode) { switch (venc_get_videomode(mode)) { diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index ea2aa5ebae34..e3d7f36d8c39 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -35,6 +35,7 @@ struct drm_bridge; struct drm_bridge_timings; struct drm_connector; +struct drm_display_info; struct drm_panel; struct edid; struct i2c_adapter; @@ 
-112,6 +113,7 @@ struct drm_bridge_funcs { * drm_mode_status Enum */ enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode); /** @@ -836,6 +838,7 @@ bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge, struct drm_display_mode *adjusted_mode); enum drm_mode_status drm_bridge_chain_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, const struct drm_display_mode *mode); void drm_bridge_chain_disable(struct drm_bridge *bridge); void drm_bridge_chain_post_disable(struct drm_bridge *bridge); -- cgit v1.2.3-59-g8ed1b From 96591a4b93fb8b335941783dd6e7ded9d6d49f09 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:49 +0300 Subject: drm: bridge: dw-hdmi: Pass private data pointer to .mode_valid() Platform glue drivers for dw_hdmi may need to access device-specific data from their .mode_valid() implementation. They currently have no clean way to do so, and one driver hacks around it by accessing the dev_private data of the drm_device retrieved from the connector. Add a priv_data void pointer to the dw_hdmi_plat_data structure, and pass it to the .mode_valid() function. Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-12-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6 ++++-- drivers/gpu/drm/imx/dw_hdmi-imx.c | 6 ++++-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 3 ++- drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 3 ++- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 3 ++- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 6 ++++-- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 3 ++- include/drm/bridge/dw_hdmi.h | 14 ++++++++++++-- 8 files changed, 32 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index b535354150db..2b3f203cf467 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2771,6 +2771,7 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct dw_hdmi *hdmi = bridge->driver_private; + const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; struct drm_connector *connector = &hdmi->connector; enum drm_mode_status mode_status = MODE_OK; @@ -2778,8 +2779,9 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_BAD; - if (hdmi->plat_data->mode_valid) - mode_status = hdmi->plat_data->mode_valid(connector, mode); + if (pdata->mode_valid) + mode_status = pdata->mode_valid(hdmi, pdata->priv_data, + connector, mode); return mode_status; } diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index ba4ca17fd4d8..95aed4666c95 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -145,7 +145,8 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = }; static enum drm_mode_status -imx6q_hdmi_mode_valid(struct drm_connector *con, +imx6q_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, + struct drm_connector *con, const struct drm_display_mode *mode) { if (mode->clock < 13500) @@ -158,7 +159,8 @@ imx6q_hdmi_mode_valid(struct drm_connector *con, } static enum drm_mode_status -imx6dl_hdmi_mode_valid(struct drm_connector *con, +imx6dl_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, + struct 
drm_connector *con, const struct drm_display_mode *mode) { if (mode->clock < 13500) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 5be963e9db05..5cc311c1b8e0 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -630,7 +630,8 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) } static enum drm_mode_status -dw_hdmi_mode_valid(struct drm_connector *connector, +dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, const struct drm_display_mode *mode) { struct meson_drm *priv = connector->dev->dev_private; diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c index 452461dc96f2..4d837a4d302d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -38,7 +38,8 @@ static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = { }; static enum drm_mode_status -rcar_hdmi_mode_valid(struct drm_connector *connector, +rcar_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, const struct drm_display_mode *mode) { /* diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 121aa8a63a76..d08f86783a28 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -220,7 +220,8 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) } static enum drm_mode_status -dw_hdmi_rockchip_mode_valid(struct drm_connector *connector, +dw_hdmi_rockchip_mode_valid(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, const struct drm_display_mode *mode) { const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg; diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 972682bb8000..0a3637442ba6 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -31,7 +31,8 @@ sun8i_dw_hdmi_encoder_helper_funcs = { }; static enum drm_mode_status -sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector, +sun8i_dw_hdmi_mode_valid_a83t(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, const struct drm_display_mode *mode) { if (mode->clock > 297000) @@ -41,7 +42,8 @@ sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector, } static enum drm_mode_status -sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, +sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, const struct drm_display_mode *mode) { /* diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index 8e64945167e9..8587b8d2590e 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -176,7 +176,8 @@ struct sun8i_hdmi_phy { }; struct sun8i_dw_hdmi_quirks { - enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, const struct drm_display_mode *mode); unsigned int set_rate : 1; unsigned int use_drm_infoframe : 1; diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 0b34a12c4a1c..66a811f75b91 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -124,13 +124,23 @@ struct dw_hdmi_phy_ops { struct dw_hdmi_plat_data { struct regmap *regm; - enum drm_mode_status (*mode_valid)(struct 
drm_connector *connector, - const struct drm_display_mode *mode); + unsigned long input_bus_format; unsigned long input_bus_encoding; bool use_drm_infoframe; bool ycbcr_420_allowed; + /* + * Private data passed to all the .mode_valid() and .configure_phy() + * callback functions. + */ + void *priv_data; + + /* Platform-specific mode validation (optional). */ + enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, + struct drm_connector *connector, + const struct drm_display_mode *mode); + /* Vendor PHY support */ const struct dw_hdmi_phy_ops *phy_ops; const char *phy_name; -- cgit v1.2.3-59-g8ed1b From 49da7e5d84e3b520355c0b6148d6dc9e5415a13e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:50 +0300 Subject: drm: bridge: dw-hdmi: Pass private data pointer to .configure_phy() The .configure_phy() operation takes a dw_hdmi_plat_data pointer as a context argument. This differs from .mode_valid() that takes a custom private context pointer, causing possible confusion. Make the dw_hdmi_plat_data operations more consistent by passing the private context pointer to .configure_phy() as well. Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-13-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 2 +- drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 3 +-- include/drm/bridge/dw_hdmi.h | 3 +-- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 2b3f203cf467..6edb60e6c784 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1514,7 +1514,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi) /* Write to the PHY as configured by the platform */ if (pdata->configure_phy) - ret = pdata->configure_phy(hdmi, pdata, mpixelclock); + ret = pdata->configure_phy(hdmi, pdata->priv_data, mpixelclock); else ret = phy->configure(hdmi, pdata, mpixelclock); if (ret) { diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c index 4d837a4d302d..d0dffe55a7cb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -52,8 +52,7 @@ rcar_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, return MODE_OK; } -static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, - const struct dw_hdmi_plat_data *pdata, +static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, void *data, unsigned long mpixelclock) { const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params; diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 66a811f75b91..09348c9cbd11 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -151,8 +151,7 @@ struct dw_hdmi_plat_data { const struct dw_hdmi_mpll_config *mpll_cfg; const struct dw_hdmi_curr_ctrl *cur_ctr; const struct dw_hdmi_phy_config *phy_config; - int (*configure_phy)(struct dw_hdmi *hdmi, - const struct dw_hdmi_plat_data *pdata, + int (*configure_phy)(struct dw_hdmi *hdmi, void *data, unsigned long mpixelclock); }; -- cgit v1.2.3-59-g8ed1b From 29fc89719d396e81176974ce37e0cc81e23869d8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:51 +0300 Subject: drm: bridge: dw-hdmi: Remove unused field from dw_hdmi_plat_data The input_bus_format field of struct dw_hdmi_plat_data is unused. Remove it. 
Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-14-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 5 +---- include/drm/bridge/dw_hdmi.h | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 6edb60e6c784..adc5a95a06e9 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2137,10 +2137,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; - if (hdmi->plat_data->input_bus_format) - hdmi->hdmi_data.enc_in_bus_format = - hdmi->plat_data->input_bus_format; - else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED) + if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED) hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24; /* TOFIX: Get input encoding from plat data or fallback to none */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 09348c9cbd11..5dfa9d83e2d3 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -125,7 +125,6 @@ struct dw_hdmi_plat_data { struct regmap *regm; - unsigned long input_bus_format; unsigned long input_bus_encoding; bool use_drm_infoframe; bool ycbcr_420_allowed; -- cgit v1.2.3-59-g8ed1b From af05bba0fbe2c07fe500f697080d78d050be2fbf Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:53 +0300 Subject: drm: bridge: dw-hdmi: Pass drm_display_info to .mode_valid() Replace the drm_connector pointer passed to the .mode_valid() function with a const drm_display_info pointer, as that's all the function should need. Use the display info passed to the bridge .mode_valid() operation instead of retrieving it from the connector, to prepare for making connector creation optional.
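As a purely illustrative sketch (not part of this patch), a platform glue driver using the new callback signature can validate a mode against the display info alone; example_hdmi_mode_valid and its clock check are hypothetical:

static enum drm_mode_status
example_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
                        const struct drm_display_info *info,
                        const struct drm_display_mode *mode)
{
        /* 'data' is the priv_data pointer from dw_hdmi_plat_data. */
        if (info->max_tmds_clock && mode->clock > info->max_tmds_clock)
                return MODE_CLOCK_HIGH;

        return MODE_OK;
}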
Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-16-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 5 ++--- drivers/gpu/drm/imx/dw_hdmi-imx.c | 4 ++-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 20 ++++++++++---------- drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 2 +- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2 +- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 4 ++-- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 2 +- include/drm/bridge/dw_hdmi.h | 4 ++-- 8 files changed, 21 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index adc5a95a06e9..23650e69604c 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2769,7 +2769,6 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, { struct dw_hdmi *hdmi = bridge->driver_private; const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; - struct drm_connector *connector = &hdmi->connector; enum drm_mode_status mode_status = MODE_OK; /* We don't support double-clocked modes */ @@ -2777,8 +2776,8 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, return MODE_BAD; if (pdata->mode_valid) - mode_status = pdata->mode_valid(hdmi, pdata->priv_data, - connector, mode); + mode_status = pdata->mode_valid(hdmi, pdata->priv_data, info, + mode); return mode_status; } diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 95aed4666c95..2dc93fa6ecb6 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -146,7 +146,7 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = static enum drm_mode_status imx6q_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - struct drm_connector *con, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock < 13500) @@ -160,7 +160,7 @@ imx6q_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, static enum drm_mode_status imx6dl_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - struct drm_connector *con, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock < 13500) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 34ba94922605..71d599970ec7 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -631,12 +631,12 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) static enum drm_mode_status dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *display_info, const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = data; struct meson_drm *priv = dw_hdmi->priv; - bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; + bool is_hdmi2_sink = display_info->hdmi.scdc.supported; unsigned int phy_freq; unsigned int vclk_freq; unsigned int venc_freq; @@ -647,10 +647,10 @@ dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ - if (connector->display_info.max_tmds_clock && - mode->clock > connector->display_info.max_tmds_clock && - !drm_mode_is_420_only(&connector->display_info, mode) && - !drm_mode_is_420_also(&connector->display_info, mode)) + if 
(display_info->max_tmds_clock && + mode->clock > display_info->max_tmds_clock && + !drm_mode_is_420_only(display_info, mode) && + !drm_mode_is_420_also(display_info, mode)) return MODE_BAD; /* Check against non-VIC supported modes */ @@ -667,9 +667,9 @@ dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, vclk_freq = mode->clock; /* For 420, pixel clock is half unlike venc clock */ - if (drm_mode_is_420_only(&connector->display_info, mode) || + if (drm_mode_is_420_only(display_info, mode) || (!is_hdmi2_sink && - drm_mode_is_420_also(&connector->display_info, mode))) + drm_mode_is_420_also(display_info, mode))) vclk_freq /= 2; /* TMDS clock is pixel_clock * 10 */ @@ -684,9 +684,9 @@ dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, /* VENC double pixels for 1080i, 720p and YUV420 modes */ if (meson_venc_hdmi_venc_repeat(vic) || - drm_mode_is_420_only(&connector->display_info, mode) || + drm_mode_is_420_only(display_info, mode) || (!is_hdmi2_sink && - drm_mode_is_420_also(&connector->display_info, mode))) + drm_mode_is_420_also(display_info, mode))) venc_freq *= 2; vclk_freq = max(venc_freq, hdmi_freq); diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c index d0dffe55a7cb..7b8ec8310699 100644 --- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -39,7 +39,7 @@ static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = { static enum drm_mode_status rcar_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *info, const struct drm_display_mode *mode) { /* diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index d08f86783a28..d286751bb333 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -221,7 +221,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) static enum drm_mode_status dw_hdmi_rockchip_mode_valid(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *info, const struct drm_display_mode *mode) { const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg; diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 0a3637442ba6..d4c08043dd81 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -32,7 +32,7 @@ sun8i_dw_hdmi_encoder_helper_funcs = { static enum drm_mode_status sun8i_dw_hdmi_mode_valid_a83t(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock > 297000) @@ -43,7 +43,7 @@ sun8i_dw_hdmi_mode_valid_a83t(struct dw_hdmi *hdmi, void *data, static enum drm_mode_status sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *info, const struct drm_display_mode *mode) { /* diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index 8587b8d2590e..d983746fa194 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -177,7 +177,7 @@ struct sun8i_hdmi_phy { struct sun8i_dw_hdmi_quirks { enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *info, const struct drm_display_mode *mode); unsigned int set_rate : 1; unsigned int use_drm_infoframe : 1; 
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 5dfa9d83e2d3..fec293b21c2e 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -8,7 +8,7 @@ #include -struct drm_connector; +struct drm_display_info; struct drm_display_mode; struct drm_encoder; struct dw_hdmi; @@ -137,7 +137,7 @@ struct dw_hdmi_plat_data { /* Platform-specific mode validation (optional). */ enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, - struct drm_connector *connector, + const struct drm_display_info *info, const struct drm_display_mode *mode); /* Vendor PHY support */ -- cgit v1.2.3-59-g8ed1b From 35a395f1134bbbd2984dcca28c04f09fbbb8b0a4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:54 +0300 Subject: drm: bridge: dw-hdmi: Constify mode argument to dw_hdmi_phy_ops .init() The PHY .init() must not modify the mode it receives. Make the pointer const to enforce that. Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-17-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 2 +- drivers/gpu/drm/meson/meson_dw_hdmi.c | 4 ++-- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2 +- drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 2 +- include/drm/bridge/dw_hdmi.h | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 23650e69604c..6e6a3d95e68e 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1531,7 +1531,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi) static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { int i, ret; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 71d599970ec7..757c17fbb29f 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -297,7 +297,7 @@ static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, /* Setup PHY bandwidth modes */ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct meson_drm *priv = dw_hdmi->priv; unsigned int pixel_clock = mode->clock; @@ -427,7 +427,7 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, } static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data; struct meson_drm *priv = dw_hdmi->priv; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index d286751bb333..10e210f6455d 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -312,7 +312,7 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun }; static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data; diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 43643ad31730..8e078cacf063 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++
b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -341,7 +341,7 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi, } static int sun8i_hdmi_phy_config(struct dw_hdmi *hdmi, void *data, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct sun8i_hdmi_phy *phy = (struct sun8i_hdmi_phy *)data; u32 val = 0; diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index fec293b21c2e..f930d218cc6b 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -114,7 +114,7 @@ struct dw_hdmi_phy_config { struct dw_hdmi_phy_ops { int (*init)(struct dw_hdmi *hdmi, void *data, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); void (*disable)(struct dw_hdmi *hdmi, void *data); enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); void (*update_hpd)(struct dw_hdmi *hdmi, void *data, -- cgit v1.2.3-59-g8ed1b From 7be390d4c0a125266c558c30a3687d931c3b6101 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 May 2020 04:14:56 +0300 Subject: drm: bridge: dw-hdmi: Pass drm_display_info to dw_hdmi_support_scdc() To prepare for making connector creation optional in the driver, pass the drm_display_info explicitly to dw_hdmi_support_scdc(). The pointer is passed to the callers where required, particularly to the dw_hdmi_phy_ops .init() function. Signed-off-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200526011505.31884-19-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 32 +++++++++++++++++------------ drivers/gpu/drm/meson/meson_dw_hdmi.c | 3 ++- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 1 + drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 1 + include/drm/bridge/dw_hdmi.h | 4 +++- 5 files changed, 26 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 5b5f07a23400..a18794cce0d8 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1241,10 +1241,9 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); /* Filter out invalid setups to avoid configuring SCDC and scrambling */ -static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi) +static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi, + const struct drm_display_info *display) { - struct drm_display_info *display = &hdmi->connector.display_info; - /* Completely disable SCDC support for older controllers */ if (hdmi->version < 0x200a) return false; @@ -1282,12 +1281,13 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi) * helper should called right before enabling the TMDS Clock and Data in * the PHY configuration callback. 
*/ -void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi) +void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi, + const struct drm_display_info *display) { unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */ - if (dw_hdmi_support_scdc(hdmi)) { + if (dw_hdmi_support_scdc(hdmi, display)) { if (mtmdsclock > HDMI14_MAX_TMDSCLK) drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1); else @@ -1490,7 +1490,8 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, return 0; } -static int hdmi_phy_configure(struct dw_hdmi *hdmi) +static int hdmi_phy_configure(struct dw_hdmi *hdmi, + const struct drm_display_info *display) { const struct dw_hdmi_phy_data *phy = hdmi->phy.data; const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; @@ -1500,7 +1501,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi) dw_hdmi_phy_power_off(hdmi); - dw_hdmi_set_high_tmds_clock_ratio(hdmi); + dw_hdmi_set_high_tmds_clock_ratio(hdmi, display); /* Leave low power consumption mode by asserting SVSRET. */ if (phy->has_svsret) @@ -1531,6 +1532,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi) } static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *display, const struct drm_display_mode *mode) { int i, ret; @@ -1540,7 +1542,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi_phy_sel_data_en_pol(hdmi, 1); dw_hdmi_phy_sel_interface_control(hdmi, 0); - ret = hdmi_phy_configure(hdmi); + ret = hdmi_phy_configure(hdmi, display); if (ret) return ret; } @@ -1846,10 +1848,11 @@ static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi) } static void hdmi_av_composer(struct dw_hdmi *hdmi, + const struct drm_display_info *display, const struct drm_display_mode *mode) { u8 inv_val, bytes; - struct drm_hdmi_info *hdmi_info = &hdmi->connector.display_info.hdmi; + const struct drm_hdmi_info *hdmi_info = &display->hdmi; struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode; int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len; unsigned int vdisplay, hdisplay; @@ -1882,7 +1885,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, /* Set up HDMI_FC_INVIDCONF */ inv_val = (hdmi->hdmi_data.hdcp_enable || - (dw_hdmi_support_scdc(hdmi) && + (dw_hdmi_support_scdc(hdmi, display) && (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || hdmi_info->scdc.scrambling.low_rates)) ? 
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : @@ -1950,7 +1953,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, } /* Scrambling Control */ - if (dw_hdmi_support_scdc(hdmi)) { + if (dw_hdmi_support_scdc(hdmi, display)) { if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || hdmi_info->scdc.scrambling.low_rates) { /* @@ -2116,6 +2119,7 @@ static void hdmi_disable_overflow_interrupts(struct dw_hdmi *hdmi) static int dw_hdmi_setup(struct dw_hdmi *hdmi, const struct drm_display_mode *mode) { + struct drm_connector *connector = &hdmi->connector; int ret; hdmi_disable_overflow_interrupts(hdmi); @@ -2161,10 +2165,12 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, hdmi->hdmi_data.video_mode.mdataenablepolarity = true; /* HDMI Initialization Step B.1 */ - hdmi_av_composer(hdmi, mode); + hdmi_av_composer(hdmi, &connector->display_info, mode); /* HDMI Initializateion Step B.2 */ - ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, &hdmi->previous_mode); + ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, + &connector->display_info, + &hdmi->previous_mode); if (ret) return ret; hdmi->phy.enabled = true; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 757c17fbb29f..15e62f327894 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -427,6 +427,7 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, } static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *display, const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data; @@ -496,7 +497,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, /* Disable clock, fifo, fifo_wr */ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0); - dw_hdmi_set_high_tmds_clock_ratio(hdmi); + dw_hdmi_set_high_tmds_clock_ratio(hdmi, display); msleep(100); diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 10e210f6455d..23de359a1dec 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -312,6 +312,7 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun }; static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data, + const struct drm_display_info *display, const struct drm_display_mode *mode) { struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data; diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 8e078cacf063..156d00e5165b 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -341,6 +341,7 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi, } static int sun8i_hdmi_phy_config(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *display, const struct drm_display_mode *mode) { struct sun8i_hdmi_phy *phy = (struct sun8i_hdmi_phy *)data; diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index f930d218cc6b..ea34ca146b82 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -114,6 +114,7 @@ struct dw_hdmi_phy_config { struct dw_hdmi_phy_ops { int (*init)(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *display, const struct drm_display_mode *mode); void (*disable)(struct dw_hdmi *hdmi, void *data); enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); @@ -174,7 +175,8 @@ void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 
*channel_status); void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca); void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); -void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi); +void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi, + const struct drm_display_info *display); /* PHY configuration */ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); -- cgit v1.2.3-59-g8ed1b From 79a28ddd18e9c653f13f60dfabee15c024e64b9b Mon Sep 17 00:00:00 2001 From: Alexandre Cassen Date: Tue, 23 Jun 2020 10:33:45 +0200 Subject: rtnetlink: add keepalived rtm_protocol Keepalived can set global static ip routes or virtual ip routes dynamically following VRRP protocol states. Using a dedicated rtm_protocol will help keeping track of it. Changes in v2: - fix tab/space indenting Signed-off-by: Alexandre Cassen Signed-off-by: David S. Miller --- include/uapi/linux/rtnetlink.h | 45 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 073e71ef6bdd..879e64950a0a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -257,12 +257,12 @@ enum { /* rtm_protocol */ -#define RTPROT_UNSPEC 0 -#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; - not used by current IPv4 */ -#define RTPROT_KERNEL 2 /* Route installed by kernel */ -#define RTPROT_BOOT 3 /* Route installed during boot */ -#define RTPROT_STATIC 4 /* Route installed by administrator */ +#define RTPROT_UNSPEC 0 +#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; + not used by current IPv4 */ +#define RTPROT_KERNEL 2 /* Route installed by kernel */ +#define RTPROT_BOOT 3 /* Route installed during boot */ +#define RTPROT_STATIC 4 /* Route installed by administrator */ /* Values of protocol >= RTPROT_STATIC are not interpreted by kernel; they are just passed from user and back as is. @@ -271,22 +271,23 @@ enum { avoid conflicts. 
*/ -#define RTPROT_GATED 8 /* Apparently, GateD */ -#define RTPROT_RA 9 /* RDISC/ND router advertisements */ -#define RTPROT_MRT 10 /* Merit MRT */ -#define RTPROT_ZEBRA 11 /* Zebra */ -#define RTPROT_BIRD 12 /* BIRD */ -#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ -#define RTPROT_XORP 14 /* XORP */ -#define RTPROT_NTK 15 /* Netsukuku */ -#define RTPROT_DHCP 16 /* DHCP client */ -#define RTPROT_MROUTED 17 /* Multicast daemon */ -#define RTPROT_BABEL 42 /* Babel daemon */ -#define RTPROT_BGP 186 /* BGP Routes */ -#define RTPROT_ISIS 187 /* ISIS Routes */ -#define RTPROT_OSPF 188 /* OSPF Routes */ -#define RTPROT_RIP 189 /* RIP Routes */ -#define RTPROT_EIGRP 192 /* EIGRP Routes */ +#define RTPROT_GATED 8 /* Apparently, GateD */ +#define RTPROT_RA 9 /* RDISC/ND router advertisements */ +#define RTPROT_MRT 10 /* Merit MRT */ +#define RTPROT_ZEBRA 11 /* Zebra */ +#define RTPROT_BIRD 12 /* BIRD */ +#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ +#define RTPROT_XORP 14 /* XORP */ +#define RTPROT_NTK 15 /* Netsukuku */ +#define RTPROT_DHCP 16 /* DHCP client */ +#define RTPROT_MROUTED 17 /* Multicast daemon */ +#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */ +#define RTPROT_BABEL 42 /* Babel daemon */ +#define RTPROT_BGP 186 /* BGP Routes */ +#define RTPROT_ISIS 187 /* ISIS Routes */ +#define RTPROT_OSPF 188 /* OSPF Routes */ +#define RTPROT_RIP 189 /* RIP Routes */ +#define RTPROT_EIGRP 192 /* EIGRP Routes */ /* rtm_scope -- cgit v1.2.3-59-g8ed1b From bdb7b79b4ce864a724250e1d35948c46f135de36 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 22 Jun 2020 20:22:21 -0700 Subject: bpf: Switch most helper return values from 32-bit int to 64-bit long Switch most of BPF helper definitions from returning int to long. These definitions are coming from comments in BPF UAPI header and are used to generate bpf_helper_defs.h (under libbpf) to be later included and used from BPF programs. In actual in-kernel implementation, all the helpers are defined as returning u64, but due to some historical reasons, most of them are actually defined as returning int in UAPI (usually, to return 0 on success, and negative value on error). This actually causes Clang to quite often generate sub-optimal code, because compiler believes that return value is 32-bit, and in a lot of cases has to be up-converted (usually with a pair of 32-bit bit shifts) to 64-bit values, before they can be used further in BPF code. Besides just "polluting" the code, these 32-bit shifts quite often cause problems for cases in which return value matters. This is especially the case for the family of bpf_probe_read_str() functions. There are few other similar helpers (e.g., bpf_read_branch_records()), in which return value is used by BPF program logic to record variable-length data and process it. For such cases, BPF program logic carefully manages offsets within some array or map to read variable-length data. For such uses, it's crucial for BPF verifier to track possible range of register values to prove that all the accesses happen within given memory bounds. Those extraneous zero-extending bit shifts, inserted by Clang (and quite often interleaved with other code, which makes the issues even more challenging and sometimes requires employing extra per-variable compiler barriers), throws off verifier logic and makes it mark registers as having unknown variable offset. We'll study this pattern a bit later below. 
Another common pattern is to check the return value of a BPF helper for a non-zero state to detect error conditions and attempt alternative actions in such a case. Even in this simple and straightforward case, the mismatch between the 32-bit return type and BPF's native 64-bit registers quite often leads to sub-optimal and unnecessary extra code. We'll look at this pattern as well. Clang's BPF target supports two modes of code generation: ALU32, in which it is capable of using the lower 32-bit parts of registers, and no-ALU32, in which only full 64-bit registers are used. ALU32 mode somewhat mitigates the problems described above, but not in all cases. This patch switches all the cases in which BPF helpers return 0 or a negative error from returning int to returning long. It is shown below that such a change in definition leads to equivalent or better code. No-ALU32 mode benefits more, while ALU32 mode either doesn't degrade or still gets improved code generation. Another class of cases switched from int to long is the bpf_probe_read_str()-like helpers, which encode the successful case as a non-negative value while still returning a negative value for errors. In all such cases, correctness is preserved due to the two's complement encoding of negative values and the fact that all helpers return values that fit within 32 bits in absolute value. Two's complement ensures that for negative values the upper 32 bits are all ones and, when truncated, leave a valid negative 32-bit value equal to the original. Non-negative values have their upper 32 bits set to zero and similarly preserve their value when the high 32 bits are truncated. This means that just casting to int/u32 is correct and efficient (and in ALU32 mode doesn't require any extra shifts). To minimize the chances of regressions, two code patterns were investigated, as mentioned above. For both patterns, BPF assembly was analyzed in the ALU32 and NO-ALU32 compiler modes, both with the current 32-bit int return type and the new 64-bit long return type. Case 1. Variable-length data reading and concatenation. This is a quite ubiquitous pattern in tracing/monitoring applications: reading data like a process's environment variables, file paths, etc. In such cases, many pieces of string-like variable-length data are read into a single big buffer, and at the end of the process, only the part of the array containing actual data is sent to user-space for further processing. This case is tested in the test_varlen.c selftest (in the next patch). The code flow is roughly as follows: void *payload = &sample->payload; u64 len; len = bpf_probe_read_kernel_str(payload, MAX_SZ1, &source_data1); if (len <= MAX_SZ1) { payload += len; sample->len1 = len; } len = bpf_probe_read_kernel_str(payload, MAX_SZ2, &source_data2); if (len <= MAX_SZ2) { payload += len; sample->len2 = len; } /* and so on */ sample->total_len = payload - &sample->payload; /* send over, e.g., perf buffer */ There could be two variations with slightly different code generated: when len is a 64-bit integer and when it is a 32-bit integer. Both variations were analysed. The BPF assembly instructions between two successive invocations of bpf_probe_read_kernel_str() were used to check for code regressions. The results are below, followed by a short analysis. The left side uses helpers with the int return type; the right one is after the switch to long.
ALU32 + INT ALU32 + LONG =========== ============ 64-BIT (13 insns): 64-BIT (10 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: if w0 > 256 goto +9 18: if r0 > 256 goto +6 19: w1 = w0 19: r1 = 0 ll 20: r1 <<= 32 21: *(u64 *)(r1 + 0) = r0 21: r1 s>>= 32 22: r6 = 0 ll 22: r2 = 0 ll 24: r6 += r0 24: *(u64 *)(r2 + 0) = r1 00000000000000c8 : 25: r6 = 0 ll 25: r1 = r6 27: r6 += r1 26: w2 = 256 00000000000000e0 : 27: r3 = 0 ll 28: r1 = r6 29: call 115 29: w2 = 256 30: r3 = 0 ll 32: call 115 32-BIT (11 insns): 32-BIT (12 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: if w0 > 256 goto +7 18: if w0 > 256 goto +8 19: r1 = 0 ll 19: r1 = 0 ll 21: *(u32 *)(r1 + 0) = r0 21: *(u32 *)(r1 + 0) = r0 22: w1 = w0 22: r0 <<= 32 23: r6 = 0 ll 23: r0 >>= 32 25: r6 += r1 24: r6 = 0 ll 00000000000000d0 : 26: r6 += r0 26: r1 = r6 00000000000000d8 : 27: w2 = 256 27: r1 = r6 28: r3 = 0 ll 28: w2 = 256 30: call 115 29: r3 = 0 ll 31: call 115 In ALU32 mode, the variant using a 64-bit length variable clearly wins and avoids unnecessary zero-extension bit shifts. In practice, this is even more important, because the BPF code won't need to do extra checks to "prove" that payload/len are within valid bounds. The 32-bit len variant is one instruction longer. Clang decided to do the 64-to-32 casting with two bit shifts, instead of the equivalent `w1 = w0` assignment. The former uses an extra register. The latter might potentially lose some range information, but not for a 32-bit value. So in this case, the verifier infers that r0 is [0, 256] after the check at 18:, and shifting 32 bits left/right keeps that range intact. We should probably look into Clang's logic and see why it chooses bit shifts over sub-register assignments for this. NO-ALU32 + INT NO-ALU32 + LONG ============== =============== 64-BIT (14 insns): 64-BIT (10 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: r0 <<= 32 18: if r0 > 256 goto +6 19: r1 = r0 19: r1 = 0 ll 20: r1 >>= 32 21: *(u64 *)(r1 + 0) = r0 21: if r1 > 256 goto +7 22: r6 = 0 ll 22: r0 s>>= 32 24: r6 += r0 23: r1 = 0 ll 00000000000000c8 : 25: *(u64 *)(r1 + 0) = r0 25: r1 = r6 26: r6 = 0 ll 26: r2 = 256 28: r6 += r0 27: r3 = 0 ll 00000000000000e8 : 29: call 115 29: r1 = r6 30: r2 = 256 31: r3 = 0 ll 33: call 115 32-BIT (13 insns): 32-BIT (13 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: r1 = r0 18: r1 = r0 19: r1 <<= 32 19: r1 <<= 32 20: r1 >>= 32 20: r1 >>= 32 21: if r1 > 256 goto +6 21: if r1 > 256 goto +6 22: r2 = 0 ll 22: r2 = 0 ll 24: *(u32 *)(r2 + 0) = r0 24: *(u32 *)(r2 + 0) = r0 25: r6 = 0 ll 25: r6 = 0 ll 27: r6 += r1 27: r6 += r1 00000000000000e0 : 00000000000000e0 : 28: r1 = r6 28: r1 = r6 29: r2 = 256 29: r2 = 256 30: r3 = 0 ll 30: r3 = 0 ll 32: call 115 32: call 115 In NO-ALU32 mode, for the case of a 64-bit len variable, Clang generates far superior code, as expected, eliminating unnecessary bit shifts. For 32-bit len, the code is identical. So overall, only the ALU32 32-bit len case is more or less equivalent, and the difference stems from an internal Clang decision rather than from the compiler lacking enough information about types. Case 2. Let's look at the simpler case of checking the return result of a BPF helper for errors.
The code is very simple: long bla; if (bpf_probe_read_kernel(&bla, sizeof(bla), 0)) return 1; else return 0; ALU32 + CHECK (9 insns) ALU32 + CHECK (9 insns) ==================================== ==================================== 0: r1 = r10 0: r1 = r10 1: r1 += -8 1: r1 += -8 2: w2 = 8 2: w2 = 8 3: r3 = 0 3: r3 = 0 4: call 113 4: call 113 5: w1 = w0 5: r1 = r0 6: w0 = 1 6: w0 = 1 7: if w1 != 0 goto +1 7: if r1 != 0 goto +1 8: w0 = 0 8: w0 = 0 0000000000000048 : 0000000000000048 : 9: exit 9: exit The code is almost identical; the only difference is the use of a full register assignment (r1 = r0) vs half-registers (w1 = w0) in instruction #5. On 32-bit architectures, the new BPF assembly might be slightly less optimal, in theory. But one can argue that's not a big issue, given that the use of full registers is still prevalent (e.g., for parameter passing). NO-ALU32 + CHECK (11 insns) NO-ALU32 + CHECK (9 insns) ==================================== ==================================== 0: r1 = r10 0: r1 = r10 1: r1 += -8 1: r1 += -8 2: r2 = 8 2: r2 = 8 3: r3 = 0 3: r3 = 0 4: call 113 4: call 113 5: r1 = r0 5: r1 = r0 6: r1 <<= 32 6: r0 = 1 7: r1 >>= 32 7: if r1 != 0 goto +1 8: r0 = 1 8: r0 = 0 9: if r1 != 0 goto +1 0000000000000048 : 10: r0 = 0 9: exit 0000000000000058 : 11: exit NO-ALU32 is a clear improvement, getting rid of unnecessary zero-extension bit shifts. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200623032224.4020118-1-andriin@fb.com --- include/uapi/linux/bpf.h | 192 ++++++++++++++++++++--------------------- tools/include/uapi/linux/bpf.h | 192 ++++++++++++++++++++--------------------- 2 files changed, 192 insertions(+), 192 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 19684813faae..be0efee49093 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -653,7 +653,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. * - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -671,13 +671,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. @@ -695,7 +695,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -775,7 +775,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program.
* - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -792,7 +792,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper @@ -817,7 +817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -849,7 +849,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack @@ -880,7 +880,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -916,7 +916,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * - * int bpf_get_current_comm(void *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -953,7 +953,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. * - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -969,7 +969,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -981,7 +981,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. 
This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -1032,7 +1032,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -1098,7 +1098,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ @@ -1145,7 +1145,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1190,7 +1190,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1207,7 +1207,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1276,7 +1276,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. * - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1294,7 +1294,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1304,7 +1304,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1331,7 +1331,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1358,7 +1358,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1389,7 +1389,7 @@ union bpf_attr { * Return * A pointer to the current task struct. * - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1408,7 +1408,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by @@ -1420,7 +1420,7 @@ union bpf_attr { * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. * - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1444,7 +1444,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1500,7 +1500,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1511,7 +1511,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1532,7 +1532,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1547,7 +1547,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for @@ -1595,14 +1595,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * - * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1630,7 +1630,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -1676,7 +1676,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1697,7 +1697,7 @@ union bpf_attr { * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1708,7 +1708,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1727,7 +1727,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1756,7 +1756,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. 
This helper relies on a *map* of type @@ -1806,7 +1806,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1817,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1842,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. @@ -1867,7 +1867,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1911,7 +1911,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1925,7 +1925,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1959,7 +1959,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1977,7 +1977,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -2008,7 +2008,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. 
This allows for making outgoing @@ -2026,7 +2026,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. @@ -2040,7 +2040,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -2056,7 +2056,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * To achieve this, the helper needs *ctx*, which is a pointer @@ -2089,7 +2089,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2111,7 +2111,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be @@ -2142,7 +2142,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2161,7 +2161,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2175,7 +2175,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. 
@@ -2189,7 +2189,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2226,7 +2226,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs @@ -2241,7 +2241,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2257,7 +2257,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2286,7 +2286,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays @@ -2305,7 +2305,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2370,7 +2370,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2471,7 +2471,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from @@ -2479,7 +2479,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2489,19 +2489,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2517,7 +2517,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if @@ -2529,7 +2529,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2543,7 +2543,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock allows to @@ -2591,7 +2591,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2614,7 +2614,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buff *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2651,7 +2651,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. @@ -2666,7 +2666,7 @@ union bpf_attr { * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2682,7 +2682,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). 
* - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2701,7 +2701,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2718,7 +2718,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2735,7 +2735,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2759,7 +2759,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2810,7 +2810,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2818,7 +2818,7 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. @@ -2859,7 +2859,7 @@ union bpf_attr { * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * - * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -2883,21 +2883,21 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the @@ -2941,7 +2941,7 @@ union bpf_attr { * including the trailing NUL character. On error, a negative * value. * - * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. @@ -2949,14 +2949,14 @@ union bpf_attr { * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * - * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_send_signal_thread(u32 sig) + * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. * Return @@ -2976,7 +2976,7 @@ union bpf_attr { * Return * The 64 bit jiffies * - * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* @@ -2995,7 +2995,7 @@ union bpf_attr { * * **-ENOENT** if architecture does not support branch records. * - * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. @@ -3007,7 +3007,7 @@ union bpf_attr { * * **-ENOENT** if pidns does not exists for the current task. * - * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -3062,7 +3062,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, @@ -3097,7 +3097,7 @@ union bpf_attr { * Return * Current *ktime*. 
* - * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. @@ -3126,7 +3126,7 @@ union bpf_attr { * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * - * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the @@ -3221,7 +3221,7 @@ union bpf_attr { * Return * Requested value, or 0, if flags are not recognized. * - * int bpf_csum_level(struct sk_buff *skb, u64 level) + * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skbs checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 19684813faae..be0efee49093 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -653,7 +653,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. * - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -671,13 +671,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. @@ -695,7 +695,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -775,7 +775,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program. * - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -792,7 +792,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. 
Computation is incremental, so the helper @@ -817,7 +817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -849,7 +849,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack @@ -880,7 +880,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -916,7 +916,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * - * int bpf_get_current_comm(void *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -953,7 +953,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. * - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -969,7 +969,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -981,7 +981,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -1032,7 +1032,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -1098,7 +1098,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. 
* This helper is somewhat similar to **bpf_clone_redirect**\ @@ -1145,7 +1145,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1190,7 +1190,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1207,7 +1207,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1276,7 +1276,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. * - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1294,7 +1294,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1304,7 +1304,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1331,7 +1331,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1358,7 +1358,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1389,7 +1389,7 @@ union bpf_attr { * Return * A pointer to the current task struct. 
* - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1408,7 +1408,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by @@ -1420,7 +1420,7 @@ union bpf_attr { * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. * - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1444,7 +1444,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1500,7 +1500,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1511,7 +1511,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1532,7 +1532,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1547,7 +1547,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for @@ -1595,14 +1595,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. 
* Return * 0 * - * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1630,7 +1630,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -1676,7 +1676,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1697,7 +1697,7 @@ union bpf_attr { * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1708,7 +1708,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1727,7 +1727,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1756,7 +1756,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type @@ -1806,7 +1806,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1817,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1842,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. @@ -1867,7 +1867,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1911,7 +1911,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1925,7 +1925,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1959,7 +1959,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1977,7 +1977,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -2008,7 +2008,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing @@ -2026,7 +2026,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. @@ -2040,7 +2040,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -2056,7 +2056,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * To achieve this, the helper needs *ctx*, which is a pointer @@ -2089,7 +2089,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2111,7 +2111,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be @@ -2142,7 +2142,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2161,7 +2161,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2175,7 +2175,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. @@ -2189,7 +2189,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2226,7 +2226,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs @@ -2241,7 +2241,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2257,7 +2257,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2286,7 +2286,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays @@ -2305,7 +2305,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2370,7 +2370,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2471,7 +2471,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from @@ -2479,7 +2479,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2489,19 +2489,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2517,7 +2517,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if @@ -2529,7 +2529,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2543,7 +2543,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock allows to @@ -2591,7 +2591,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2614,7 +2614,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buff *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2651,7 +2651,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. @@ -2666,7 +2666,7 @@ union bpf_attr { * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2682,7 +2682,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2701,7 +2701,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. 
* - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2718,7 +2718,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2735,7 +2735,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2759,7 +2759,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2810,7 +2810,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2818,7 +2818,7 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. @@ -2859,7 +2859,7 @@ union bpf_attr { * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * - * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -2883,21 +2883,21 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the @@ -2941,7 +2941,7 @@ union bpf_attr { * including the trailing NUL character. On error, a negative * value. * - * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. @@ -2949,14 +2949,14 @@ union bpf_attr { * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * - * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_send_signal_thread(u32 sig) + * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. * Return @@ -2976,7 +2976,7 @@ union bpf_attr { * Return * The 64 bit jiffies * - * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* @@ -2995,7 +2995,7 @@ union bpf_attr { * * **-ENOENT** if architecture does not support branch records. * - * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. @@ -3007,7 +3007,7 @@ union bpf_attr { * * **-ENOENT** if pidns does not exists for the current task. * - * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -3062,7 +3062,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, @@ -3097,7 +3097,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. @@ -3126,7 +3126,7 @@ union bpf_attr { * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 
* - * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the @@ -3221,7 +3221,7 @@ union bpf_attr { * Return * Requested value, or 0, if flags are not recognized. * - * int bpf_csum_level(struct sk_buff *skb, u64 level) + * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skbs checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform -- cgit v1.2.3-59-g8ed1b From e678e9ddea96590888b034e2a7ad1128bf1ea1ea Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Tue, 23 Jun 2020 09:42:31 -0700 Subject: indirect_call_wrapper: extend indirect wrapper to support up to 4 calls There are many places where 2 annotations are not enough. This patch adds INDIRECT_CALL_3 and INDIRECT_CALL_4 to cover such cases. Signed-off-by: Brian Vazquez Acked-by: Paolo Abeni Signed-off-by: David S. Miller --- include/linux/indirect_call_wrapper.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h index 00d7e8e919c6..54c02c84906a 100644 --- a/include/linux/indirect_call_wrapper.h +++ b/include/linux/indirect_call_wrapper.h @@ -23,6 +23,16 @@ likely(f == f2) ? f2(__VA_ARGS__) : \ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ }) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) #define INDIRECT_CALLABLE_DECLARE(f) f #define INDIRECT_CALLABLE_SCOPE @@ -30,6 +40,8 @@ #else #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALLABLE_DECLARE(f) #define INDIRECT_CALLABLE_SCOPE static #endif -- cgit v1.2.3-59-g8ed1b From 55cced4f813bece01f96256d96f283b9210d19ee Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Tue, 23 Jun 2020 09:42:32 -0700 Subject: ipv6: fib6: avoid indirect calls from fib6_rule_lookup It was reported that a considerable number of cycles were spent on the expensive indirect calls in fib6_rule_lookup. This patch introduces an inline helper called pol_lookup_func() that uses the indirect call wrappers to avoid the indirect calls; it saves around 50 ns per call. Performance was measured on the receiver by checking the number of syncookies the server was able to generate under a synflood load. Traffic was generated using trafgen[1], which was pushing around 1Mpps on a single queue. The receiver was using only one rx queue, which helped to create a bottleneck and make the experiment rx-bounded.
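As a rough illustration of how the INDIRECT_CALL_*() wrappers extended above are meant to be used (this sketch is not part of any patch in this series; the handler names and the dispatch() helper are made up for the example):

#include <linux/indirect_call_wrapper.h>

/* Hypothetical handlers that a function pointer is most likely to reference. */
static int handle_a(void *ctx) { return 0; }
static int handle_b(void *ctx) { return 1; }
static int handle_c(void *ctx) { return 2; }

/* With CONFIG_RETPOLINE=y the macro expands to a chain of pointer
 * comparisons followed by direct calls to the listed candidates;
 * otherwise it collapses to a plain fn(ctx) indirect call. */
static int dispatch(int (*fn)(void *), void *ctx)
{
	return INDIRECT_CALL_3(fn, handle_a, handle_b, handle_c, ctx);
}

The pol_lookup_func() helper added by the fib6_rule_lookup patch below is the in-tree counterpart of this pattern, listing the four IPv6 policy-routing lookup functions as the likely targets.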
These are the syncookies generated over 10s from the different runs: Without the patch: TcpExtSyncookiesSent 3553749 0.0 TcpExtSyncookiesSent 3550895 0.0 TcpExtSyncookiesSent 3553845 0.0 TcpExtSyncookiesSent 3541050 0.0 TcpExtSyncookiesSent 3539921 0.0 TcpExtSyncookiesSent 3557659 0.0 TcpExtSyncookiesSent 3526812 0.0 TcpExtSyncookiesSent 3536121 0.0 TcpExtSyncookiesSent 3529963 0.0 TcpExtSyncookiesSent 3536319 0.0 With the patch: TcpExtSyncookiesSent 3611786 0.0 TcpExtSyncookiesSent 3596682 0.0 TcpExtSyncookiesSent 3606878 0.0 TcpExtSyncookiesSent 3599564 0.0 TcpExtSyncookiesSent 3601304 0.0 TcpExtSyncookiesSent 3609249 0.0 TcpExtSyncookiesSent 3617437 0.0 TcpExtSyncookiesSent 3608765 0.0 TcpExtSyncookiesSent 3620205 0.0 TcpExtSyncookiesSent 3601895 0.0 Without the patch the average is 354263 pkt/s or 2822 ns/pkt and with the patch the average is 360738 pkt/s or 2772 ns/pkt which gives an estimate of 50 ns per packet. [1] http://netsniff-ng.org/ Changelog since v1: - Change ordering in the ICW (Paolo Abeni) Cc: Luigi Rizzo Cc: Paolo Abeni Reported-by: Eric Dumazet Signed-off-by: Brian Vazquez Acked-by: Paolo Abeni Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 36 ++++++++++++++++++++++++++++++++++++ net/ipv6/fib6_rules.c | 9 ++++++--- net/ipv6/ip6_fib.c | 3 ++- net/ipv6/route.c | 8 ++++---- 4 files changed, 48 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 3f615a29766e..cc8356fd927f 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_IPV6_MULTIPLE_TABLES #define FIB6_TABLE_HASHSZ 256 @@ -552,6 +553,41 @@ struct bpf_iter__ipv6_route { }; #endif +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup, + struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) +{ + return INDIRECT_CALL_4(lookup, + ip6_pol_route_output, + ip6_pol_route_input, + ip6_pol_route_lookup, + __ip6_route_redirect, + net, table, fl6, skb, flags); +} + #ifdef CONFIG_IPV6_MULTIPLE_TABLES static inline bool fib6_has_custom_rules(const struct net *net) { diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index fafe556d21e0..6053ef851555 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -111,11 +111,13 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, } else { struct rt6_info *rt; - rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_local_tbl, fl6, skb, flags); if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN) return &rt->dst; ip6_rt_put_flags(rt, flags); - rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_main_tbl, fl6, skb,
flags); if (rt->dst.error != -EAGAIN) return &rt->dst; ip6_rt_put_flags(rt, flags); @@ -226,7 +228,8 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp, goto out; } - rt = lookup(net, table, flp6, arg->lookup_data, flags); + rt = pol_lookup_func(lookup, + net, table, flp6, arg->lookup_data, flags); if (rt != net->ipv6.ip6_null_entry) { err = fib6_rule_saddr(net, rule, flags, flp6, ip6_dst_idev(&rt->dst)->dev); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 49ee89bbcba0..25a90f3f705c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -314,7 +314,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, { struct rt6_info *rt; - rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_main_tbl, fl6, skb, flags); if (rt->dst.error == -EAGAIN) { ip6_rt_put_flags(rt, flags); rt = net->ipv6.ip6_null_entry; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 82cbb46a2a4f..5852039ca9cf 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1207,7 +1207,7 @@ fallback: return nrt; } -static struct rt6_info *ip6_pol_route_lookup(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2274,7 +2274,7 @@ out: } EXPORT_SYMBOL_GPL(ip6_pol_route); -static struct rt6_info *ip6_pol_route_input(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2465,7 +2465,7 @@ void ip6_route_input(struct sk_buff *skb) &fl6, skb, flags)); } -static struct rt6_info *ip6_pol_route_output(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2912,7 +2912,7 @@ struct ip6rd_flowi { struct in6_addr gateway; }; -static struct rt6_info *__ip6_route_redirect(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, -- cgit v1.2.3-59-g8ed1b From bdfd2d1fa79acd03e18d1683419572f3682b39fd Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Tue, 23 Jun 2020 16:40:01 -0400 Subject: bonding/xfrm: use real_dev instead of slave_dev Rather than requiring every hw crypto capable NIC driver to do a check for slave_dev being set, set real_dev in the xfrm layer and xso init time, and then override it in the bonding driver as needed. Then NIC drivers can always use real_dev, and at the same time, we eliminate the use of a variable name that probably shouldn't have been used in the first place, particularly given recent current events. CC: Boris Pismenny CC: Saeed Mahameed CC: Leon Romanovsky CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org Suggested-by: Saeed Mahameed Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 6 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 47 ++++++---------------- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10 +---- include/net/xfrm.h | 2 +- net/xfrm/xfrm_device.c | 5 ++- 5 files changed, 21 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 90939ccf2a94..4ef99efc37f6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -386,7 +386,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = rtnl_dereference(bond->curr_active_slave); - xs->xso.slave_dev = slave->dev; + xs->xso.real_dev = slave->dev; bond->xs = xs; if (!(slave->dev->xfrmdev_ops @@ -411,7 +411,7 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) if (!slave) return; - xs->xso.slave_dev = slave->dev; + xs->xso.real_dev = slave->dev; if (!(slave->dev->xfrmdev_ops && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { @@ -440,7 +440,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) return false; } - xs->xso.slave_dev = slave_dev; + xs->xso.real_dev = slave_dev; return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 26b0a58a064d..6516980965a2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -427,14 +427,11 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; @@ -480,9 +477,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; u32 mfval, manc, reg; int num_filters = 4; bool manc_ipv4; @@ -500,12 +497,6 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) #define BMCIP_V6 0x3 #define BMCIP_MASK 0x3 - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - hw = &adapter->hw; - manc = IXGBE_READ_REG(hw, IXGBE_MANC); manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); @@ -569,22 +560,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) **/ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_ipsec *ipsec; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; int checked, match, first; u16 sa_idx; int ret; int i; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - ipsec = adapter->ipsec; - hw = &adapter->hw; - if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 
0x%04x for ipsec offload\n", xs->id.proto); @@ -761,20 +745,13 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_ipsec *ipsec; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - ipsec = adapter->ipsec; - hw = &adapter->hw; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa *rsa; u8 ipi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 72ad6664bd73..bc55c82b55ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -207,12 +207,9 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; + struct net_device *netdev = x->xso.real_dev; struct mlx5e_priv *priv; - if (x->xso.slave_dev) - netdev = x->xso.slave_dev; - priv = netdev_priv(netdev); if (x->props.aalgo != SADB_AALG_NONE) { @@ -288,15 +285,12 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; - struct net_device *netdev = x->xso.dev; + struct net_device *netdev = x->xso.real_dev; struct mlx5_accel_esp_xfrm_attrs attrs; struct mlx5e_priv *priv; unsigned int sa_handle; int err; - if (x->xso.slave_dev) - netdev = x->xso.slave_dev; - priv = netdev_priv(netdev); err = mlx5e_xfrm_validate_state(x); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e20b2b27ec48..e648c9e6c919 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -127,7 +127,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; - struct net_device *slave_dev; + struct net_device *real_dev; unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index b8918fc5248b..7b64bb64c822 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -120,8 +120,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) return skb; - /* This skb was already validated on the master dev */ - if ((x->xso.dev != dev) && (x->xso.slave_dev == dev)) + /* This skb was already validated on the upper/virtual dev */ + if ((x->xso.dev != dev) && (x->xso.real_dev == dev)) return skb; local_irq_save(flags); @@ -259,6 +259,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, } xso->dev = dev; + xso->real_dev = dev; xso->num_exthdrs = 1; xso->flags = xuo->flags; -- cgit v1.2.3-59-g8ed1b From 8e6cf365e1d5c70e275a77a3c5ad7e3dc685474c Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Thu, 4 Jun 2020 09:20:49 -0400 Subject: audit: log nftables configuration change events iptables, ip6tables, arptables and ebtables table registration, replacement and unregistration configuration events are logged for the native (legacy) iptables setsockopt api, but not for the nftables netlink api which is used by the nft-variant of iptables in addition to 
nftables itself. Add calls to log the configuration actions in the nftables netlink api. This uses the same NETFILTER_CFG record format but overloads the table field. type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.878:162) : table=?:0;?:0 family=unspecified entries=2 op=nft_register_gen pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.878:162) : table=firewalld:1;?:0 family=inet entries=0 op=nft_register_table pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;filter_FORWARD:85 family=inet entries=8 op=nft_register_chain pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;filter_FORWARD:85 family=inet entries=101 op=nft_register_rule pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;__set0:87 family=inet entries=87 op=nft_register_setelem pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;__set0:87 family=inet entries=0 op=nft_register_set pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld For further information please see issue https://github.com/linux-audit/audit-kernel/issues/124 Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/audit.h | 18 ++++++++ kernel/auditsc.c | 24 ++++++++-- net/netfilter/nf_tables_api.c | 103 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/audit.h b/include/linux/audit.h index 3fcd9ee49734..604ede630580 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -12,6 +12,7 @@ #include #include #include +#include #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -98,6 +99,23 @@ enum audit_nfcfgop { AUDIT_XT_OP_REGISTER, AUDIT_XT_OP_REPLACE, AUDIT_XT_OP_UNREGISTER, + AUDIT_NFT_OP_TABLE_REGISTER, + AUDIT_NFT_OP_TABLE_UNREGISTER, + AUDIT_NFT_OP_CHAIN_REGISTER, + AUDIT_NFT_OP_CHAIN_UNREGISTER, + AUDIT_NFT_OP_RULE_REGISTER, + AUDIT_NFT_OP_RULE_UNREGISTER, + AUDIT_NFT_OP_SET_REGISTER, + AUDIT_NFT_OP_SET_UNREGISTER, + AUDIT_NFT_OP_SETELEM_REGISTER, + AUDIT_NFT_OP_SETELEM_UNREGISTER, + AUDIT_NFT_OP_GEN_REGISTER, + AUDIT_NFT_OP_OBJ_REGISTER, + AUDIT_NFT_OP_OBJ_UNREGISTER, + AUDIT_NFT_OP_OBJ_RESET, + AUDIT_NFT_OP_FLOWTABLE_REGISTER, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, + AUDIT_NFT_OP_INVALID, }; extern int is_audit_feature_set(int which); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 468a23390457..3a9100e95fda 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -75,6 +75,7 @@ #include #include #include +#include #include "audit.h" @@ -136,9 +137,26 @@ struct audit_nfcfgop_tab { }; static const struct audit_nfcfgop_tab audit_nfcfgs[] = { - { AUDIT_XT_OP_REGISTER, "register" }, - { AUDIT_XT_OP_REPLACE, "replace" }, - { AUDIT_XT_OP_UNREGISTER, "unregister" }, + { AUDIT_XT_OP_REGISTER, "xt_register" }, + { AUDIT_XT_OP_REPLACE, "xt_replace" }, + { AUDIT_XT_OP_UNREGISTER, "xt_unregister" }, + { AUDIT_NFT_OP_TABLE_REGISTER, "nft_register_table" }, + { AUDIT_NFT_OP_TABLE_UNREGISTER, "nft_unregister_table" }, + { AUDIT_NFT_OP_CHAIN_REGISTER, "nft_register_chain" }, + { AUDIT_NFT_OP_CHAIN_UNREGISTER, "nft_unregister_chain" }, + { AUDIT_NFT_OP_RULE_REGISTER, 
"nft_register_rule" }, + { AUDIT_NFT_OP_RULE_UNREGISTER, "nft_unregister_rule" }, + { AUDIT_NFT_OP_SET_REGISTER, "nft_register_set" }, + { AUDIT_NFT_OP_SET_UNREGISTER, "nft_unregister_set" }, + { AUDIT_NFT_OP_SETELEM_REGISTER, "nft_register_setelem" }, + { AUDIT_NFT_OP_SETELEM_UNREGISTER, "nft_unregister_setelem" }, + { AUDIT_NFT_OP_GEN_REGISTER, "nft_register_gen" }, + { AUDIT_NFT_OP_OBJ_REGISTER, "nft_register_obj" }, + { AUDIT_NFT_OP_OBJ_UNREGISTER, "nft_unregister_obj" }, + { AUDIT_NFT_OP_OBJ_RESET, "nft_reset_obj" }, + { AUDIT_NFT_OP_FLOWTABLE_REGISTER, "nft_register_flowtable" }, + { AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, "nft_unregister_flowtable" }, + { AUDIT_NFT_OP_INVALID, "nft_invalid" }, }; static int audit_match_perm(struct audit_context *ctx, int mask) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 073aa1051d43..164700273947 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -693,6 +694,16 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + ctx->table->name, ctx->table->handle); + + audit_log_nfcfg(buf, + ctx->family, + ctx->table->use, + event == NFT_MSG_NEWTABLE ? + AUDIT_NFT_OP_TABLE_REGISTER : + AUDIT_NFT_OP_TABLE_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -1428,6 +1439,17 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + ctx->chain->name, ctx->chain->handle); + + audit_log_nfcfg(buf, + ctx->family, + ctx->chain->use, + event == NFT_MSG_NEWCHAIN ? + AUDIT_NFT_OP_CHAIN_REGISTER : + AUDIT_NFT_OP_CHAIN_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -2693,6 +2715,17 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx, { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + ctx->chain->name, ctx->chain->handle); + + audit_log_nfcfg(buf, + ctx->family, + rule->handle, + event == NFT_MSG_NEWRULE ? + AUDIT_NFT_OP_RULE_REGISTER : + AUDIT_NFT_OP_RULE_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -3695,6 +3728,17 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx, struct sk_buff *skb; u32 portid = ctx->portid; int err; + char *buf = kasprintf(gfp_flags, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + set->name, set->handle); + + audit_log_nfcfg(buf, + ctx->family, + set->field_count, + event == NFT_MSG_NEWSET ? + AUDIT_NFT_OP_SET_REGISTER : + AUDIT_NFT_OP_SET_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -4811,6 +4855,17 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx, u32 portid = ctx->portid; struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + set->name, set->handle); + + audit_log_nfcfg(buf, + ctx->family, + set->handle, + event == NFT_MSG_NEWSETELEM ? 
+ AUDIT_NFT_OP_SETELEM_REGISTER : + AUDIT_NFT_OP_SETELEM_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; @@ -5892,6 +5947,19 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) obj->ops->type->type != filter->type) goto cont; + if (reset) { + char *buf = kasprintf(GFP_KERNEL, + "%s:%llu;?:0", + table->name, + table->handle); + + audit_log_nfcfg(buf, + family, + obj->handle, + AUDIT_NFT_OP_OBJ_RESET); + kfree(buf); + } + if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, @@ -6002,6 +6070,17 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, if (NFNL_MSG_TYPE(nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET) reset = true; + if (reset) { + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + table->name, table->handle); + + audit_log_nfcfg(buf, + family, + obj->handle, + AUDIT_NFT_OP_OBJ_RESET); + kfree(buf); + } + err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); @@ -6077,6 +6156,16 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + table->name, table->handle); + + audit_log_nfcfg(buf, + family, + obj->handle, + event == NFT_MSG_NEWOBJ ? + AUDIT_NFT_OP_OBJ_REGISTER : + AUDIT_NFT_OP_OBJ_UNREGISTER); + kfree(buf); if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) @@ -6856,6 +6945,17 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + flowtable->table->name, flowtable->table->handle, + flowtable->name, flowtable->handle); + + audit_log_nfcfg(buf, + ctx->family, + flowtable->hooknum, + event == NFT_MSG_NEWFLOWTABLE ? + AUDIT_NFT_OP_FLOWTABLE_REGISTER : + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER); + kfree(buf); if (ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -6977,6 +7077,9 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, struct sk_buff *skb2; int err; + audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq, + AUDIT_NFT_OP_GEN_REGISTER); + if (nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; -- cgit v1.2.3-59-g8ed1b From 243600ee660531d2b5b5ef3faab90c5f8ff4c2b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:11 -0700 Subject: tcp: add declarations to avoid warnings Remove these errors: net/ipv6/tcp_ipv6.c:1550:29: warning: symbol 'tcp_v6_rcv' was not declared. Should it be static? net/ipv6/tcp_ipv6.c:1770:30: warning: symbol 'tcp_v6_early_demux' was not declared. Should it be static? net/ipv6/tcp_ipv6.c:1550:29: warning: no previous prototype for 'tcp_v6_rcv' [-Wmissing-prototypes] 1550 | INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) | ^~~~~~~~~~ net/ipv6/tcp_ipv6.c:1770:30: warning: no previous prototype for 'tcp_v6_early_demux' [-Wmissing-prototypes] 1770 | INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) | ^~~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index cd9cc348dbf9..a8c36fa886a4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -934,6 +934,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) } INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); #endif -- cgit v1.2.3-59-g8ed1b From b03d2142bea8cf7407a0a668ce8f5f115bd226c4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:12 -0700 Subject: tcp: move ipv6_specific declaration to remove a warning ipv6_specific should be declared in tcp include files, not mptcp. This removes the following warning : CHECK net/ipv6/tcp_ipv6.c net/ipv6/tcp_ipv6.c:78:42: warning: symbol 'ipv6_specific' was not declared. Should it be static? Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/mptcp/protocol.h | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index a8c36fa886a4..e6920ae0765c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -933,6 +933,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) return 0; } +extern const struct inet_connection_sock_af_ops ipv6_specific; + INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index db56535dfc29..d4294b6d23e4 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -355,9 +355,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, } extern const struct inet_connection_sock_af_ops ipv4_specific; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) -extern const struct inet_connection_sock_af_ops ipv6_specific; -#endif void mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) -- cgit v1.2.3-59-g8ed1b From 9b9e2f250e3e6f59ad07e6d03838c27a100e0042 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:13 -0700 Subject: tcp: move ipv4_specific to tcp include file Declare ipv4_specific once, in tcp.h were it belongs. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 2 ++ include/net/transp_v6.h | 3 --- net/mptcp/protocol.h | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e6920ae0765c..b0f0f93c681c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -907,6 +907,8 @@ static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) TCP_SKB_CB(skb)->bpf.sk_redir = NULL; } +extern const struct inet_connection_sock_af_ops ipv4_specific; + #if IS_ENABLED(CONFIG_IPV6) /* This is the variant of inet6_iif() that must be used by TCP, * as TCP moves IP6CB into a different location in skb->cb[] diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index a8f6020f1196..da06613c9603 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -56,9 +56,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -/* address family specific functions */ -extern const struct inet_connection_sock_af_ops ipv4_specific; - void inet6_destroy_sock(struct sock *sk); #define IPV6_SEQ_DGRAM_HEADER \ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d4294b6d23e4..06661781c9af 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -354,8 +354,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops; } -extern const struct inet_connection_sock_af_ops ipv4_specific; - void mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) int mptcp_proto_v6_init(void); -- cgit v1.2.3-59-g8ed1b From 5521d95e076238f1615cf1cdb135f318ef798b49 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:14 -0700 Subject: net: move tcp gro declarations to net/tcp.h This patch removes following (C=1 W=1) warnings for CONFIG_RETPOLINE=y : net/ipv4/tcp_offload.c:306:16: warning: symbol 'tcp4_gro_receive' was not declared. Should it be static? net/ipv4/tcp_offload.c:306:17: warning: no previous prototype for 'tcp4_gro_receive' [-Wmissing-prototypes] net/ipv4/tcp_offload.c:319:29: warning: symbol 'tcp4_gro_complete' was not declared. Should it be static? net/ipv4/tcp_offload.c:319:29: warning: no previous prototype for 'tcp4_gro_complete' [-Wmissing-prototypes] CHECK net/ipv6/tcpv6_offload.c net/ipv6/tcpv6_offload.c:16:16: warning: symbol 'tcp6_gro_receive' was not declared. Should it be static? net/ipv6/tcpv6_offload.c:29:29: warning: symbol 'tcp6_gro_complete' was not declared. Should it be static? CC net/ipv6/tcpv6_offload.o net/ipv6/tcpv6_offload.c:16:17: warning: no previous prototype for 'tcp6_gro_receive' [-Wmissing-prototypes] 16 | struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv6/tcpv6_offload.c:29:29: warning: no previous prototype for 'tcp6_gro_complete' [-Wmissing-prototypes] 29 | INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff) | ^~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 4 ++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 4 +--- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index b0f0f93c681c..27f848ab3995 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1957,6 +1957,10 @@ void tcp_v4_destroy_sock(struct sock *sk); struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); +INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)); int tcp_gro_complete(struct sk_buff *skb); void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 02aa5cb3a4fd..d8dbff1dd1fa 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1432,8 +1432,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, return inet_gso_segment(skb, features); } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, struct sk_buff *)); struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) @@ -1608,7 +1606,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) return -EINVAL; } -INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); int inet_gro_complete(struct sk_buff *skb, int nhoff) { diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 7fbb44736a34..78eec5b42385 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "ip6_offload.h" @@ -177,8 +178,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, struct sk_buff *)); INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, @@ -319,7 +318,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, return inet_gro_receive(head, skb); } -INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { -- cgit v1.2.3-59-g8ed1b From 6db693285cd109e566c553694002dea769612b24 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:15 -0700 Subject: udp: move gro declarations to net/udp.h This removes following warnings : CC net/ipv4/udp_offload.o net/ipv4/udp_offload.c:504:17: warning: no previous prototype for 'udp4_gro_receive' [-Wmissing-prototypes] 504 | struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv4/udp_offload.c:584:29: warning: no previous prototype for 'udp4_gro_complete' [-Wmissing-prototypes] 584 | INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff) | ^~~~~~~~~~~~~~~~~ CHECK 
net/ipv6/udp_offload.c net/ipv6/udp_offload.c:115:16: warning: symbol 'udp6_gro_receive' was not declared. Should it be static? net/ipv6/udp_offload.c:148:29: warning: symbol 'udp6_gro_complete' was not declared. Should it be static? CC net/ipv6/udp_offload.o net/ipv6/udp_offload.c:115:17: warning: no previous prototype for 'udp6_gro_receive' [-Wmissing-prototypes] 115 | struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv6/udp_offload.c:148:29: warning: no previous prototype for 'udp6_gro_complete' [-Wmissing-prototypes] 148 | INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff) | ^~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/udp.h | 7 +++++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 4 +--- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index a8fa6c0c6ded..5a2d677432f0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -27,6 +27,7 @@ #include #include #include +#include /** * struct udp_skb_cb - UDP(-Lite) private variables @@ -166,6 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, __be16 dport); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d8dbff1dd1fa..ea6ed6d487ed 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1432,8 +1432,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, return inet_gso_segment(skb, features); } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, - struct sk_buff *)); struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; @@ -1606,7 +1604,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) return -EINVAL; } -INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); int inet_gro_complete(struct sk_buff *skb, int nhoff) { __be16 newlen = htons(skb->len - nhoff); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 78eec5b42385..a80f90bf3ae7 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "ip6_offload.h" @@ -178,8 +179,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, struct sk_buff *skb) { @@ -318,7 +317,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, return inet_gro_receive(head, skb); } -INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; -- cgit v1.2.3-59-g8ed1b From 6f3934576853a4fa60dea74ac8822f0f016ef9e8 Mon 
Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 22 Jun 2020 18:07:41 -0500 Subject: net: ipv6: Use struct_size() helper and kcalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. Also, remove unnecessary function ipv6_rpl_srh_alloc_size() and replace kzalloc() with kcalloc(), which has a 2-factor argument form for multiplication. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- include/net/rpl.h | 6 ------ net/ipv6/exthdrs.c | 2 +- net/ipv6/rpl_iptunnel.c | 3 +-- 3 files changed, 2 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/rpl.h b/include/net/rpl.h index dceff60e8baf..308ef0a05cae 100644 --- a/include/net/rpl.h +++ b/include/net/rpl.h @@ -26,12 +26,6 @@ static inline void rpl_exit(void) {} /* Worst decompression memory usage ipv6 address (16) + pad 7 */ #define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) -static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n) -{ - return sizeof(struct ipv6_rpl_sr_hdr) + - ((n + 1) * sizeof(struct in6_addr)); -} - size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, unsigned char cmpre); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 5a8bbcdcaf2b..e9b366994475 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -580,7 +580,7 @@ looped_back: hdr->segments_left--; i = n - hdr->segments_left; - buf = kzalloc(ipv6_rpl_srh_alloc_size(n + 1) * 2, GFP_ATOMIC); + buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC); if (unlikely(!buf)) { kfree_skb(skb); return -1; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index c3ececd7cfc1..5fdf3ebb953f 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -136,8 +136,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt, oldhdr = ipv6_hdr(skb); - buf = kzalloc(ipv6_rpl_srh_alloc_size(srh->segments_left - 1) * 2, - GFP_ATOMIC); + buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC); if (!buf) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 0cc8fecf041d3e5285380da62cc6662bdc942d8c Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 22 Jun 2020 20:35:32 +0530 Subject: net: phy: Allow mdio buses to auto-probe c45 devices The mdiobus_scan logic is currently hardcoded to only work with c22 devices. This works fairly well in most cases, but its possible that a c45 device doesn't respond despite being a standard phy. If the parent hardware is capable, it makes sense to scan for c22 devices before falling back to c45. As we want this to reflect the capabilities of the STA, lets add a field to the mii_bus structure to represent the capability. That way devices can opt into the extended scanning. Existing users should continue to default to c22 only scanning as long as they are zero'ing the structure before use. Signed-off-by: Jeremy Linton Signed-off-by: Calvin Johnson Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_bus.c | 18 ++++++++++++++++-- include/linux/phy.h | 8 ++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6ceee82b2839..ab9233c558d8 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -739,10 +739,24 @@ EXPORT_SYMBOL(mdiobus_free); */ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { - struct phy_device *phydev; + struct phy_device *phydev = ERR_PTR(-ENODEV); int err; - phydev = get_phy_device(bus, addr, false); + switch (bus->probe_capabilities) { + case MDIOBUS_NO_CAP: + case MDIOBUS_C22: + phydev = get_phy_device(bus, addr, false); + break; + case MDIOBUS_C45: + phydev = get_phy_device(bus, addr, true); + break; + case MDIOBUS_C22_C45: + phydev = get_phy_device(bus, addr, false); + if (IS_ERR(phydev)) + phydev = get_phy_device(bus, addr, true); + break; + } + if (IS_ERR(phydev)) return phydev; diff --git a/include/linux/phy.h b/include/linux/phy.h index 9248dd2ce4ca..7860d56c6bf5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -298,6 +298,14 @@ struct mii_bus { /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; + /* bus capabilities, used for probing */ + enum { + MDIOBUS_NO_CAP = 0, + MDIOBUS_C22, + MDIOBUS_C45, + MDIOBUS_C22_C45, + } probe_capabilities; + /* protect access to the shared element */ struct mutex shared_lock; -- cgit v1.2.3-59-g8ed1b From 428d2459cceb77357b81c242ca22462a6a904817 Mon Sep 17 00:00:00 2001 From: Petr Vaněk Date: Sat, 30 May 2020 14:39:12 +0200 Subject: xfrm: introduce oseq-may-wrap flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RFC 4303 in section 3.3.3 suggests to disable anti-replay for manually distributed ICVs in which case the sender does not need to monitor or reset the counter. However, the sender still increments the counter and when it reaches the maximum value, the counter rolls over back to zero. This patch introduces new extra_flag XFRM_SA_XFLAG_OSEQ_MAY_WRAP which allows sequence number to cycle in outbound packets if set. This flag is used only in legacy and bmp code, because esn should not be negotiated if anti-replay is disabled (see note in 3.3.3 section). 
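A condensed sketch of the behaviour described above, assuming a simplified view of the legacy (non-ESN) sequence-number handling; the real checks are in the net/xfrm/xfrm_replay.c hunks that follow:

/* Illustrative sketch only, not a literal copy of the kernel code. */
static int oseq_advance(struct xfrm_state *x)
{
	if (unlikely(++x->replay.oseq == 0) &&
	    !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) {
		x->replay.oseq--;	/* stay at the last valid value */
		return -EOVERFLOW;	/* default: never let oseq wrap */
	}
	return 0;	/* flag set: 0 is accepted and the counter cycles */
}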
Signed-off-by: Petr Vaněk Acked-by: Christophe Gouault Signed-off-by: Steffen Klassert --- include/uapi/linux/xfrm.h | 1 + net/xfrm/xfrm_replay.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ff7cfdc6cb44..ffc6a5391bb7 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -387,6 +387,7 @@ struct xfrm_usersa_info { }; #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 struct xfrm_usersa_id { xfrm_address_t daddr; diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 98943f8d01aa..c6a4338a0d08 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -89,7 +89,8 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; XFRM_SKB_CB(skb)->seq.output.hi = 0; - if (unlikely(x->replay.oseq == 0)) { + if (unlikely(x->replay.oseq == 0) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -168,7 +169,8 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; XFRM_SKB_CB(skb)->seq.output.hi = 0; - if (unlikely(replay_esn->oseq == 0)) { + if (unlikely(replay_esn->oseq == 0) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -572,7 +574,8 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk XFRM_SKB_CB(skb)->seq.output.hi = 0; xo->seq.hi = 0; - if (unlikely(oseq < x->replay.oseq)) { + if (unlikely(oseq < x->replay.oseq) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -611,7 +614,8 @@ static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff XFRM_SKB_CB(skb)->seq.output.hi = 0; xo->seq.hi = 0; - if (unlikely(oseq < replay_esn->oseq)) { + if (unlikely(oseq < replay_esn->oseq) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; -- cgit v1.2.3-59-g8ed1b From 4f47e8ab6ab796b5380f74866fa5287aca4dcc58 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 22 Jun 2020 16:40:29 +0800 Subject: xfrm: policy: match with both mark and mask on user interfaces In commit ed17b8d377ea ("xfrm: fix a warning in xfrm_policy_insert_list"), it would take 'priority' to make a policy unique, and allow duplicated policies with different 'priority' to be added, which is not expected by userland, as Tobias reported in strongswan. To fix this duplicated policies issue, and also fix the issue in commit ed17b8d377ea ("xfrm: fix a warning in xfrm_policy_insert_list"), when doing add/del/get/update on user interfaces, this patch is to change to look up a policy with both mark and mask by doing: mark.v == pol->mark.v && mark.m == pol->mark.m and leave the check: (mark & pol->mark.m) == pol->mark.v for tx/rx path only. As the userland expects an exact mark and mask match to manage policies. v1->v2: - make xfrm_policy_mark_match inline and fix the changelog as Tobias suggested. 
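The distinction between the management-path and datapath match rules can be sketched as two predicates (simplified; xfrm_policy_mark_match() in the hunks below implements the first one):

/* Sketch only: what "exact" vs. "masked" matching means for policy marks. */
static bool policy_mgmt_match(const struct xfrm_mark *req,
			      const struct xfrm_mark *pol)
{
	/* add/del/get/update from userspace: value and mask must both match */
	return req->v == pol->v && req->m == pol->m;
}

static bool policy_datapath_match(u32 pkt_mark, const struct xfrm_mark *pol)
{
	/* tx/rx lookup: the packet mark only has to match under the mask */
	return (pkt_mark & pol->m) == pol->v;
}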
Fixes: 295fae568885 ("xfrm: Allow user space manipulation of SPD mark") Fixes: ed17b8d377ea ("xfrm: fix a warning in xfrm_policy_insert_list") Reported-by: Tobias Brunner Tested-by: Tobias Brunner Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 11 +++++++---- net/key/af_key.c | 4 ++-- net/xfrm/xfrm_policy.c | 39 ++++++++++++++++----------------------- net/xfrm/xfrm_user.c | 18 +++++++++++------- 4 files changed, 36 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c7d213c9f9d8..5c20953c8deb 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1630,13 +1630,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, + const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8, - int dir, u32 id, int delete, int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, + const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); diff --git a/net/key/af_key.c b/net/key/af_key.c index b67ed3a8486c..979c579afc63 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2400,7 +2400,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2651,7 +2651,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 564aa6492e7c..6847b3579f54 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_unlock_bh(&pq->hold_queue.lock); } -static bool xfrm_policy_mark_match(struct xfrm_policy *policy, - struct xfrm_policy *pol) +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark, + struct xfrm_policy *pol) { - if (policy->mark.v == pol->mark.v && - policy->priority == pol->priority) - return true; - - return false; + return mark->v == pol->mark.v && mark->m == pol->mark.m; } static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed) @@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && 
!WARN_ON(delpol)) { delpol = pol; @@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) @@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) EXPORT_SYMBOL(xfrm_policy_insert); static struct xfrm_policy * -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx) { struct xfrm_policy *pol; @@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v && + xfrm_policy_mark_match(mark, pol) && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, pol->security)) return pol; @@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, return NULL; } -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, - struct xfrm_sec_ctx *ctx, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, struct xfrm_selector *sel, + struct xfrm_sec_ctx *ctx, int delete, int *err) { struct xfrm_pol_inexact_bin *bin = NULL; struct xfrm_policy *pol, *ret = NULL; @@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, u32 id, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && - pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v) { + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) { xfrm_pol_hold(pol); if (delete) { *err = security_xfrm_policy_delete( diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e6cfaa680ef3..fbb7d9d06478 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; p = nlmsg_data(nlh); @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, + p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct 
sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, - ctx, delete, &err); + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, + &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); @@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, + 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } -- cgit v1.2.3-59-g8ed1b From 7e06886bbfca73717e45a0f20cdb4053793c191b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 12 Jun 2020 18:00:55 +0200 Subject: drm/mipi-dbi: Remove ->enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The atomic helpers try really hard to not lose track of things, duplicating enabled tracking in the driver is at best confusing. Double-enabling or disabling is a bug in atomic helpers. In the fb_dirty function we can just assume that the fb always exists, simple display pipe helpers guarantee that the crtc is only enabled together with the output, so we always have a primary plane around. Now in the update function we need to be a notch more careful, since that can also get called when the crtc is off. And we don't want to upload frames when that's the case, so filter that out too. 
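The resulting pattern in the ->update() hook can be sketched as follows (it mirrors the mipi_dbi_pipe_update() hunk below; the framebuffer check moves into the dirty helper as a WARN_ON):

/* Sketch: skip flushing while the CRTC is off instead of tracking a
 * driver-private "enabled" flag. */
static void example_pipe_update(struct drm_simple_display_pipe *pipe,
				struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_rect rect;

	if (!pipe->crtc.state->active)
		return;		/* CRTC off: nothing to upload */

	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
		mipi_dbi_fb_dirty(state->fb, &rect);	/* fb is non-NULL here */
}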
Reviewed-by: Noralf Trønnes Acked-by: David Lechner Reviewed-by: Emil Velikov Signed-off-by: Daniel Vetter Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Daniel Vetter Cc: David Lechner Link: https://patchwork.freedesktop.org/patch/msgid/20200612160056.2082681-7-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_mipi_dbi.c | 16 ++++++---------- drivers/gpu/drm/tiny/ili9225.c | 12 +++--------- drivers/gpu/drm/tiny/st7586.c | 11 +++-------- include/drm/drm_mipi_dbi.h | 5 ----- 4 files changed, 12 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index fd8d672972a9..79532b9a324a 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -268,7 +268,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) bool full; void *tr; - if (!dbidev->enabled) + if (WARN_ON(!fb)) return; if (!drm_dev_enter(fb->dev, &idx)) @@ -314,6 +314,9 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *state = pipe->plane.state; struct drm_rect rect; + if (!pipe->crtc.state->active) + return; + if (drm_atomic_helper_damage_merged(old_state, state, &rect)) mipi_dbi_fb_dirty(state->fb, &rect); } @@ -325,9 +328,8 @@ EXPORT_SYMBOL(mipi_dbi_pipe_update); * @crtc_state: CRTC state * @plane_state: Plane state * - * This function sets &mipi_dbi->enabled, flushes the whole framebuffer and - * enables the backlight. Drivers can use this in their - * &drm_simple_display_pipe_funcs->enable callback. + * Flushes the whole framebuffer and enables the backlight. Drivers can use this + * in their &drm_simple_display_pipe_funcs->enable callback. * * Note: Drivers which don't use mipi_dbi_pipe_update() because they have custom * framebuffer flushing, can't use this function since they both use the same @@ -349,7 +351,6 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, if (!drm_dev_enter(&dbidev->drm, &idx)) return; - dbidev->enabled = true; mipi_dbi_fb_dirty(fb, &rect); backlight_enable(dbidev->backlight); @@ -390,13 +391,8 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); - if (!dbidev->enabled) - return; - DRM_DEBUG_KMS("\n"); - dbidev->enabled = false; - if (dbidev->backlight) backlight_disable(dbidev->backlight); else diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 16400064320f..97a77262d791 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -89,9 +89,6 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) bool full; void *tr; - if (!dbidev->enabled) - return; - if (!drm_dev_enter(fb->dev, &idx)) return; @@ -167,6 +164,9 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *state = pipe->plane.state; struct drm_rect rect; + if (!pipe->crtc.state->active) + return; + if (drm_atomic_helper_damage_merged(old_state, state, &rect)) ili9225_fb_dirty(state->fb, &rect); } @@ -275,7 +275,6 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017); - dbidev->enabled = true; ili9225_fb_dirty(fb, &rect); out_exit: drm_dev_exit(idx); @@ -295,16 +294,11 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) * unplug. 
*/ - if (!dbidev->enabled) - return; - ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000); msleep(50); ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0007); msleep(50); ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0a02); - - dbidev->enabled = false; } static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par, diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 1311e5df8721..d05de03891f8 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -118,9 +118,6 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) struct mipi_dbi *dbi = &dbidev->dbi; int start, end, idx, ret = 0; - if (!dbidev->enabled) - return; - if (!drm_dev_enter(fb->dev, &idx)) return; @@ -161,6 +158,9 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *state = pipe->plane.state; struct drm_rect rect; + if (!pipe->crtc.state->active) + return; + if (drm_atomic_helper_damage_merged(old_state, state, &rect)) st7586_fb_dirty(state->fb, &rect); } @@ -237,7 +237,6 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, msleep(100); - dbidev->enabled = true; st7586_fb_dirty(fb, &rect); mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); @@ -258,11 +257,7 @@ static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe) DRM_DEBUG_KMS("\n"); - if (!dbidev->enabled) - return; - mipi_dbi_command(&dbidev->dbi, MIPI_DCS_SET_DISPLAY_OFF); - dbidev->enabled = false; } static const u32 st7586_formats[] = { diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 4d0e49c0ed2c..c2827ceaba0d 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -94,11 +94,6 @@ struct mipi_dbi_dev { */ struct drm_display_mode mode; - /** - * @enabled: Pipeline is enabled - */ - bool enabled; - /** * @tx_buf: Buffer used for transfer (copy clip rect area) */ -- cgit v1.2.3-59-g8ed1b From 65959522f8060659e308977f09f3eb7b7af5e43f Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:40 +0300 Subject: RDMA: Add support to dump resource tracker in RAW format Add support to get resource dump in raw format. It enable drivers to return the entire device specific QP/CQ/MR context without a need from the driver to set each field separately. The raw query returns only the device specific data, general data is still returned by using the existing queries. 
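On the driver side, a raw fill callback might look roughly like this (hypothetical sketch, not taken from this patch; "mydrv_qp" and "hw_ctx" are made-up names used only for illustration):

/* Hypothetical driver callback: dump the device-specific QP context as a
 * single opaque binary attribute. */
static int mydrv_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mydrv_qp *qp = container_of(ibqp, struct mydrv_qp, ibqp);

	return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW,
		       sizeof(qp->hw_ctx), &qp->hw_ctx);
}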
Example: $ rdma res show mr dev mlx5_1 mrn 2 -r -j [{"ifindex":7,"ifname":"mlx5_1", "data":[0,4,255,254,0,0,0,0,0,0,0,0,16,28,0,216,...]}] Link: https://lore.kernel.org/r/20200623113043.1228482-9-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 3 + drivers/infiniband/core/nldev.c | 180 +++++++++++++++++++++++++-------------- include/rdma/ib_verbs.h | 3 + include/uapi/rdma/rdma_netlink.h | 8 ++ 4 files changed, 132 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index cbe95e729cf1..1335ed1f1e4a 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2619,8 +2619,11 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, enable_driver); SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry); SET_DEVICE_OP(dev_ops, fill_res_cq_entry); + SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); + SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw); SET_DEVICE_OP(dev_ops, fill_res_qp_entry); + SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 394e307c342c..1051b5622b62 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -114,6 +114,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, @@ -446,11 +447,11 @@ static int fill_res_name_pid(struct sk_buff *msg, return err ? 
-EMSGSIZE : 0; } -static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, - struct rdma_restrack_entry *res, uint32_t port) +static int fill_res_qp_entry_query(struct sk_buff *msg, + struct rdma_restrack_entry *res, + struct ib_device *dev, + struct ib_qp *qp) { - struct ib_qp *qp = container_of(res, struct ib_qp, res); - struct ib_device *dev = qp->device; struct ib_qp_init_attr qp_init_attr; struct ib_qp_attr qp_attr; int ret; @@ -459,16 +460,6 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, if (ret) return ret; - if (port && port != qp_attr.port_num) - return -EAGAIN; - - /* In create_qp() port is not set yet */ - if (qp_attr.port_num && - nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num)) - goto err; - - if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num)) - goto err; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, qp_attr.dest_qp_num)) @@ -492,13 +483,6 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) goto err; - if (!rdma_is_kernel_res(res) && - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) - goto err; - - if (fill_res_name_pid(msg, res)) - goto err; - if (dev->ops.fill_res_qp_entry) return dev->ops.fill_res_qp_entry(msg, qp); return 0; @@ -506,6 +490,48 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, err: return -EMSGSIZE; } +static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_qp *qp = container_of(res, struct ib_qp, res); + struct ib_device *dev = qp->device; + int ret; + + if (port && port != qp->port) + return -EAGAIN; + + /* In create_qp() port is not set yet */ + if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) + return -EINVAL; + + ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); + if (ret) + return -EMSGSIZE; + + if (!rdma_is_kernel_res(res) && + nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) + return -EMSGSIZE; + + ret = fill_res_name_pid(msg, res); + if (ret) + return -EMSGSIZE; + + return fill_res_qp_entry_query(msg, res, dev, qp); +} + +static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_qp *qp = container_of(res, struct ib_qp, res); + struct ib_device *dev = qp->device; + + if (port && port != qp->port) + return -EAGAIN; + if (!dev->ops.fill_res_qp_entry_raw) + return -EINVAL; + return dev->ops.fill_res_qp_entry_raw(msg, qp); +} + static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { @@ -565,34 +591,42 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct ib_device *dev = cq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) - goto err; + return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) - goto err; + return -EMSGSIZE; /* Poll context is only valid for kernel CQs */ if (rdma_is_kernel_res(res) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) - goto err; + return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) - goto err; + return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) - goto err; + return -EMSGSIZE; if (!rdma_is_kernel_res(res) && 
nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, cq->uobject->uevent.uobject.context->res.id)) - goto err; + return -EMSGSIZE; if (fill_res_name_pid(msg, res)) - goto err; + return -EMSGSIZE; - if (dev->ops.fill_res_cq_entry) - return dev->ops.fill_res_cq_entry(msg, cq); - return 0; + return (dev->ops.fill_res_cq_entry) ? + dev->ops.fill_res_cq_entry(msg, cq) : 0; +} -err: return -EMSGSIZE; +static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_cq *cq = container_of(res, struct ib_cq, res); + struct ib_device *dev = cq->device; + + if (!dev->ops.fill_res_cq_entry_raw) + return -EINVAL; + return dev->ops.fill_res_cq_entry_raw(msg, cq); } static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, @@ -603,30 +637,39 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) - goto err; + return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) - goto err; + return -EMSGSIZE; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, RDMA_NLDEV_ATTR_PAD)) - goto err; + return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) - goto err; + return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) - goto err; + return -EMSGSIZE; if (fill_res_name_pid(msg, res)) - goto err; + return -EMSGSIZE; - if (dev->ops.fill_res_mr_entry) - return dev->ops.fill_res_mr_entry(msg, mr); - return 0; + return (dev->ops.fill_res_mr_entry) ? + dev->ops.fill_res_mr_entry(msg, mr) : + 0; +} -err: return -EMSGSIZE; +static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_mr *mr = container_of(res, struct ib_mr, res); + struct ib_device *dev = mr->pd->device; + + if (!dev->ops.fill_res_mr_entry_raw) + return -EINVAL; + return dev->ops.fill_res_mr_entry_raw(msg, mr); } static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, @@ -1149,7 +1192,6 @@ static int nldev_res_get_dumpit(struct sk_buff *skb, struct nldev_fill_res_entry { enum rdma_nldev_attr nldev_attr; - enum rdma_nldev_command nldev_cmd; u8 flags; u32 entry; u32 id; @@ -1161,40 +1203,34 @@ enum nldev_res_flags { static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, .id = RDMA_NLDEV_ATTR_RES_LQPN, }, [RDMA_RESTRACK_CM_ID] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CM_IDN, }, [RDMA_RESTRACK_CQ] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CQN, }, [RDMA_RESTRACK_MR] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, .id = RDMA_NLDEV_ATTR_RES_MRN, }, [RDMA_RESTRACK_PD] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, .id = RDMA_NLDEV_ATTR_RES_PDN, }, [RDMA_RESTRACK_COUNTER] = { - .nldev_cmd = RDMA_NLDEV_CMD_STAT_GET, .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, .entry = 
RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, @@ -1253,7 +1289,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, - RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd), + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NL_GET_OP(nlh->nlmsg_type)), 0, 0); if (fill_nldev_handle(msg, device)) { @@ -1331,7 +1368,8 @@ static int res_get_common_dumpit(struct sk_buff *skb, } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd), + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 0, NLM_F_MULTI); if (fill_nldev_handle(skb, device)) { @@ -1413,26 +1451,29 @@ err_index: return ret; } -#define RES_GET_FUNCS(name, type) \ - static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ +#define RES_GET_FUNCS(name, type) \ + static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ struct netlink_callback *cb) \ - { \ - return res_get_common_dumpit(skb, cb, type, \ - fill_res_##name##_entry); \ - } \ - static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ - struct nlmsghdr *nlh, \ + { \ + return res_get_common_dumpit(skb, cb, type, \ + fill_res_##name##_entry); \ + } \ + static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ + struct nlmsghdr *nlh, \ struct netlink_ext_ack *extack) \ - { \ - return res_get_common_doit(skb, nlh, extack, type, \ - fill_res_##name##_entry); \ + { \ + return res_get_common_doit(skb, nlh, extack, type, \ + fill_res_##name##_entry); \ } RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); +RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); +RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); +RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); static LIST_HEAD(link_ops); @@ -2117,6 +2158,21 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { .doit = nldev_stat_del_doit, .flags = RDMA_NL_ADMIN_PERM, }, + [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = { + .doit = nldev_res_get_qp_raw_doit, + .dump = nldev_res_get_qp_raw_dumpit, + .flags = RDMA_NL_ADMIN_PERM, + }, + [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = { + .doit = nldev_res_get_cq_raw_doit, + .dump = nldev_res_get_cq_raw_dumpit, + .flags = RDMA_NL_ADMIN_PERM, + }, + [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = { + .doit = nldev_res_get_mr_raw_doit, + .dump = nldev_res_get_mr_raw_dumpit, + .flags = RDMA_NL_ADMIN_PERM, + }, }; void __init nldev_init(void) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9127cffafccd..77106ff3cd26 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2583,8 +2583,11 @@ struct ib_device_ops { * Allows rdma drivers to add their own restrack attributes. 
*/ int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); + int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr); int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); + int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq); int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); + int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp); int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); /* Device lifecycle callbacks */ diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 8e277783fa96..3826143d420d 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -287,6 +287,12 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_STAT_DEL, + RDMA_NLDEV_CMD_RES_QP_GET_RAW, + + RDMA_NLDEV_CMD_RES_CQ_GET_RAW, + + RDMA_NLDEV_CMD_RES_MR_GET_RAW, + RDMA_NLDEV_NUM_OPS }; @@ -525,6 +531,8 @@ enum rdma_nldev_attr { */ RDMA_NLDEV_ATTR_DEV_DIM, /* u8 */ + RDMA_NLDEV_ATTR_RES_RAW, /* binary */ + /* * Always the end */ -- cgit v1.2.3-59-g8ed1b From 62fb45d317c5fa08e4db093441835bb6f33acbd7 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 18 Jun 2020 16:42:06 +0200 Subject: USB: ch9: add "USB_" prefix in front of TEST defines For some reason, the TEST_ defines in the usb/ch9.h files did not have the USB_ prefix on it, making it a bit confusing when reading the file, as well as not the nicest thing to do in a uapi file. So fix that up and add the USB_ prefix on to them, and fix up all in-kernel usages. This included deleting the duplicate copy in the net2272.h file. Cc: Felipe Balbi Cc: Michal Simek Cc: Mathias Nyman Cc: Pawel Laszczak Cc: YueHaibing Cc: Nathan Chancellor Cc: Jason Yan Cc: Jia-Ju Bai Cc: Stephen Boyd Cc: Christophe JAILLET Cc: Arnd Bergmann Cc: Jules Irenge Cc: Alan Stern Cc: Thinh Nguyen Cc: Rob Gill Cc: Macpaul Lin Acked-by: Minas Harutyunyan Acked-by: Bin Liu Acked-by: Chunfeng Yun Acked-by: Peter Chen Link: https://lore.kernel.org/r/20200618144206.2655890-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/ep0.c | 8 ++++---- drivers/usb/chipidea/udc.c | 10 +++++----- drivers/usb/common/debug.c | 10 +++++----- drivers/usb/dwc2/debugfs.c | 20 ++++++++++---------- drivers/usb/dwc2/gadget.c | 10 +++++----- drivers/usb/dwc3/debugfs.c | 20 ++++++++++---------- drivers/usb/dwc3/ep0.c | 10 +++++----- drivers/usb/dwc3/gadget.c | 10 +++++----- drivers/usb/gadget/udc/bdc/bdc_ep.c | 10 +++++----- drivers/usb/gadget/udc/gr_udc.c | 4 ++-- drivers/usb/gadget/udc/mv_udc_core.c | 2 +- drivers/usb/gadget/udc/net2272.c | 2 +- drivers/usb/gadget/udc/net2272.h | 5 ----- drivers/usb/gadget/udc/udc-xilinx.c | 4 ++-- drivers/usb/host/xhci-hub.c | 7 ++++--- drivers/usb/misc/ehset.c | 8 ++++---- drivers/usb/mtu3/mtu3_gadget_ep0.c | 16 ++++++++-------- drivers/usb/musb/musb_gadget_ep0.c | 20 ++++++++------------ drivers/usb/musb/musb_virthub.c | 20 ++++++++++---------- include/uapi/linux/usb/ch9.h | 10 +++++----- 20 files changed, 99 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index 82645a2a0f52..04a522f5ae58 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -328,10 +328,10 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, return -EINVAL; switch (tmode >> 8) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case 
USB_TEST_PACKET: cdns3_set_register_bit(&priv_dev->regs->usb_cmd, USB_CMD_STMODE | USB_STS_TMODE_SEL(tmode - 1)); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index db0cfde0cc3c..4beb25888917 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1215,11 +1215,11 @@ __acquires(ci->lock) case USB_DEVICE_TEST_MODE: tmode = le16_to_cpu(req.wIndex) >> 8; switch (tmode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: ci->test_mode = tmode; err = isr_setup_status_phase( ci); diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c index 92a986aeaa5d..410acd670ca7 100644 --- a/drivers/usb/common/debug.c +++ b/drivers/usb/common/debug.c @@ -53,15 +53,15 @@ static const char *usb_decode_device_feature(u16 wValue) static const char *usb_decode_test_mode(u16 wIndex) { switch (wIndex) { - case TEST_J: + case USB_TEST_J: return ": TEST_J"; - case TEST_K: + case USB_TEST_K: return ": TEST_K"; - case TEST_SE0_NAK: + case USB_TEST_SE0_NAK: return ": TEST_SE0_NAK"; - case TEST_PACKET: + case USB_TEST_PACKET: return ": TEST_PACKET"; - case TEST_FORCE_EN: + case USB_TEST_FORCE_ENABLE: return ": TEST_FORCE_EN"; default: return ": UNKNOWN"; diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c index 3a0dcbfbc827..aaafd463d72a 100644 --- a/drivers/usb/dwc2/debugfs.c +++ b/drivers/usb/dwc2/debugfs.c @@ -37,15 +37,15 @@ static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t return -EFAULT; if (!strncmp(buf, "test_j", 6)) - testmode = TEST_J; + testmode = USB_TEST_J; else if (!strncmp(buf, "test_k", 6)) - testmode = TEST_K; + testmode = USB_TEST_K; else if (!strncmp(buf, "test_se0_nak", 12)) - testmode = TEST_SE0_NAK; + testmode = USB_TEST_SE0_NAK; else if (!strncmp(buf, "test_packet", 11)) - testmode = TEST_PACKET; + testmode = USB_TEST_PACKET; else if (!strncmp(buf, "test_force_enable", 17)) - testmode = TEST_FORCE_EN; + testmode = USB_TEST_FORCE_ENABLE; else testmode = 0; @@ -78,19 +78,19 @@ static int testmode_show(struct seq_file *s, void *unused) case 0: seq_puts(s, "no test\n"); break; - case TEST_J: + case USB_TEST_J: seq_puts(s, "test_j\n"); break; - case TEST_K: + case USB_TEST_K: seq_puts(s, "test_k\n"); break; - case TEST_SE0_NAK: + case USB_TEST_SE0_NAK: seq_puts(s, "test_se0_nak\n"); break; - case TEST_PACKET: + case USB_TEST_PACKET: seq_puts(s, "test_packet\n"); break; - case TEST_FORCE_EN: + case USB_TEST_FORCE_ENABLE: seq_puts(s, "test_force_enable\n"); break; default: diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 12b98b466287..38fc46b0c026 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1561,11 +1561,11 @@ int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) dctl &= ~DCTL_TSTCTL_MASK; switch (testmode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: dctl |= testmode << DCTL_TSTCTL_SHIFT; break; default: diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index 6d9de334e46a..14dc6a37305d 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -466,19 +466,19 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) case 0: seq_printf(s, "no test\n"); break; - case 
TEST_J: + case USB_TEST_J: seq_printf(s, "test_j\n"); break; - case TEST_K: + case USB_TEST_K: seq_printf(s, "test_k\n"); break; - case TEST_SE0_NAK: + case USB_TEST_SE0_NAK: seq_printf(s, "test_se0_nak\n"); break; - case TEST_PACKET: + case USB_TEST_PACKET: seq_printf(s, "test_packet\n"); break; - case TEST_FORCE_EN: + case USB_TEST_FORCE_ENABLE: seq_printf(s, "test_force_enable\n"); break; default: @@ -506,15 +506,15 @@ static ssize_t dwc3_testmode_write(struct file *file, return -EFAULT; if (!strncmp(buf, "test_j", 6)) - testmode = TEST_J; + testmode = USB_TEST_J; else if (!strncmp(buf, "test_k", 6)) - testmode = TEST_K; + testmode = USB_TEST_K; else if (!strncmp(buf, "test_se0_nak", 12)) - testmode = TEST_SE0_NAK; + testmode = USB_TEST_SE0_NAK; else if (!strncmp(buf, "test_packet", 11)) - testmode = TEST_PACKET; + testmode = USB_TEST_PACKET; else if (!strncmp(buf, "test_force_enable", 17)) - testmode = TEST_FORCE_EN; + testmode = USB_TEST_FORCE_ENABLE; else testmode = 0; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 6dee4dabc0a4..8dd69728add3 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -425,11 +425,11 @@ static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state, return -EINVAL; switch (wIndex >> 8) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: dwc->test_mode_nr = wIndex >> 8; dwc->test_mode = true; break; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 80c3ef134e41..0b59b2f1cf26 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -46,11 +46,11 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) reg &= ~DWC3_DCTL_TSTCTRL_MASK; switch (mode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: reg |= mode << 1; break; default: diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d49c6dc1082d..ba250cf75bef 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -927,11 +927,11 @@ static int bdc_set_test_mode(struct bdc *bdc) usb2_pm &= ~BDC_PTC_MASK; dev_dbg(bdc->dev, "%s\n", __func__); switch (bdc->test_mode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: usb2_pm |= bdc->test_mode << 28; break; default: diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 7164ad9800f1..345e28d76709 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -912,9 +912,9 @@ static int gr_device_request(struct gr_udc *dev, u8 type, u8 request, return gr_ep0_respond_empty(dev); case USB_DEVICE_TEST_MODE: - /* The hardware does not support TEST_FORCE_EN */ + /* The hardware does not support USB_TEST_FORCE_ENABLE */ test = index >> 8; - if (test >= TEST_J && test <= TEST_PACKET) { + if (test >= USB_TEST_J && test <= USB_TEST_PACKET) { dev->test_mode = test; return gr_ep0_respond(dev, NULL, 0, gr_ep0_testmode_complete); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index cafde053788b..69289717d856 100644 --- 
a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -1502,7 +1502,7 @@ out: static void mv_udc_testmode(struct mv_udc *udc, u16 index) { - if (index <= TEST_FORCE_EN) { + if (index <= USB_TEST_FORCE_ENABLE) { udc->test_mode = index; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 928057b206f1..fbbe62513545 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -1688,7 +1688,7 @@ net2272_set_test_mode(struct net2272 *dev, int mode) net2272_write(dev, USBTEST, mode); /* load test packet */ - if (mode == TEST_PACKET) { + if (mode == USB_TEST_PACKET) { /* switch to 8 bit mode */ net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) & ~(1 << DATA_WIDTH)); diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h index 8e644627992d..87d0ab9ffeeb 100644 --- a/drivers/usb/gadget/udc/net2272.h +++ b/drivers/usb/gadget/udc/net2272.h @@ -105,11 +105,6 @@ #define USBTEST 0x32 #define TEST_MODE_SELECT 0 #define NORMAL_OPERATION 0 -#define TEST_J 1 -#define TEST_K 2 -#define TEST_SE0_NAK 3 -#define TEST_PACKET 4 -#define TEST_FORCE_ENABLE 5 #define XCVRDIAG 0x33 #define FORCE_FULL_SPEED 2 #define FORCE_HIGH_SPEED 3 diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 709553bdb233..d5e9d20c097d 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -2097,9 +2097,9 @@ static int xudc_probe(struct platform_device *pdev) /* Check for IP endianness */ udc->write_fn = xudc_write32_be; udc->read_fn = xudc_read32_be; - udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, TEST_J); + udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, USB_TEST_J); if ((udc->read_fn(udc->addr + XUSB_TESTMODE_OFFSET)) - != TEST_J) { + != USB_TEST_J) { udc->write_fn = xudc_write32; udc->read_fn = xudc_read32; } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index f37316d2c8fa..073c54e42223 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -612,7 +612,7 @@ static void xhci_port_set_test_mode(struct xhci_hcd *xhci, temp |= test_mode << PORT_TEST_MODE_SHIFT; writel(temp, port->addr + PORTPMSC); xhci->test_mode = test_mode; - if (test_mode == TEST_FORCE_EN) + if (test_mode == USB_TEST_FORCE_ENABLE) xhci_start(xhci); } @@ -666,7 +666,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) xhci_err(xhci, "Not in test mode, do nothing.\n"); return 0; } - if (xhci->test_mode == TEST_FORCE_EN && + if (xhci->test_mode == USB_TEST_FORCE_ENABLE && !(xhci->xhc_state & XHCI_STATE_HALTED)) { retval = xhci_halt(xhci); if (retval) @@ -1421,7 +1421,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* 4.19.6 Port Test Modes (USB2 Test Mode) */ if (hcd->speed != HCD_USB2) goto error; - if (test_mode > TEST_FORCE_EN || test_mode < TEST_J) + if (test_mode > USB_TEST_FORCE_ENABLE || + test_mode < USB_TEST_J) goto error; retval = xhci_enter_test_mode(xhci, test_mode, wIndex, &flags); diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c index 7895d61e733b..2752e1f4f4d0 100644 --- a/drivers/usb/misc/ehset.c +++ b/drivers/usb/misc/ehset.c @@ -33,28 +33,28 @@ static int ehset_probe(struct usb_interface *intf, ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_SE0_NAK << 8) | portnum, + (USB_TEST_SE0_NAK << 8) | portnum, NULL, 0, 1000); break; case 
TEST_J_PID: ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_J << 8) | portnum, + (USB_TEST_J << 8) | portnum, NULL, 0, 1000); break; case TEST_K_PID: ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_K << 8) | portnum, + (USB_TEST_K << 8) | portnum, NULL, 0, 1000); break; case TEST_PACKET_PID: ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_PACKET << 8) | portnum, + (USB_TEST_PACKET << 8) | portnum, NULL, 0, 1000); break; case TEST_HS_HOST_PORT_SUSPEND_RESUME: diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c index 2be182bd793a..563a0a2e970d 100644 --- a/drivers/usb/mtu3/mtu3_gadget_ep0.c +++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c @@ -278,20 +278,20 @@ static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup) u32 value; switch (le16_to_cpu(setup->wIndex) >> 8) { - case TEST_J: - dev_dbg(mtu->dev, "TEST_J\n"); + case USB_TEST_J: + dev_dbg(mtu->dev, "USB_TEST_J\n"); mtu->test_mode_nr = TEST_J_MODE; break; - case TEST_K: - dev_dbg(mtu->dev, "TEST_K\n"); + case USB_TEST_K: + dev_dbg(mtu->dev, "USB_TEST_K\n"); mtu->test_mode_nr = TEST_K_MODE; break; - case TEST_SE0_NAK: - dev_dbg(mtu->dev, "TEST_SE0_NAK\n"); + case USB_TEST_SE0_NAK: + dev_dbg(mtu->dev, "USB_TEST_SE0_NAK\n"); mtu->test_mode_nr = TEST_SE0_NAK_MODE; break; - case TEST_PACKET: - dev_dbg(mtu->dev, "TEST_PACKET\n"); + case USB_TEST_PACKET: + dev_dbg(mtu->dev, "USB_TEST_PACKET\n"); mtu->test_mode_nr = TEST_PACKET_MODE; break; default: diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 91a5027b5c1f..0ae3e0be043e 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -311,27 +311,23 @@ __acquires(musb->lock) goto stall; switch (ctrlrequest->wIndex >> 8) { - case 1: - pr_debug("TEST_J\n"); - /* TEST_J */ + case USB_TEST_J: + pr_debug("USB_TEST_J\n"); musb->test_mode_nr = MUSB_TEST_J; break; - case 2: - /* TEST_K */ - pr_debug("TEST_K\n"); + case USB_TEST_K: + pr_debug("USB_TEST_K\n"); musb->test_mode_nr = MUSB_TEST_K; break; - case 3: - /* TEST_SE0_NAK */ - pr_debug("TEST_SE0_NAK\n"); + case USB_TEST_SE0_NAK: + pr_debug("USB_TEST_SE0_NAK\n"); musb->test_mode_nr = MUSB_TEST_SE0_NAK; break; - case 4: - /* TEST_PACKET */ - pr_debug("TEST_PACKET\n"); + case USB_TEST_PACKET: + pr_debug("USB_TEST_PACKET\n"); musb->test_mode_nr = MUSB_TEST_PACKET; break; diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index a84ec27c4c12..cb7ae297a3af 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -385,25 +385,25 @@ int musb_hub_control( wIndex >>= 8; switch (wIndex) { - case 1: - pr_debug("TEST_J\n"); + case USB_TEST_J: + pr_debug("USB_TEST_J\n"); temp = MUSB_TEST_J; break; - case 2: - pr_debug("TEST_K\n"); + case USB_TEST_K: + pr_debug("USB_TEST_K\n"); temp = MUSB_TEST_K; break; - case 3: - pr_debug("TEST_SE0_NAK\n"); + case USB_TEST_SE0_NAK: + pr_debug("USB_TEST_SE0_NAK\n"); temp = MUSB_TEST_SE0_NAK; break; - case 4: - pr_debug("TEST_PACKET\n"); + case USB_TEST_PACKET: + pr_debug("USB_TEST_PACKET\n"); temp = MUSB_TEST_PACKET; musb_load_testpacket(musb); break; - case 5: - pr_debug("TEST_FORCE_ENABLE\n"); + case USB_TEST_FORCE_ENABLE: + pr_debug("USB_TEST_FORCE_ENABLE\n"); temp = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_HS; diff --git 
a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index b1ed2ccfe9cf..48766fdf6580 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -138,11 +138,11 @@ * Test Mode Selectors * See USB 2.0 spec Table 9-7 */ -#define TEST_J 1 -#define TEST_K 2 -#define TEST_SE0_NAK 3 -#define TEST_PACKET 4 -#define TEST_FORCE_EN 5 +#define USB_TEST_J 1 +#define USB_TEST_K 2 +#define USB_TEST_SE0_NAK 3 +#define USB_TEST_PACKET 4 +#define USB_TEST_FORCE_ENABLE 5 /* Status Type */ #define USB_STATUS_TYPE_STANDARD 0 -- cgit v1.2.3-59-g8ed1b From feee1b8c490821f29aae416a5422795f5a29263d Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 24 Jun 2020 15:33:29 +0300 Subject: gcc-plugins/stackleak: Use asm instrumentation to avoid useless register saving The kernel code instrumentation in stackleak gcc plugin works in two stages. At first, stack tracking is added to GIMPLE representation of every function (except some special cases). And later, when stack frame size info is available, stack tracking is removed from the RTL representation of the functions with small stack frame. There is an unwanted side-effect for these functions: some of them do useless work with caller-saved registers. As an example of such case, proc_sys_write without() instrumentation: 55 push %rbp 41 b8 01 00 00 00 mov $0x1,%r8d 48 89 e5 mov %rsp,%rbp e8 11 ff ff ff callq ffffffff81284610 5d pop %rbp c3 retq 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1) 00 00 00 proc_sys_write() with instrumentation: 55 push %rbp 48 89 e5 mov %rsp,%rbp 41 56 push %r14 41 55 push %r13 41 54 push %r12 53 push %rbx 49 89 f4 mov %rsi,%r12 48 89 fb mov %rdi,%rbx 49 89 d5 mov %rdx,%r13 49 89 ce mov %rcx,%r14 4c 89 f1 mov %r14,%rcx 4c 89 ea mov %r13,%rdx 4c 89 e6 mov %r12,%rsi 48 89 df mov %rbx,%rdi 41 b8 01 00 00 00 mov $0x1,%r8d e8 f2 fe ff ff callq ffffffff81298e80 5b pop %rbx 41 5c pop %r12 41 5d pop %r13 41 5e pop %r14 5d pop %rbp c3 retq 66 0f 1f 84 00 00 00 nopw 0x0(%rax,%rax,1) 00 00 Let's improve the instrumentation to avoid this: 1. Make stackleak_track_stack() save all register that it works with. Use no_caller_saved_registers attribute for that function. This attribute is available for x86_64 and i386 starting from gcc-7. 2. Insert calling stackleak_track_stack() in asm: asm volatile("call stackleak_track_stack" :: "r" (current_stack_pointer)) Here we use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h. The input constraint is taken into account during gcc shrink-wrapping optimization. It is needed to be sure that stackleak_track_stack() call is inserted after the prologue of the containing function, when the stack frame is prepared. 
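For illustration only (this sketch is not part of the patch; the real call is emitted by the gcc plugin at the GIMPLE level), a hand-written C equivalent of what the instrumentation boils down to on an x86-64 build with gcc >= 7, where current_stack_pointer is the global register variable from arch/x86/include/asm/asm.h:

	/* Sketch under the assumptions above; not the actual kernel code. */
	register unsigned long current_stack_pointer asm("rsp");

	/* The callee saves every register it touches, so callers save nothing. */
	void __attribute__((no_caller_saved_registers)) stackleak_track_stack(void);

	static void instrumented_function(void)
	{
		/*
		 * The "r" input ties the asm statement to this frame's stack
		 * pointer, so shrink-wrapping keeps the call after the prologue.
		 */
		asm volatile("call stackleak_track_stack"
			     :: "r" (current_stack_pointer));
	}
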
This work is a deep reengineering of the idea described on grsecurity blog https://grsecurity.net/resolving_an_unfortunate_stackleak_interaction Signed-off-by: Alexander Popov Acked-by: Miguel Ojeda Link: https://lore.kernel.org/r/20200624123330.83226-5-alex.popov@linux.com Signed-off-by: Kees Cook --- include/linux/compiler_attributes.h | 13 +++ kernel/stackleak.c | 16 +-- scripts/Makefile.gcc-plugins | 2 + scripts/gcc-plugins/stackleak_plugin.c | 205 ++++++++++++++++++++++++++++----- 4 files changed, 196 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index cdf016596659..551ea8cb70b1 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -37,6 +37,7 @@ # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 +# define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) @@ -175,6 +176,18 @@ */ #define __mode(x) __attribute__((__mode__(x))) +/* + * Optional: only supported since gcc >= 7 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86 + * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers + */ +#if __has_attribute(__no_caller_saved_registers__) +# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__)) +#else +# define __no_caller_saved_registers +#endif + /* * Optional: not supported by clang * diff --git a/kernel/stackleak.c b/kernel/stackleak.c index b193a59fc05b..a8fc9ae1d03d 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -104,19 +104,9 @@ asmlinkage void notrace stackleak_erase(void) } NOKPROBE_SYMBOL(stackleak_erase); -void __used notrace stackleak_track_stack(void) +void __used __no_caller_saved_registers notrace stackleak_track_stack(void) { - /* - * N.B. stackleak_erase() fills the kernel stack with the poison value, - * which has the register width. That code assumes that the value - * of 'lowest_stack' is aligned on the register width boundary. - * - * That is true for x86 and x86_64 because of the kernel stack - * alignment on these platforms (for details, see 'cc_stack_align' in - * arch/x86/Makefile). Take care of that when you port STACKLEAK to - * new platforms. 
- */ - unsigned long sp = (unsigned long)&sp; + unsigned long sp = current_stack_pointer; /* * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than @@ -125,6 +115,8 @@ void __used notrace stackleak_track_stack(void) */ BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH); + /* 'lowest_stack' should be aligned on the register width boundary */ + sp = ALIGN(sp, sizeof(unsigned long)); if (sp < current->lowest_stack && sp >= (unsigned long)task_stack_page(current) + sizeof(unsigned long)) { diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index 5f7df50cfe7a..952e46876329 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -33,6 +33,8 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ += -DSTACKLEAK_PLUGIN gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE) +gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ + += -fplugin-arg-stackleak_plugin-arch=$(SRCARCH) ifdef CONFIG_GCC_PLUGIN_STACKLEAK DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable endif diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c index cc75eeba0be1..a18b0d4af456 100644 --- a/scripts/gcc-plugins/stackleak_plugin.c +++ b/scripts/gcc-plugins/stackleak_plugin.c @@ -20,7 +20,7 @@ * * Debugging: * - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(), - * print_rtl() and print_simple_rtl(); + * print_rtl_single() and debug_rtx(); * - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in * Makefile.gcc-plugins to see the verbose dumps of the gcc passes; * - use gcc -E to understand the preprocessing shenanigans; @@ -32,6 +32,7 @@ __visible int plugin_is_GPL_compatible; static int track_frame_size = -1; +static bool build_for_x86 = false; static const char track_function[] = "stackleak_track_stack"; /* @@ -43,32 +44,31 @@ static GTY(()) tree track_function_decl; static struct plugin_info stackleak_plugin_info = { .version = "201707101337", .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n" + "arch=target_arch\tspecify target build arch\n" "disable\t\tdo not activate the plugin\n" }; -static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after) +static void add_stack_tracking_gcall(gimple_stmt_iterator *gsi, bool after) { gimple stmt; - gcall *stackleak_track_stack; + gcall *gimple_call; cgraph_node_ptr node; basic_block bb; - /* Insert call to void stackleak_track_stack(void) */ + /* Insert calling stackleak_track_stack() */ stmt = gimple_build_call(track_function_decl, 0); - stackleak_track_stack = as_a_gcall(stmt); - if (after) { - gsi_insert_after(gsi, stackleak_track_stack, - GSI_CONTINUE_LINKING); - } else { - gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT); - } + gimple_call = as_a_gcall(stmt); + if (after) + gsi_insert_after(gsi, gimple_call, GSI_CONTINUE_LINKING); + else + gsi_insert_before(gsi, gimple_call, GSI_SAME_STMT); /* Update the cgraph */ - bb = gimple_bb(stackleak_track_stack); + bb = gimple_bb(gimple_call); node = cgraph_get_create_node(track_function_decl); gcc_assert(node); cgraph_create_edge(cgraph_get_node(current_function_decl), node, - stackleak_track_stack, bb->count, + gimple_call, bb->count, compute_call_stmt_bb_frequency(current_function_decl, bb)); } @@ -85,6 +85,79 @@ static bool is_alloca(gimple stmt) return false; } +static tree get_current_stack_pointer_decl(void) +{ + varpool_node_ptr node; + + 
FOR_EACH_VARIABLE(node) { + tree var = NODE_DECL(node); + tree name = DECL_NAME(var); + + if (DECL_NAME_LENGTH(var) != sizeof("current_stack_pointer") - 1) + continue; + + if (strcmp(IDENTIFIER_POINTER(name), "current_stack_pointer")) + continue; + + return var; + } + + return NULL_TREE; +} + +static void add_stack_tracking_gasm(gimple_stmt_iterator *gsi, bool after) +{ + gasm *asm_call = NULL; + tree sp_decl, input; + vec *inputs = NULL; + + /* 'no_caller_saved_registers' is currently supported only for x86 */ + gcc_assert(build_for_x86); + + /* + * Insert calling stackleak_track_stack() in asm: + * asm volatile("call stackleak_track_stack" + * :: "r" (current_stack_pointer)) + * Use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h. + * This constraint is taken into account during gcc shrink-wrapping + * optimization. It is needed to be sure that stackleak_track_stack() + * call is inserted after the prologue of the containing function, + * when the stack frame is prepared. + */ + sp_decl = get_current_stack_pointer_decl(); + if (sp_decl == NULL_TREE) { + add_stack_tracking_gcall(gsi, after); + return; + } + input = build_tree_list(NULL_TREE, build_const_char_string(2, "r")); + input = chainon(NULL_TREE, build_tree_list(input, sp_decl)); + vec_safe_push(inputs, input); + asm_call = gimple_build_asm_vec("call stackleak_track_stack", + inputs, NULL, NULL, NULL); + gimple_asm_set_volatile(asm_call, true); + if (after) + gsi_insert_after(gsi, asm_call, GSI_CONTINUE_LINKING); + else + gsi_insert_before(gsi, asm_call, GSI_SAME_STMT); + update_stmt(asm_call); +} + +static void add_stack_tracking(gimple_stmt_iterator *gsi, bool after) +{ + /* + * The 'no_caller_saved_registers' attribute is used for + * stackleak_track_stack(). If the compiler supports this attribute for + * the target arch, we can add calling stackleak_track_stack() in asm. + * That improves performance: we avoid useless operations with the + * caller-saved registers in the functions from which we will remove + * stackleak_track_stack() call during the stackleak_cleanup pass. + */ + if (lookup_attribute_spec(get_identifier("no_caller_saved_registers"))) + add_stack_tracking_gasm(gsi, after); + else + add_stack_tracking_gcall(gsi, after); +} + /* * Work with the GIMPLE representation of the code. Insert the * stackleak_track_stack() call after alloca() and into the beginning @@ -94,7 +167,7 @@ static unsigned int stackleak_instrument_execute(void) { basic_block bb, entry_bb; bool prologue_instrumented = false, is_leaf = true; - gimple_stmt_iterator gsi; + gimple_stmt_iterator gsi = { 0 }; /* * ENTRY_BLOCK_PTR is a basic block which represents possible entry @@ -123,7 +196,7 @@ static unsigned int stackleak_instrument_execute(void) continue; /* Insert stackleak_track_stack() call after alloca() */ - stackleak_add_track_stack(&gsi, true); + add_stack_tracking(&gsi, true); if (bb == entry_bb) prologue_instrumented = true; } @@ -168,7 +241,7 @@ static unsigned int stackleak_instrument_execute(void) bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun)); } gsi = gsi_after_labels(bb); - stackleak_add_track_stack(&gsi, false); + add_stack_tracking(&gsi, false); return 0; } @@ -182,21 +255,10 @@ static bool large_stack_frame(void) #endif } -/* - * Work with the RTL representation of the code. - * Remove the unneeded stackleak_track_stack() calls from the functions - * which don't call alloca() and don't have a large enough stack frame size. 
- */ -static unsigned int stackleak_cleanup_execute(void) +static void remove_stack_tracking_gcall(void) { rtx_insn *insn, *next; - if (cfun->calls_alloca) - return 0; - - if (large_stack_frame()) - return 0; - /* * Find stackleak_track_stack() calls. Loop through the chain of insns, * which is an RTL representation of the code for a function. @@ -257,6 +319,84 @@ static unsigned int stackleak_cleanup_execute(void) } #endif } +} + +static bool remove_stack_tracking_gasm(void) +{ + bool removed = false; + rtx_insn *insn, *next; + + /* 'no_caller_saved_registers' is currently supported only for x86 */ + gcc_assert(build_for_x86); + + /* + * Find stackleak_track_stack() asm calls. Loop through the chain of + * insns, which is an RTL representation of the code for a function. + * + * The example of a matching insn: + * (insn 11 5 12 2 (parallel [ (asm_operands/v + * ("call stackleak_track_stack") ("") 0 + * [ (reg/v:DI 7 sp [ current_stack_pointer ]) ] + * [ (asm_input:DI ("r")) ] []) + * (clobber (reg:CC 17 flags)) ]) -1 (nil)) + */ + for (insn = get_insns(); insn; insn = next) { + rtx body; + + next = NEXT_INSN(insn); + + /* Check the expression code of the insn */ + if (!NONJUMP_INSN_P(insn)) + continue; + + /* + * Check the expression code of the insn body, which is an RTL + * Expression (RTX) describing the side effect performed by + * that insn. + */ + body = PATTERN(insn); + + if (GET_CODE(body) != PARALLEL) + continue; + + body = XVECEXP(body, 0, 0); + + if (GET_CODE(body) != ASM_OPERANDS) + continue; + + if (strcmp(ASM_OPERANDS_TEMPLATE(body), + "call stackleak_track_stack")) { + continue; + } + + delete_insn_and_edges(insn); + gcc_assert(!removed); + removed = true; + } + + return removed; +} + +/* + * Work with the RTL representation of the code. + * Remove the unneeded stackleak_track_stack() calls from the functions + * which don't call alloca() and don't have a large enough stack frame size. + */ +static unsigned int stackleak_cleanup_execute(void) +{ + bool removed = false; + + if (cfun->calls_alloca) + return 0; + + if (large_stack_frame()) + return 0; + + if (lookup_attribute_spec(get_identifier("no_caller_saved_registers"))) + removed = remove_stack_tracking_gasm(); + + if (!removed) + remove_stack_tracking_gcall(); return 0; } @@ -392,6 +532,15 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, plugin_name, argv[i].key, argv[i].value); return 1; } + } else if (!strcmp(argv[i].key, "arch")) { + if (!argv[i].value) { + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), + plugin_name, argv[i].key); + return 1; + } + + if (!strcmp(argv[i].value, "x86")) + build_for_x86 = true; } else { error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -- cgit v1.2.3-59-g8ed1b From 28bc24fc46f9c9f39ddefb424d6072041805b563 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:33 +0200 Subject: vc: separate state There are two copies of some members of struct vc_data. This is because we need to save them and restore later. Move these memebers to a separate structure called vc_state. So now instead of members like: vc_x, vc_y and vc_saved_x, vc_saved_y we have state and saved_state (of type: struct vc_state) containing state.x, state.y and saved_state.x, saved_state.y This change: * makes clear what is saved & restored * eases save & restore by using memcpy (see save_cur and restore_cur) Finally, we document the newly added struct vc_state using kernel-doc. 
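For orientation before the full diff, a minimal standalone sketch (not the kernel code; only a few of the real vc_state fields are shown, and the container is named vc_data_sketch to avoid suggesting it is the real struct vc_data) of how grouping the saved members lets save_cur()/restore_cur() collapse into a single memcpy():

	#include <string.h>

	/* Simplified stand-in for the real struct vc_state in console_struct.h. */
	struct vc_state {
		unsigned int x, y;		/* cursor position */
		unsigned char color;		/* foreground & background */
		unsigned int charset : 1;	/* G0 or G1 */
		unsigned int intensity : 2;	/* half-bright/normal/bold */
	};

	struct vc_data_sketch {
		struct vc_state state, saved_state;
		/* ...the rest of the console state stays outside vc_state... */
	};

	static void save_cur(struct vc_data_sketch *vc)
	{
		memcpy(&vc->saved_state, &vc->state, sizeof(vc->state));
	}

	static void restore_cur(struct vc_data_sketch *vc)
	{
		memcpy(&vc->state, &vc->saved_state, sizeof(vc->state));
	}
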
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-1-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/accessibility/braille/braille_console.c | 10 +- drivers/staging/speakup/main.c | 28 +- drivers/tty/vt/vt.c | 327 ++++++++++++------------ drivers/video/console/mdacon.c | 2 +- drivers/video/console/sticon.c | 6 +- drivers/video/console/vgacon.c | 22 +- drivers/video/fbdev/core/bitblit.c | 6 +- drivers/video/fbdev/core/fbcon.c | 8 +- drivers/video/fbdev/core/fbcon_ccw.c | 4 +- drivers/video/fbdev/core/fbcon_cw.c | 4 +- drivers/video/fbdev/core/fbcon_ud.c | 4 +- drivers/video/fbdev/core/tileblit.c | 4 +- include/linux/console_struct.h | 54 ++-- 13 files changed, 239 insertions(+), 240 deletions(-) (limited to 'include') diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index a8f7c278b691..c2b452af6806 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c @@ -109,16 +109,16 @@ static void braille_write(u16 *buf) /* Follow the VC cursor*/ static void vc_follow_cursor(struct vc_data *vc) { - vc_x = vc->vc_x - (vc->vc_x % WIDTH); - vc_y = vc->vc_y; - lastvc_x = vc->vc_x; - lastvc_y = vc->vc_y; + vc_x = vc->state.x - (vc->state.x % WIDTH); + vc_y = vc->state.y; + lastvc_x = vc->state.x; + lastvc_y = vc->state.y; } /* Maybe the VC cursor moved, if so follow it */ static void vc_maybe_cursor_moved(struct vc_data *vc) { - if (vc->vc_x != lastvc_x || vc->vc_y != lastvc_y) + if (vc->state.x != lastvc_x || vc->state.y != lastvc_y) vc_follow_cursor(vc); } diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 02471d932d71..ddfd12afe3b9 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -263,8 +263,8 @@ static unsigned char get_attributes(struct vc_data *vc, u16 *pos) static void speakup_date(struct vc_data *vc) { - spk_x = spk_cx = vc->vc_x; - spk_y = spk_cy = vc->vc_y; + spk_x = spk_cx = vc->state.x; + spk_y = spk_cy = vc->state.y; spk_pos = spk_cp = vc->vc_pos; spk_old_attr = spk_attr; spk_attr = get_attributes(vc, (u_short *)spk_pos); @@ -1551,9 +1551,9 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag) */ is_cursor = value + 1; old_cursor_pos = vc->vc_pos; - old_cursor_x = vc->vc_x; - old_cursor_y = vc->vc_y; - speakup_console[vc->vc_num]->ht.cy = vc->vc_y; + old_cursor_x = vc->state.x; + old_cursor_y = vc->state.y; + speakup_console[vc->vc_num]->ht.cy = vc->state.y; cursor_con = vc->vc_num; if (cursor_track == CT_Highlight) reset_highlight_buffers(vc); @@ -1574,8 +1574,8 @@ static void update_color_buffer(struct vc_data *vc, const u16 *ic, int len) i = 0; if (speakup_console[vc_num]->ht.highsize[bi] == 0) { speakup_console[vc_num]->ht.rpos[bi] = vc->vc_pos; - speakup_console[vc_num]->ht.rx[bi] = vc->vc_x; - speakup_console[vc_num]->ht.ry[bi] = vc->vc_y; + speakup_console[vc_num]->ht.rx[bi] = vc->state.x; + speakup_console[vc_num]->ht.ry[bi] = vc->state.y; } while ((hi < COLOR_BUFFER_SIZE) && (i < len)) { if (ic[i] > 32) { @@ -1664,9 +1664,9 @@ static int speak_highlight(struct vc_data *vc) return 0; hc = get_highlight_color(vc); if (hc != -1) { - d = vc->vc_y - speakup_console[vc_num]->ht.cy; + d = vc->state.y - speakup_console[vc_num]->ht.cy; if ((d == 1) || (d == -1)) - if (speakup_console[vc_num]->ht.ry[hc] != vc->vc_y) + if (speakup_console[vc_num]->ht.ry[hc] != vc->state.y) return 0; spk_parked |= 0x01; spk_do_flush(); @@ -1693,8 +1693,8 @@ 
static void cursor_done(struct timer_list *unused) } speakup_date(vc); if (win_enabled) { - if (vc->vc_x >= win_left && vc->vc_x <= win_right && - vc->vc_y >= win_top && vc->vc_y <= win_bottom) { + if (vc->state.x >= win_left && vc->state.x <= win_right && + vc->state.y >= win_top && vc->state.y <= win_bottom) { spk_keydown = 0; is_cursor = 0; goto out; @@ -1757,7 +1757,7 @@ static void speakup_con_write(struct vc_data *vc, u16 *str, int len) if (!spin_trylock_irqsave(&speakup_info.spinlock, flags)) /* Speakup output, discard */ return; - if (spk_bell_pos && spk_keydown && (vc->vc_x == spk_bell_pos - 1)) + if (spk_bell_pos && spk_keydown && (vc->state.x == spk_bell_pos - 1)) bleep(3); if ((is_cursor) || (cursor_track == read_all_mode)) { if (cursor_track == CT_Highlight) @@ -1766,8 +1766,8 @@ static void speakup_con_write(struct vc_data *vc, u16 *str, int len) return; } if (win_enabled) { - if (vc->vc_x >= win_left && vc->vc_x <= win_right && - vc->vc_y >= win_top && vc->vc_y <= win_bottom) { + if (vc->state.x >= win_left && vc->state.x <= win_right && + vc->state.y >= win_top && vc->state.y <= win_bottom) { spin_unlock_irqrestore(&speakup_info.spinlock, flags); return; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 48a8199f7845..76f52935e0c8 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -381,7 +381,7 @@ static void vc_uniscr_putc(struct vc_data *vc, char32_t uc) struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) - uniscr->lines[vc->vc_y][vc->vc_x] = uc; + uniscr->lines[vc->state.y][vc->state.x] = uc; } static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr) @@ -389,8 +389,8 @@ static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr) struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) { - char32_t *ln = uniscr->lines[vc->vc_y]; - unsigned int x = vc->vc_x, cols = vc->vc_cols; + char32_t *ln = uniscr->lines[vc->state.y]; + unsigned int x = vc->state.x, cols = vc->vc_cols; memmove(&ln[x + nr], &ln[x], (cols - x - nr) * sizeof(*ln)); memset32(&ln[x], ' ', nr); @@ -402,8 +402,8 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr) struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) { - char32_t *ln = uniscr->lines[vc->vc_y]; - unsigned int x = vc->vc_x, cols = vc->vc_cols; + char32_t *ln = uniscr->lines[vc->state.y]; + unsigned int x = vc->state.x, cols = vc->vc_cols; memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); memset32(&ln[cols - nr], ' ', nr); @@ -416,7 +416,7 @@ static void vc_uniscr_clear_line(struct vc_data *vc, unsigned int x, struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) { - char32_t *ln = uniscr->lines[vc->vc_y]; + char32_t *ln = uniscr->lines[vc->state.y]; memset32(&ln[x], ' ', nr); } @@ -750,10 +750,11 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, static void update_attr(struct vc_data *vc) { - vc->vc_attr = build_attr(vc, vc->vc_color, vc->vc_intensity, - vc->vc_blink, vc->vc_underline, - vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); - vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; + vc->vc_attr = build_attr(vc, vc->state.color, vc->state.intensity, + vc->state.blink, vc->state.underline, + vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); + vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, 1, + vc->state.blink, 0, vc->vc_decscnm, 0) << 8); } /* Note: inverting the screen twice should revert to the original state */ @@ -842,12 +843,12 @@ static void 
insert_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_insert(vc, nr); - scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2); + scr_memmovew(p + nr, p, (vc->vc_cols - vc->state.x - nr) * 2); scr_memsetw(p, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, - vc->vc_cols - vc->vc_x); + vc->vc_cols - vc->state.x); } static void delete_char(struct vc_data *vc, unsigned int nr) @@ -855,13 +856,13 @@ static void delete_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_delete(vc, nr); - scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2); - scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char, + scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); + scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, - vc->vc_cols - vc->vc_x); + vc->vc_cols - vc->state.x); } static int softcursor_original = -1; @@ -880,7 +881,7 @@ static void add_softcursor(struct vc_data *vc) if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700; scr_writew(i, (u16 *) vc->vc_pos); if (con_should_update(vc)) - vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x); + vc->vc_sw->con_putc(vc, i, vc->state.y, vc->state.x); } static void hide_softcursor(struct vc_data *vc) @@ -889,7 +890,7 @@ static void hide_softcursor(struct vc_data *vc) scr_writew(softcursor_original, (u16 *)vc->vc_pos); if (con_should_update(vc)) vc->vc_sw->con_putc(vc, softcursor_original, - vc->vc_y, vc->vc_x); + vc->state.y, vc->state.x); softcursor_original = -1; } } @@ -927,7 +928,8 @@ static void set_origin(struct vc_data *vc) vc->vc_origin = (unsigned long)vc->vc_screenbuf; vc->vc_visible_origin = vc->vc_origin; vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size; - vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x; + vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->state.y + + 2 * vc->state.x; } static void save_screen(struct vc_data *vc) @@ -1250,8 +1252,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, new_origin = (long) newscreen; new_scr_end = new_origin + new_screen_size; - if (vc->vc_y > new_rows) { - if (old_rows - vc->vc_y < new_rows) { + if (vc->state.y > new_rows) { + if (old_rows - vc->state.y < new_rows) { /* * Cursor near the bottom, copy contents from the * bottom of buffer @@ -1262,7 +1264,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, * Cursor is in no man's land, copy 1/2 screenful * from the top and bottom of cursor position */ - first_copied_row = (vc->vc_y - new_rows/2); + first_copied_row = (vc->state.y - new_rows/2); } old_origin += first_copied_row * old_row_size; } else @@ -1296,7 +1298,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, /* do part of a reset_terminal() */ vc->vc_top = 0; vc->vc_bottom = vc->vc_rows; - gotoxy(vc, vc->vc_x, vc->vc_y); + gotoxy(vc, vc->state.x, vc->state.y); save_cur(vc); if (tty) { @@ -1431,12 +1433,12 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y) int min_y, max_y; if (new_x < 0) - vc->vc_x = 0; + vc->state.x = 0; else { if (new_x >= vc->vc_cols) - vc->vc_x = vc->vc_cols - 1; + vc->state.x = vc->vc_cols - 1; else - vc->vc_x = new_x; + vc->state.x = new_x; } if (vc->vc_decom) { @@ -1447,12 +1449,13 @@ static void gotoxy(struct vc_data *vc, int 
new_x, int new_y) max_y = vc->vc_rows; } if (new_y < min_y) - vc->vc_y = min_y; + vc->state.y = min_y; else if (new_y >= max_y) - vc->vc_y = max_y - 1; + vc->state.y = max_y - 1; else - vc->vc_y = new_y; - vc->vc_pos = vc->vc_origin + vc->vc_y * vc->vc_size_row + (vc->vc_x<<1); + vc->state.y = new_y; + vc->vc_pos = vc->vc_origin + vc->state.y * vc->vc_size_row + + (vc->state.x << 1); vc->vc_need_wrap = 0; } @@ -1479,10 +1482,10 @@ static void lf(struct vc_data *vc) /* don't scroll if above bottom of scrolling region, or * if below scrolling region */ - if (vc->vc_y + 1 == vc->vc_bottom) + if (vc->state.y + 1 == vc->vc_bottom) con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1); - else if (vc->vc_y < vc->vc_rows - 1) { - vc->vc_y++; + else if (vc->state.y < vc->vc_rows - 1) { + vc->state.y++; vc->vc_pos += vc->vc_size_row; } vc->vc_need_wrap = 0; @@ -1494,10 +1497,10 @@ static void ri(struct vc_data *vc) /* don't scroll if below top of scrolling region, or * if above scrolling region */ - if (vc->vc_y == vc->vc_top) + if (vc->state.y == vc->vc_top) con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1); - else if (vc->vc_y > 0) { - vc->vc_y--; + else if (vc->state.y > 0) { + vc->state.y--; vc->vc_pos -= vc->vc_size_row; } vc->vc_need_wrap = 0; @@ -1505,16 +1508,16 @@ static void ri(struct vc_data *vc) static inline void cr(struct vc_data *vc) { - vc->vc_pos -= vc->vc_x << 1; - vc->vc_need_wrap = vc->vc_x = 0; + vc->vc_pos -= vc->state.x << 1; + vc->vc_need_wrap = vc->state.x = 0; notify_write(vc, '\r'); } static inline void bs(struct vc_data *vc) { - if (vc->vc_x) { + if (vc->state.x) { vc->vc_pos -= 2; - vc->vc_x--; + vc->state.x--; vc->vc_need_wrap = 0; notify_write(vc, '\b'); } @@ -1532,16 +1535,16 @@ static void csi_J(struct vc_data *vc, int vpar) switch (vpar) { case 0: /* erase from cursor to end of display */ - vc_uniscr_clear_line(vc, vc->vc_x, - vc->vc_cols - vc->vc_x); - vc_uniscr_clear_lines(vc, vc->vc_y + 1, - vc->vc_rows - vc->vc_y - 1); + vc_uniscr_clear_line(vc, vc->state.x, + vc->vc_cols - vc->state.x); + vc_uniscr_clear_lines(vc, vc->state.y + 1, + vc->vc_rows - vc->state.y - 1); count = (vc->vc_scr_end - vc->vc_pos) >> 1; start = (unsigned short *)vc->vc_pos; break; case 1: /* erase from start to cursor */ - vc_uniscr_clear_line(vc, 0, vc->vc_x + 1); - vc_uniscr_clear_lines(vc, 0, vc->vc_y); + vc_uniscr_clear_line(vc, 0, vc->state.x + 1); + vc_uniscr_clear_lines(vc, 0, vc->state.y); count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1; start = (unsigned short *)vc->vc_origin; break; @@ -1571,20 +1574,20 @@ static void csi_K(struct vc_data *vc, int vpar) switch (vpar) { case 0: /* erase from cursor to end of line */ offset = 0; - count = vc->vc_cols - vc->vc_x; + count = vc->vc_cols - vc->state.x; break; case 1: /* erase from start of line to cursor */ - offset = -vc->vc_x; - count = vc->vc_x + 1; + offset = -vc->state.x; + count = vc->state.x + 1; break; case 2: /* erase whole line */ - offset = -vc->vc_x; + offset = -vc->state.x; count = vc->vc_cols; break; default: return; } - vc_uniscr_clear_line(vc, vc->vc_x + offset, count); + vc_uniscr_clear_line(vc, vc->state.x + offset, count); scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); vc->vc_need_wrap = 0; if (con_should_update(vc)) @@ -1597,23 +1600,23 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi if (!vpar) vpar++; - count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar; + count = (vpar > vc->vc_cols - vc->state.x) ? 
(vc->vc_cols - vc->state.x) : vpar; - vc_uniscr_clear_line(vc, vc->vc_x, count); + vc_uniscr_clear_line(vc, vc->state.x, count); scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count); if (con_should_update(vc)) - vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count); + vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, 1, count); vc->vc_need_wrap = 0; } static void default_attr(struct vc_data *vc) { - vc->vc_intensity = 1; - vc->vc_italic = 0; - vc->vc_underline = 0; - vc->vc_reverse = 0; - vc->vc_blink = 0; - vc->vc_color = vc->vc_def_color; + vc->state.intensity = 1; + vc->state.italic = 0; + vc->state.underline = 0; + vc->state.reverse = 0; + vc->state.blink = 0; + vc->state.color = vc->vc_def_color; } struct rgb { u8 r; u8 g; u8 b; }; @@ -1649,19 +1652,19 @@ static void rgb_foreground(struct vc_data *vc, const struct rgb *c) if (hue == 7 && max <= 0x55) { hue = 0; - vc->vc_intensity = 2; + vc->state.intensity = 2; } else if (max > 0xaa) - vc->vc_intensity = 2; + vc->state.intensity = 2; else - vc->vc_intensity = 1; + vc->state.intensity = 1; - vc->vc_color = (vc->vc_color & 0xf0) | hue; + vc->state.color = (vc->state.color & 0xf0) | hue; } static void rgb_background(struct vc_data *vc, const struct rgb *c) { /* For backgrounds, err on the dark side. */ - vc->vc_color = (vc->vc_color & 0x0f) + vc->state.color = (vc->state.color & 0x0f) | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3; } @@ -1712,13 +1715,13 @@ static void csi_m(struct vc_data *vc) default_attr(vc); break; case 1: - vc->vc_intensity = 2; + vc->state.intensity = 2; break; case 2: - vc->vc_intensity = 0; + vc->state.intensity = 0; break; case 3: - vc->vc_italic = 1; + vc->state.italic = 1; break; case 21: /* @@ -1726,21 +1729,21 @@ static void csi_m(struct vc_data *vc) * convert it to a single underline. */ case 4: - vc->vc_underline = 1; + vc->state.underline = 1; break; case 5: - vc->vc_blink = 1; + vc->state.blink = 1; break; case 7: - vc->vc_reverse = 1; + vc->state.reverse = 1; break; case 10: /* ANSI X3.64-1979 (SCO-ish?) * Select primary font, don't display control chars if * defined, don't set bit 8 on output. */ - vc->vc_translate = set_translate(vc->vc_charset == 0 - ? vc->vc_G0_charset - : vc->vc_G1_charset, vc); + vc->vc_translate = set_translate(vc->state.charset == 0 + ? 
vc->state.G0_charset + : vc->state.G1_charset, vc); vc->vc_disp_ctrl = 0; vc->vc_toggle_meta = 0; break; @@ -1761,19 +1764,19 @@ static void csi_m(struct vc_data *vc) vc->vc_toggle_meta = 1; break; case 22: - vc->vc_intensity = 1; + vc->state.intensity = 1; break; case 23: - vc->vc_italic = 0; + vc->state.italic = 0; break; case 24: - vc->vc_underline = 0; + vc->state.underline = 0; break; case 25: - vc->vc_blink = 0; + vc->state.blink = 0; break; case 27: - vc->vc_reverse = 0; + vc->state.reverse = 0; break; case 38: i = vc_t416_color(vc, i, rgb_foreground); @@ -1782,25 +1785,25 @@ static void csi_m(struct vc_data *vc) i = vc_t416_color(vc, i, rgb_background); break; case 39: - vc->vc_color = (vc->vc_def_color & 0x0f) | - (vc->vc_color & 0xf0); + vc->state.color = (vc->vc_def_color & 0x0f) | + (vc->state.color & 0xf0); break; case 49: - vc->vc_color = (vc->vc_def_color & 0xf0) | - (vc->vc_color & 0x0f); + vc->state.color = (vc->vc_def_color & 0xf0) | + (vc->state.color & 0x0f); break; default: if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) { if (vc->vc_par[i] < 100) - vc->vc_intensity = 2; + vc->state.intensity = 2; vc->vc_par[i] -= 60; } if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37) - vc->vc_color = color_table[vc->vc_par[i] - 30] - | (vc->vc_color & 0xf0); + vc->state.color = color_table[vc->vc_par[i] - 30] + | (vc->state.color & 0xf0); else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47) - vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4) - | (vc->vc_color & 0x0f); + vc->state.color = (color_table[vc->vc_par[i] - 40] << 4) + | (vc->state.color & 0x0f); break; } update_attr(vc); @@ -1819,7 +1822,7 @@ static void cursor_report(struct vc_data *vc, struct tty_struct *tty) { char buf[40]; - sprintf(buf, "\033[%d;%dR", vc->vc_y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->vc_x + 1); + sprintf(buf, "\033[%d;%dR", vc->state.y + (vc->vc_decom ? 
vc->vc_top + 1 : 1), vc->state.x + 1); respond_string(buf, tty->port); } @@ -1924,14 +1927,14 @@ static void setterm_command(struct vc_data *vc) case 1: /* set color for underline mode */ if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_ulcolor = color_table[vc->vc_par[1]]; - if (vc->vc_underline) + if (vc->state.underline) update_attr(vc); } break; case 2: /* set color for half intensity mode */ if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_halfcolor = color_table[vc->vc_par[1]]; - if (vc->vc_intensity == 0) + if (vc->state.intensity == 0) update_attr(vc); } break; @@ -1985,8 +1988,8 @@ static void setterm_command(struct vc_data *vc) /* console_lock is held */ static void csi_at(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_cols - vc->vc_x) - nr = vc->vc_cols - vc->vc_x; + if (nr > vc->vc_cols - vc->state.x) + nr = vc->vc_cols - vc->state.x; else if (!nr) nr = 1; insert_char(vc, nr); @@ -1995,19 +1998,19 @@ static void csi_at(struct vc_data *vc, unsigned int nr) /* console_lock is held */ static void csi_L(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_rows - vc->vc_y) - nr = vc->vc_rows - vc->vc_y; + if (nr > vc->vc_rows - vc->state.y) + nr = vc->vc_rows - vc->state.y; else if (!nr) nr = 1; - con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr); + con_scroll(vc, vc->state.y, vc->vc_bottom, SM_DOWN, nr); vc->vc_need_wrap = 0; } /* console_lock is held */ static void csi_P(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_cols - vc->vc_x) - nr = vc->vc_cols - vc->vc_x; + if (nr > vc->vc_cols - vc->state.x) + nr = vc->vc_cols - vc->state.x; else if (!nr) nr = 1; delete_char(vc, nr); @@ -2016,44 +2019,28 @@ static void csi_P(struct vc_data *vc, unsigned int nr) /* console_lock is held */ static void csi_M(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_rows - vc->vc_y) - nr = vc->vc_rows - vc->vc_y; + if (nr > vc->vc_rows - vc->state.y) + nr = vc->vc_rows - vc->state.y; else if (!nr) nr=1; - con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr); + con_scroll(vc, vc->state.y, vc->vc_bottom, SM_UP, nr); vc->vc_need_wrap = 0; } /* console_lock is held (except via vc_init->reset_terminal */ static void save_cur(struct vc_data *vc) { - vc->vc_saved_x = vc->vc_x; - vc->vc_saved_y = vc->vc_y; - vc->vc_s_intensity = vc->vc_intensity; - vc->vc_s_italic = vc->vc_italic; - vc->vc_s_underline = vc->vc_underline; - vc->vc_s_blink = vc->vc_blink; - vc->vc_s_reverse = vc->vc_reverse; - vc->vc_s_charset = vc->vc_charset; - vc->vc_s_color = vc->vc_color; - vc->vc_saved_G0 = vc->vc_G0_charset; - vc->vc_saved_G1 = vc->vc_G1_charset; + memcpy(&vc->saved_state, &vc->state, sizeof(vc->state)); } /* console_lock is held */ static void restore_cur(struct vc_data *vc) { - gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y); - vc->vc_intensity = vc->vc_s_intensity; - vc->vc_italic = vc->vc_s_italic; - vc->vc_underline = vc->vc_s_underline; - vc->vc_blink = vc->vc_s_blink; - vc->vc_reverse = vc->vc_s_reverse; - vc->vc_charset = vc->vc_s_charset; - vc->vc_color = vc->vc_s_color; - vc->vc_G0_charset = vc->vc_saved_G0; - vc->vc_G1_charset = vc->vc_saved_G1; - vc->vc_translate = set_translate(vc->vc_charset ? vc->vc_G1_charset : vc->vc_G0_charset, vc); + memcpy(&vc->state, &vc->saved_state, sizeof(vc->state)); + + gotoxy(vc, vc->state.x, vc->state.y); + vc->vc_translate = set_translate(vc->state.charset ? 
vc->state.G1_charset : + vc->state.G0_charset, vc); update_attr(vc); vc->vc_need_wrap = 0; } @@ -2070,9 +2057,9 @@ static void reset_terminal(struct vc_data *vc, int do_clear) vc->vc_state = ESnormal; vc->vc_priv = EPecma; vc->vc_translate = set_translate(LAT1_MAP, vc); - vc->vc_G0_charset = LAT1_MAP; - vc->vc_G1_charset = GRAF_MAP; - vc->vc_charset = 0; + vc->state.G0_charset = LAT1_MAP; + vc->state.G1_charset = GRAF_MAP; + vc->state.charset = 0; vc->vc_need_wrap = 0; vc->vc_report_mouse = 0; vc->vc_utf = default_utf8; @@ -2136,13 +2123,13 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) bs(vc); return; case 9: - vc->vc_pos -= (vc->vc_x << 1); - while (vc->vc_x < vc->vc_cols - 1) { - vc->vc_x++; - if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) + vc->vc_pos -= (vc->state.x << 1); + while (vc->state.x < vc->vc_cols - 1) { + vc->state.x++; + if (vc->vc_tab_stop[7 & (vc->state.x >> 5)] & (1 << (vc->state.x & 31))) break; } - vc->vc_pos += (vc->vc_x << 1); + vc->vc_pos += (vc->state.x << 1); notify_write(vc, '\t'); return; case 10: case 11: case 12: @@ -2154,13 +2141,13 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) cr(vc); return; case 14: - vc->vc_charset = 1; - vc->vc_translate = set_translate(vc->vc_G1_charset, vc); + vc->state.charset = 1; + vc->vc_translate = set_translate(vc->state.G1_charset, vc); vc->vc_disp_ctrl = 1; return; case 15: - vc->vc_charset = 0; - vc->vc_translate = set_translate(vc->vc_G0_charset, vc); + vc->state.charset = 0; + vc->vc_translate = set_translate(vc->state.G0_charset, vc); vc->vc_disp_ctrl = 0; return; case 24: case 26: @@ -2200,7 +2187,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->state.x >> 5)] |= (1 << (vc->state.x & 31)); return; case 'Z': respond_ID(tty); @@ -2347,42 +2334,42 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) case 'G': case '`': if (vc->vc_par[0]) vc->vc_par[0]--; - gotoxy(vc, vc->vc_par[0], vc->vc_y); + gotoxy(vc, vc->vc_par[0], vc->state.y); return; case 'A': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x, vc->vc_y - vc->vc_par[0]); + gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]); return; case 'B': case 'e': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x, vc->vc_y + vc->vc_par[0]); + gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]); return; case 'C': case 'a': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x + vc->vc_par[0], vc->vc_y); + gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y); return; case 'D': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x - vc->vc_par[0], vc->vc_y); + gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y); return; case 'E': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, 0, vc->vc_y + vc->vc_par[0]); + gotoxy(vc, 0, vc->state.y + vc->vc_par[0]); return; case 'F': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, 0, vc->vc_y - vc->vc_par[0]); + gotoxy(vc, 0, vc->state.y - vc->vc_par[0]); return; case 'd': if (vc->vc_par[0]) vc->vc_par[0]--; - gotoxay(vc, vc->vc_x ,vc->vc_par[0]); + gotoxay(vc, vc->state.x ,vc->vc_par[0]); return; case 'H': case 'f': if (vc->vc_par[0]) @@ -2412,7 +2399,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 'g': if (!vc->vc_par[0]) - vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & 
(vc->state.x >> 5)] &= ~(1 << (vc->state.x & 31)); else if (vc->vc_par[0] == 3) { vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = @@ -2497,28 +2484,28 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case ESsetG0: if (c == '0') - vc->vc_G0_charset = GRAF_MAP; + vc->state.G0_charset = GRAF_MAP; else if (c == 'B') - vc->vc_G0_charset = LAT1_MAP; + vc->state.G0_charset = LAT1_MAP; else if (c == 'U') - vc->vc_G0_charset = IBMPC_MAP; + vc->state.G0_charset = IBMPC_MAP; else if (c == 'K') - vc->vc_G0_charset = USER_MAP; - if (vc->vc_charset == 0) - vc->vc_translate = set_translate(vc->vc_G0_charset, vc); + vc->state.G0_charset = USER_MAP; + if (vc->state.charset == 0) + vc->vc_translate = set_translate(vc->state.G0_charset, vc); vc->vc_state = ESnormal; return; case ESsetG1: if (c == '0') - vc->vc_G1_charset = GRAF_MAP; + vc->state.G1_charset = GRAF_MAP; else if (c == 'B') - vc->vc_G1_charset = LAT1_MAP; + vc->state.G1_charset = LAT1_MAP; else if (c == 'U') - vc->vc_G1_charset = IBMPC_MAP; + vc->state.G1_charset = IBMPC_MAP; else if (c == 'K') - vc->vc_G1_charset = USER_MAP; - if (vc->vc_charset == 1) - vc->vc_translate = set_translate(vc->vc_G1_charset, vc); + vc->state.G1_charset = USER_MAP; + if (vc->state.charset == 1) + vc->vc_translate = set_translate(vc->state.G1_charset, vc); vc->vc_state = ESnormal; return; case ESosc: @@ -2572,7 +2559,7 @@ static void con_flush(struct vc_data *vc, unsigned long draw_from, return; vc->vc_sw->con_putcs(vc, (u16 *)draw_from, - (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, *draw_x); + (u16 *)draw_to - (u16 *)draw_from, vc->state.y, *draw_x); *draw_x = -1; } @@ -2788,14 +2775,14 @@ rescan_last_byte: (vc_attr << 8) + tc, (u16 *) vc->vc_pos); if (con_should_update(vc) && draw_x < 0) { - draw_x = vc->vc_x; + draw_x = vc->state.x; draw_from = vc->vc_pos; } - if (vc->vc_x == vc->vc_cols - 1) { + if (vc->state.x == vc->vc_cols - 1) { vc->vc_need_wrap = vc->vc_decawm; draw_to = vc->vc_pos + 2; } else { - vc->vc_x++; + vc->state.x++; draw_to = (vc->vc_pos += 2); } @@ -2972,25 +2959,25 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) hide_cursor(vc); start = (ushort *)vc->vc_pos; - start_x = vc->vc_x; + start_x = vc->state.x; cnt = 0; while (count--) { c = *b++; if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { if (cnt && con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x); cnt = 0; if (c == 8) { /* backspace */ bs(vc); start = (ushort *)vc->vc_pos; - start_x = vc->vc_x; + start_x = vc->state.x; continue; } if (c != 13) lf(vc); cr(vc); start = (ushort *)vc->vc_pos; - start_x = vc->vc_x; + start_x = vc->state.x; if (c == 10 || c == 13) continue; } @@ -2998,15 +2985,15 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); notify_write(vc, c); cnt++; - if (vc->vc_x == vc->vc_cols - 1) { + if (vc->state.x == vc->vc_cols - 1) { vc->vc_need_wrap = 1; } else { vc->vc_pos += 2; - vc->vc_x++; + vc->state.x++; } } if (cnt && con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x); set_cursor(vc); notify_update(vc); @@ -3401,7 +3388,7 @@ static int __init con_init(void) master_display_fg = vc = vc_cons[currcons].d; set_origin(vc); save_screen(vc); - gotoxy(vc, vc->vc_x, vc->vc_y); + gotoxy(vc, vc->state.x, vc->state.y); csi_J(vc, 0); 
update_screen(vc); pr_info("Console: %s %s %dx%d\n", @@ -4680,8 +4667,8 @@ EXPORT_SYMBOL_GPL(screen_pos); void getconsxy(struct vc_data *vc, unsigned char *p) { /* clamp values if they don't fit */ - p[0] = min(vc->vc_x, 0xFFu); - p[1] = min(vc->vc_y, 0xFFu); + p[0] = min(vc->state.x, 0xFFu); + p[1] = min(vc->state.y, 0xFFu); } void putconsxy(struct vc_data *vc, unsigned char *p) diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index d0d427a2f1a3..d64c5ce84125 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -488,7 +488,7 @@ static void mdacon_cursor(struct vc_data *c, int mode) return; } - mda_set_cursor(c->vc_y*mda_num_columns*2 + c->vc_x*2); + mda_set_cursor(c->state.y * mda_num_columns * 2 + c->state.x * 2); switch (c->vc_cursor_type & 0x0f) { diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 79c9bd8d3025..90083eb80515 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -132,10 +132,10 @@ static void sticon_cursor(struct vc_data *conp, int mode) { unsigned short car1; - car1 = conp->vc_screenbuf[conp->vc_x + conp->vc_y * conp->vc_cols]; + car1 = conp->vc_screenbuf[conp->state.x + conp->state.y * conp->vc_cols]; switch (mode) { case CM_ERASE: - sti_putc(sticon_sti, car1, conp->vc_y, conp->vc_x); + sti_putc(sticon_sti, car1, conp->state.y, conp->state.x); break; case CM_MOVE: case CM_DRAW: @@ -146,7 +146,7 @@ static void sticon_cursor(struct vc_data *conp, int mode) case CUR_TWO_THIRDS: case CUR_BLOCK: sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11), - conp->vc_y, conp->vc_x); + conp->state.y, conp->state.x); break; } break; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 998b0de1812f..d073fa167e87 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -718,9 +718,9 @@ static void vgacon_cursor(struct vc_data *c, int mode) case CM_ERASE: write_vga(14, (c->vc_pos - vga_vram_base) / 2); if (vga_video_type >= VIDEO_TYPE_VGAC) - vgacon_set_cursor_size(c->vc_x, 31, 30); + vgacon_set_cursor_size(c->state.x, 31, 30); else - vgacon_set_cursor_size(c->vc_x, 31, 31); + vgacon_set_cursor_size(c->state.x, 31, 31); break; case CM_MOVE: @@ -728,7 +728,7 @@ static void vgacon_cursor(struct vc_data *c, int mode) write_vga(14, (c->vc_pos - vga_vram_base) / 2); switch (c->vc_cursor_type & 0x0f) { case CUR_UNDERLINE: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, c->vc_font.height - (c->vc_font.height < 10 ? 2 : 3), @@ -737,21 +737,21 @@ static void vgacon_cursor(struct vc_data *c, int mode) 10 ? 1 : 2)); break; case CUR_TWO_THIRDS: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, c->vc_font.height / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, (c->vc_font.height * 2) / 3, c->vc_font.height - (c->vc_font.height < 10 ? 
1 : 2)); break; case CUR_LOWER_HALF: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, c->vc_font.height / 2, c->vc_font.height - (c->vc_font.height < @@ -759,12 +759,12 @@ static void vgacon_cursor(struct vc_data *c, int mode) break; case CUR_NONE: if (vga_video_type >= VIDEO_TYPE_VGAC) - vgacon_set_cursor_size(c->vc_x, 31, 30); + vgacon_set_cursor_size(c->state.x, 31, 30); else - vgacon_set_cursor_size(c->vc_x, 31, 31); + vgacon_set_cursor_size(c->state.x, 31, 31); break; default: - vgacon_set_cursor_size(c->vc_x, 1, + vgacon_set_cursor_size(c->state.x, 1, c->vc_font.height); break; } @@ -1352,8 +1352,8 @@ static void vgacon_save_screen(struct vc_data *c) * console initialization routines. */ vga_bootup_console = 1; - c->vc_x = screen_info.orig_x; - c->vc_y = screen_info.orig_y; + c->state.x = screen_info.orig_x; + c->state.y = screen_info.orig_y; } /* We can't copy in more than the size of the video buffer, diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261..c750470a31ec 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -240,7 +240,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = DIV_ROUND_UP(vc->vc_font.width, 8), c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1; char *src; @@ -286,10 +286,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set |= FB_CUR_SETCMAP; } - if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->vc_x)) || + if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) || (ops->cursor_state.image.dy != (vc->vc_font.height * y)) || ops->cursor_reset) { - ops->cursor_state.image.dx = vc->vc_font.width * vc->vc_x; + ops->cursor_state.image.dx = vc->vc_font.width * vc->state.x; ops->cursor_state.image.dy = vc->vc_font.height * y; cursor.set |= FB_CUR_SETPOS; } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 9d28a8e3328f..38d2a00b0ccf 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -655,11 +655,11 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, } if (!save) { int lines; - if (vc->vc_y + logo_lines >= rows) - lines = rows - vc->vc_y - 1; + if (vc->state.y + logo_lines >= rows) + lines = rows - vc->state.y - 1; else lines = logo_lines; - vc->vc_y += lines; + vc->state.y += lines; vc->vc_pos += lines * vc->vc_size_row; } } @@ -677,7 +677,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, vc->vc_size_row * rows); scr_memcpyw(q, save, logo_lines * new_cols * 2); - vc->vc_y += logo_lines; + vc->state.y += logo_lines; vc->vc_pos += logo_lines * vc->vc_size_row; kfree(save); } diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509..9d06446a1a3b 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -225,7 +225,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; @@ -284,7 +284,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, } dx = y * vc->vc_font.height; - dy = vyres - ((vc->vc_x + 1) * vc->vc_font.width); + dy = vyres - ((vc->state.x + 1) * vc->vc_font.width); if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index ce08251bfd38..4b5f76bb01e5 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -208,7 +208,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; @@ -267,7 +267,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, } dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height); - dy = vc->vc_x * vc->vc_font.width; + dy = vc->state.x * vc->vc_font.width; if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec..7e0ae3549dc7 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -255,7 +255,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = (vc->vc_font.width + 7) >> 3, c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; @@ -315,7 +315,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, } dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height); - dx = vxres - ((vc->vc_x * vc->vc_font.width) + vc->vc_font.width); + dx = vxres - ((vc->state.x * vc->vc_font.width) + vc->vc_font.width); if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 93390312957f..ac51425687e4 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -85,8 +85,8 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fb_tilecursor cursor; int use_sw = (vc->vc_cursor_type & 0x10); - cursor.sx = vc->vc_x; - cursor.sy = vc->vc_y; + cursor.sx = vc->state.x; + cursor.sy = vc->state.y; cursor.mode = (mode == CM_ERASE || use_sw) ? 
0 : 1; cursor.fg = fg; cursor.bg = bg; diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 24d4c16e3ae0..162f4337c767 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -22,6 +22,37 @@ struct uni_screen; #define NPAR 16 +/** + * struct vc_state -- state of a VC + * @x: cursor's x-position + * @y: cursor's y-position + * @color: foreground & background colors + * @G0_charset: what's G0 slot set to (like GRAF_MAP, LAT1_MAP) + * @G1_charset: what's G1 slot set to (like GRAF_MAP, LAT1_MAP) + * @charset: what character set to use (0=G0 or 1=G1) + * @intensity: 0=half-bright, 1=normal, 2=bold + * @reverse: reversed foreground/background colors + * + * These members are defined separately from struct vc_data as we save & + * restore them at times. + */ +struct vc_state { + unsigned int x, y; + + unsigned char color; + + unsigned char G0_charset; + unsigned char G1_charset; + unsigned int charset : 1; + + /* attribute flags */ + unsigned int intensity : 2; + unsigned int italic : 1; + unsigned int underline : 1; + unsigned int blink : 1; + unsigned int reverse : 1; +}; + /* * Example: vc_data of a console that was scrolled 3 lines down. * @@ -57,6 +88,8 @@ struct uni_screen; struct vc_data { struct tty_port port; /* Upper level data */ + struct vc_state state, saved_state; + unsigned short vc_num; /* Console number */ unsigned int vc_cols; /* [#] Console size */ unsigned int vc_rows; @@ -73,8 +106,6 @@ struct vc_data { /* attributes for all characters on screen */ unsigned char vc_attr; /* Current attributes */ unsigned char vc_def_color; /* Default colors */ - unsigned char vc_color; /* Foreground & background */ - unsigned char vc_s_color; /* Saved foreground & background */ unsigned char vc_ulcolor; /* Color for underline mode */ unsigned char vc_itcolor; unsigned char vc_halfcolor; /* Color for half intensity mode */ @@ -82,8 +113,6 @@ struct vc_data { unsigned int vc_cursor_type; unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ - unsigned int vc_x, vc_y; /* Cursor position */ - unsigned int vc_saved_x, vc_saved_y; unsigned long vc_pos; /* Cursor address */ /* fonts */ unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ @@ -98,8 +127,6 @@ struct vc_data { int vt_newvt; wait_queue_head_t paste_wait; /* mode flags */ - unsigned int vc_charset : 1; /* Character set G0 / G1 */ - unsigned int vc_s_charset : 1; /* Saved character set */ unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ unsigned int vc_toggle_meta : 1; /* Toggle high bit? */ unsigned int vc_decscnm : 1; /* Screen Mode */ @@ -107,17 +134,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - /* attribute flags */ - unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ - unsigned int vc_italic:1; - unsigned int vc_underline : 1; - unsigned int vc_blink : 1; - unsigned int vc_reverse : 1; - unsigned int vc_s_intensity : 2; /* saved rendition */ - unsigned int vc_s_italic:1; - unsigned int vc_s_underline : 1; - unsigned int vc_s_blink : 1; - unsigned int vc_s_reverse : 1; /* misc */ unsigned int vc_priv : 3; unsigned int vc_need_wrap : 1; @@ -129,10 +145,6 @@ struct vc_data { unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. 
*/ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; - unsigned char vc_G0_charset; - unsigned char vc_G1_charset; - unsigned char vc_saved_G0; - unsigned char vc_saved_G1; unsigned int vc_resize_user; /* resize request from user */ unsigned int vc_bell_pitch; /* Console bell pitch */ unsigned int vc_bell_duration; /* Console bell duration */ -- cgit v1.2.3-59-g8ed1b From b84ae3dc70fedf4bdee2dbfa487fd23b606fbb82 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:34 +0200 Subject: vt: introduce enum vc_intensity for intensity Introduce names (en enum) for 0, 1, and 2 constants. We now have VCI_HALF_BRIGHT, VCI_NORMAL, and VCI_BOLD instead. Apart from the cleanup, 1) the enum allows for better type checking, and 2) this saves some code. No more fiddling with bits is needed in assembly now. (OTOH, the structure is larger.) Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-2-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 32 +++++++++++++++++--------------- drivers/usb/misc/sisusbvga/sisusb_con.c | 6 +++--- drivers/video/console/mdacon.c | 5 +++-- drivers/video/console/sticon.c | 3 ++- drivers/video/console/vgacon.c | 9 +++++---- include/linux/console.h | 5 ++++- include/linux/console_struct.h | 11 +++++++++-- 7 files changed, 43 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 76f52935e0c8..71bf483e5640 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -705,8 +705,9 @@ void update_region(struct vc_data *vc, unsigned long start, int count) /* Structure of attributes is hardware-dependent */ -static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, - u8 _underline, u8 _reverse, u8 _italic) +static u8 build_attr(struct vc_data *vc, u8 _color, + enum vc_intensity _intensity, u8 _blink, u8 _underline, + u8 _reverse, u8 _italic) { if (vc->vc_sw->con_build_attr) return vc->vc_sw->con_build_attr(vc, _color, _intensity, @@ -734,13 +735,13 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, a = (a & 0xF0) | vc->vc_itcolor; else if (_underline) a = (a & 0xf0) | vc->vc_ulcolor; - else if (_intensity == 0) + else if (_intensity == VCI_HALF_BRIGHT) a = (a & 0xf0) | vc->vc_halfcolor; if (_reverse) a = ((a) & 0x88) | ((((a) >> 4) | ((a) << 4)) & 0x77); if (_blink) a ^= 0x80; - if (_intensity == 2) + if (_intensity == VCI_BOLD) a ^= 0x08; if (vc->vc_hi_font_mask == 0x100) a <<= 1; @@ -753,8 +754,9 @@ static void update_attr(struct vc_data *vc) vc->vc_attr = build_attr(vc, vc->state.color, vc->state.intensity, vc->state.blink, vc->state.underline, vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); - vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, 1, - vc->state.blink, 0, vc->vc_decscnm, 0) << 8); + vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, + VCI_NORMAL, vc->state.blink, 0, vc->vc_decscnm, + 0) << 8); } /* Note: inverting the screen twice should revert to the original state */ @@ -1611,7 +1613,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi static void default_attr(struct vc_data *vc) { - vc->state.intensity = 1; + vc->state.intensity = VCI_NORMAL; vc->state.italic = 0; vc->state.underline = 0; vc->state.reverse = 0; @@ -1652,11 +1654,11 @@ static void rgb_foreground(struct vc_data *vc, const struct rgb *c) if (hue == 7 && max <= 0x55) { hue = 0; - vc->state.intensity = 2; + 
vc->state.intensity = VCI_BOLD; } else if (max > 0xaa) - vc->state.intensity = 2; + vc->state.intensity = VCI_BOLD; else - vc->state.intensity = 1; + vc->state.intensity = VCI_NORMAL; vc->state.color = (vc->state.color & 0xf0) | hue; } @@ -1715,10 +1717,10 @@ static void csi_m(struct vc_data *vc) default_attr(vc); break; case 1: - vc->state.intensity = 2; + vc->state.intensity = VCI_BOLD; break; case 2: - vc->state.intensity = 0; + vc->state.intensity = VCI_HALF_BRIGHT; break; case 3: vc->state.italic = 1; @@ -1764,7 +1766,7 @@ static void csi_m(struct vc_data *vc) vc->vc_toggle_meta = 1; break; case 22: - vc->state.intensity = 1; + vc->state.intensity = VCI_NORMAL; break; case 23: vc->state.italic = 0; @@ -1795,7 +1797,7 @@ static void csi_m(struct vc_data *vc) default: if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) { if (vc->vc_par[i] < 100) - vc->state.intensity = 2; + vc->state.intensity = VCI_BOLD; vc->vc_par[i] -= 60; } if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37) @@ -1934,7 +1936,7 @@ static void setterm_command(struct vc_data *vc) case 2: /* set color for half intensity mode */ if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_halfcolor = color_table[vc->vc_par[1]]; - if (vc->state.intensity == 0) + if (vc->state.intensity == VCI_HALF_BRIGHT) update_attr(vc); } break; diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index cd0155310fea..c59fe641b8b5 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -302,14 +302,14 @@ sisusbcon_deinit(struct vc_data *c) /* interface routine */ static u8 -sisusbcon_build_attr(struct vc_data *c, u8 color, u8 intensity, +sisusbcon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 unused) { u8 attr = color; if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; - else if (intensity == 0) + else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | c->vc_halfcolor; if (reverse) @@ -320,7 +320,7 @@ sisusbcon_build_attr(struct vc_data *c, u8 color, u8 intensity, if (blink) attr ^= 0x80; - if (intensity == 2) + if (intensity == VCI_BOLD) attr ^= 0x08; return attr; diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index d64c5ce84125..e3da664df16c 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -394,7 +394,8 @@ static inline u16 mda_convert_attr(u16 ch) (ch & 0x00ff) | attr; } -static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, +static u8 mdacon_build_attr(struct vc_data *c, u8 color, + enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 italic) { /* The attribute is just a bit vector: @@ -405,7 +406,7 @@ static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, * Bit 7 : blink */ - return (intensity & 3) | + return (intensity & VCI_MASK) | ((underline & 1) << 2) | ((reverse & 1) << 3) | (!!italic << 4) | diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 90083eb80515..a847067abbe5 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -288,7 +288,8 @@ static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos, return ret; } -static u8 sticon_build_attr(struct vc_data *conp, u8 color, u8 intens, +static u8 sticon_build_attr(struct vc_data *conp, u8 color, + enum vc_intensity intens, u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = ((color & 0x70) >> 1) | ((color & 7)); diff --git 
a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index d073fa167e87..d0b26e2318d3 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -629,7 +629,8 @@ static void vgacon_deinit(struct vc_data *c) con_set_default_unimap(c); } -static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, +static u8 vgacon_build_attr(struct vc_data *c, u8 color, + enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = color; @@ -639,7 +640,7 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, attr = (attr & 0xF0) | c->vc_itcolor; else if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; - else if (intensity == 0) + else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | c->vc_halfcolor; } if (reverse) @@ -648,14 +649,14 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, 0x77); if (blink) attr ^= 0x80; - if (intensity == 2) + if (intensity == VCI_BOLD) attr ^= 0x08; if (!vga_can_do_color) { if (italic) attr = (attr & 0xF8) | 0x02; else if (underline) attr = (attr & 0xf8) | 0x01; - else if (intensity == 0) + else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | 0x08; } return attr; diff --git a/include/linux/console.h b/include/linux/console.h index 75dd20650fbe..10c04779ae49 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -35,6 +35,8 @@ enum con_scroll { SM_DOWN, }; +enum vc_intensity; + /** * struct consw - callbacks for consoles * @@ -74,7 +76,8 @@ struct consw { void (*con_scrolldelta)(struct vc_data *vc, int lines); int (*con_set_origin)(struct vc_data *vc); void (*con_save_screen)(struct vc_data *vc); - u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity, + u8 (*con_build_attr)(struct vc_data *vc, u8 color, + enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 italic); void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); u16 *(*con_screen_pos)(struct vc_data *vc, int offset); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 162f4337c767..e901d98790bf 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -22,6 +22,13 @@ struct uni_screen; #define NPAR 16 +enum vc_intensity { + VCI_HALF_BRIGHT, + VCI_NORMAL, + VCI_BOLD, + VCI_MASK = 0x3, +}; + /** * struct vc_state -- state of a VC * @x: cursor's x-position @@ -30,7 +37,7 @@ struct uni_screen; * @G0_charset: what's G0 slot set to (like GRAF_MAP, LAT1_MAP) * @G1_charset: what's G1 slot set to (like GRAF_MAP, LAT1_MAP) * @charset: what character set to use (0=G0 or 1=G1) - * @intensity: 0=half-bright, 1=normal, 2=bold + * @intensity: see enum vc_intensity for values * @reverse: reversed foreground/background colors * * These members are defined separately from struct vc_data as we save & @@ -46,7 +53,7 @@ struct vc_state { unsigned int charset : 1; /* attribute flags */ - unsigned int intensity : 2; + enum vc_intensity intensity; unsigned int italic : 1; unsigned int underline : 1; unsigned int blink : 1; -- cgit v1.2.3-59-g8ed1b From 77bc14f273c2dfecbf87f41fdc00345d99597e13 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:35 +0200 Subject: vc: switch state to bool The code currently uses bitfields to store true-false values. Switch all of that to bools. Apart from the cleanup, it saves 20B of code as many shifts, ANDs, and ORs became simple movzb's. 
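For illustration only (not part of the patch), a minimal user-space sketch of why the conversion shrinks the generated code; the structures and field names below are hypothetical stand-ins, not the real vc_state layout:

#include <stdbool.h>

/* Hypothetical flag holders, only to contrast the two access patterns. */
struct flags_bitfield {
	unsigned int blink : 1;
	unsigned int reverse : 1;
};

struct flags_bool {
	bool blink;
	bool reverse;
};

static bool blink_bitfield(const struct flags_bitfield *f)
{
	return f->blink;	/* needs masking (and often shifting) of the word */
}

static bool blink_bool(const struct flags_bool *f)
{
	return f->blink;	/* plain byte load, e.g. movzbl on x86 */
}
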
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-3-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 32 ++++++++++++++++---------------- drivers/usb/misc/sisusbvga/sisusb_con.c | 3 ++- drivers/video/console/mdacon.c | 11 ++++++----- drivers/video/console/sticon.c | 3 ++- drivers/video/console/vgacon.c | 3 ++- include/linux/console.h | 2 +- include/linux/console_struct.h | 8 ++++---- 7 files changed, 33 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 71bf483e5640..26cb1fc48b27 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -706,8 +706,8 @@ void update_region(struct vc_data *vc, unsigned long start, int count) /* Structure of attributes is hardware-dependent */ static u8 build_attr(struct vc_data *vc, u8 _color, - enum vc_intensity _intensity, u8 _blink, u8 _underline, - u8 _reverse, u8 _italic) + enum vc_intensity _intensity, bool _blink, bool _underline, + bool _reverse, bool _italic) { if (vc->vc_sw->con_build_attr) return vc->vc_sw->con_build_attr(vc, _color, _intensity, @@ -755,8 +755,8 @@ static void update_attr(struct vc_data *vc) vc->state.blink, vc->state.underline, vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, - VCI_NORMAL, vc->state.blink, 0, vc->vc_decscnm, - 0) << 8); + VCI_NORMAL, vc->state.blink, false, + vc->vc_decscnm, false) << 8); } /* Note: inverting the screen twice should revert to the original state */ @@ -1614,10 +1614,10 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi static void default_attr(struct vc_data *vc) { vc->state.intensity = VCI_NORMAL; - vc->state.italic = 0; - vc->state.underline = 0; - vc->state.reverse = 0; - vc->state.blink = 0; + vc->state.italic = false; + vc->state.underline = false; + vc->state.reverse = false; + vc->state.blink = false; vc->state.color = vc->vc_def_color; } @@ -1723,7 +1723,7 @@ static void csi_m(struct vc_data *vc) vc->state.intensity = VCI_HALF_BRIGHT; break; case 3: - vc->state.italic = 1; + vc->state.italic = true; break; case 21: /* @@ -1731,13 +1731,13 @@ static void csi_m(struct vc_data *vc) * convert it to a single underline. */ case 4: - vc->state.underline = 1; + vc->state.underline = true; break; case 5: - vc->state.blink = 1; + vc->state.blink = true; break; case 7: - vc->state.reverse = 1; + vc->state.reverse = true; break; case 10: /* ANSI X3.64-1979 (SCO-ish?) 
* Select primary font, don't display control chars if @@ -1769,16 +1769,16 @@ static void csi_m(struct vc_data *vc) vc->state.intensity = VCI_NORMAL; break; case 23: - vc->state.italic = 0; + vc->state.italic = false; break; case 24: - vc->state.underline = 0; + vc->state.underline = false; break; case 25: - vc->state.blink = 0; + vc->state.blink = false; break; case 27: - vc->state.reverse = 0; + vc->state.reverse = false; break; case 38: i = vc_t416_color(vc, i, rgb_foreground); diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index c59fe641b8b5..80657c49310a 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -303,7 +303,8 @@ sisusbcon_deinit(struct vc_data *c) /* interface routine */ static u8 sisusbcon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 unused) + bool blink, bool underline, bool reverse, + bool unused) { u8 attr = color; diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index e3da664df16c..00cb6245fbef 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -396,7 +396,8 @@ static inline u16 mda_convert_attr(u16 ch) static u8 mdacon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 italic) + bool blink, bool underline, bool reverse, + bool italic) { /* The attribute is just a bit vector: * @@ -407,10 +408,10 @@ static u8 mdacon_build_attr(struct vc_data *c, u8 color, */ return (intensity & VCI_MASK) | - ((underline & 1) << 2) | - ((reverse & 1) << 3) | - (!!italic << 4) | - ((blink & 1) << 7); + (underline << 2) | + (reverse << 3) | + (italic << 4) | + (blink << 7); } static void mdacon_invert_region(struct vc_data *c, u16 *p, int count) diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index a847067abbe5..bbcdfd312c36 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -290,7 +290,8 @@ static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos, static u8 sticon_build_attr(struct vc_data *conp, u8 color, enum vc_intensity intens, - u8 blink, u8 underline, u8 reverse, u8 italic) + bool blink, bool underline, bool reverse, + bool italic) { u8 attr = ((color & 0x70) >> 1) | ((color & 7)); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index d0b26e2318d3..c1c4ce28ac5e 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -631,7 +631,8 @@ static void vgacon_deinit(struct vc_data *c) static u8 vgacon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 italic) + bool blink, bool underline, bool reverse, + bool italic) { u8 attr = color; diff --git a/include/linux/console.h b/include/linux/console.h index 10c04779ae49..964b67912b04 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -78,7 +78,7 @@ struct consw { void (*con_save_screen)(struct vc_data *vc); u8 (*con_build_attr)(struct vc_data *vc, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 italic); + bool blink, bool underline, bool reverse, bool italic); void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); u16 *(*con_screen_pos)(struct vc_data *vc, int offset); unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h 
index e901d98790bf..fa1abffe64be 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -54,10 +54,10 @@ struct vc_state { /* attribute flags */ enum vc_intensity intensity; - unsigned int italic : 1; - unsigned int underline : 1; - unsigned int blink : 1; - unsigned int reverse : 1; + bool italic; + bool underline; + bool blink; + bool reverse; }; /* -- cgit v1.2.3-59-g8ed1b From b70ec4d97f4cc4f6f804ca57419d070b80a9874e Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:37 +0200 Subject: vt: switch G0/1_charset to an array Declare Gx_charset[2] instead of G0_charset and G1_charset. It makes the code simpler (without ternary operators). Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-5-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 19 ++++++++----------- include/linux/console_struct.h | 6 ++---- 2 files changed, 10 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 729c7c8c682b..4e79cda0c2be 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1743,9 +1743,7 @@ static void csi_m(struct vc_data *vc) * Select primary font, don't display control chars if * defined, don't set bit 8 on output. */ - vc->vc_translate = set_translate(vc->state.charset == 0 - ? vc->state.G0_charset - : vc->state.G1_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], vc); vc->vc_disp_ctrl = 0; vc->vc_toggle_meta = 0; break; @@ -2041,8 +2039,8 @@ static void restore_cur(struct vc_data *vc) memcpy(&vc->state, &vc->saved_state, sizeof(vc->state)); gotoxy(vc, vc->state.x, vc->state.y); - vc->vc_translate = set_translate(vc->state.charset ? vc->state.G1_charset : - vc->state.G0_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], + vc); update_attr(vc); vc->vc_need_wrap = 0; } @@ -2059,8 +2057,8 @@ static void reset_terminal(struct vc_data *vc, int do_clear) vc->vc_state = ESnormal; vc->vc_priv = EPecma; vc->vc_translate = set_translate(LAT1_MAP, vc); - vc->state.G0_charset = LAT1_MAP; - vc->state.G1_charset = GRAF_MAP; + vc->state.Gx_charset[0] = LAT1_MAP; + vc->state.Gx_charset[1] = GRAF_MAP; vc->state.charset = 0; vc->vc_need_wrap = 0; vc->vc_report_mouse = 0; @@ -2105,8 +2103,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) static void vc_setGx(struct vc_data *vc, unsigned int which, int c) { - unsigned char *charset = which == 0 ? 
&vc->state.G0_charset : - &vc->state.G1_charset; + unsigned char *charset = &vc->state.Gx_charset[which]; switch (c) { case '0': @@ -2168,12 +2165,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 14: vc->state.charset = 1; - vc->vc_translate = set_translate(vc->state.G1_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[1], vc); vc->vc_disp_ctrl = 1; return; case 15: vc->state.charset = 0; - vc->vc_translate = set_translate(vc->state.G0_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[0], vc); vc->vc_disp_ctrl = 0; return; case 24: case 26: diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index fa1abffe64be..623e86689c3a 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -34,8 +34,7 @@ enum vc_intensity { * @x: cursor's x-position * @y: cursor's y-position * @color: foreground & background colors - * @G0_charset: what's G0 slot set to (like GRAF_MAP, LAT1_MAP) - * @G1_charset: what's G1 slot set to (like GRAF_MAP, LAT1_MAP) + * @Gx_charset: what's G0/G1 slot set to (like GRAF_MAP, LAT1_MAP) * @charset: what character set to use (0=G0 or 1=G1) * @intensity: see enum vc_intensity for values * @reverse: reversed foreground/background colors @@ -48,8 +47,7 @@ struct vc_state { unsigned char color; - unsigned char G0_charset; - unsigned char G1_charset; + unsigned char Gx_charset[2]; unsigned int charset : 1; /* attribute flags */ -- cgit v1.2.3-59-g8ed1b From dbee4cffa1bfe23263edde1d17cdbef0de3a6ac0 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:38 +0200 Subject: vt: convert vc_tab_stop to bitmap vc_tab_stop is used as a bitmap, but defined as an unsigned int array. Switch it to bitmap and convert all users to the bitmap interface. Note the difference in behavior! We no longer mask the top 24 bits away from x, hence we do not wrap tabs at 256th column. Instead, we silently drop attempts to set a tab behind 256 columns. And we will also seek by '\t' to the rightmost column, when behind that boundary. I do not think the original behavior was desired and that someone relies on that. If this turns out to be the case, we can change the added 'if's back to masks here and there instead... (Or we can increase the limit as fb consoles now have 240 chars here. And they could have more with higher than my resolution, of course.) 
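For illustration only (not part of the patch), a user-space sketch of the behavioural difference described above; the eight-word array mirrors the old vc_tab_stop layout, and the bound check stands in for the new VC_TABSTOPS_COUNT clamp:

#include <stdbool.h>

#define TABSTOPS 256u

/* Old scheme: the "7 &" mask wraps, so e.g. column 300 aliases column 44. */
static bool tab_stop_old(const unsigned int stops[8], unsigned int x)
{
	return stops[7 & (x >> 5)] & (1u << (x & 31));
}

/* New scheme: columns beyond 255 simply have no tab stop. */
static bool tab_stop_new(const unsigned int stops[8], unsigned int x)
{
	if (x >= TABSTOPS)
		return false;
	return stops[x >> 5] & (1u << (x & 31));
}
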
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-6-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 44 +++++++++++++++++------------------------- include/linux/console_struct.h | 3 ++- 2 files changed, 20 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 4e79cda0c2be..3adb7f409524 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2052,6 +2052,8 @@ enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey, /* console_lock is held (except via vc_init()) */ static void reset_terminal(struct vc_data *vc, int do_clear) { + unsigned int i; + vc->vc_top = 0; vc->vc_bottom = vc->vc_rows; vc->vc_state = ESnormal; @@ -2082,14 +2084,9 @@ static void reset_terminal(struct vc_data *vc, int do_clear) default_attr(vc); update_attr(vc); - vc->vc_tab_stop[0] = - vc->vc_tab_stop[1] = - vc->vc_tab_stop[2] = - vc->vc_tab_stop[3] = - vc->vc_tab_stop[4] = - vc->vc_tab_stop[5] = - vc->vc_tab_stop[6] = - vc->vc_tab_stop[7] = 0x01010101; + bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT); + for (i = 0; i < VC_TABSTOPS_COUNT; i += 8) + set_bit(i, vc->vc_tab_stop); vc->vc_bell_pitch = DEFAULT_BELL_PITCH; vc->vc_bell_duration = DEFAULT_BELL_DURATION; @@ -2147,11 +2144,13 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 9: vc->vc_pos -= (vc->state.x << 1); - while (vc->state.x < vc->vc_cols - 1) { - vc->state.x++; - if (vc->vc_tab_stop[7 & (vc->state.x >> 5)] & (1 << (vc->state.x & 31))) - break; - } + + vc->state.x = find_next_bit(vc->vc_tab_stop, + min(vc->vc_cols - 1, VC_TABSTOPS_COUNT), + vc->state.x + 1); + if (vc->state.x >= VC_TABSTOPS_COUNT) + vc->state.x = vc->vc_cols - 1; + vc->vc_pos += (vc->state.x << 1); notify_write(vc, '\t'); return; @@ -2210,7 +2209,8 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[7 & (vc->state.x >> 5)] |= (1 << (vc->state.x & 31)); + if (vc->state.x < VC_TABSTOPS_COUNT) + set_bit(vc->state.x, vc->vc_tab_stop); return; case 'Z': respond_ID(tty); @@ -2421,18 +2421,10 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) respond_ID(tty); return; case 'g': - if (!vc->vc_par[0]) - vc->vc_tab_stop[7 & (vc->state.x >> 5)] &= ~(1 << (vc->state.x & 31)); - else if (vc->vc_par[0] == 3) { - vc->vc_tab_stop[0] = - vc->vc_tab_stop[1] = - vc->vc_tab_stop[2] = - vc->vc_tab_stop[3] = - vc->vc_tab_stop[4] = - vc->vc_tab_stop[5] = - vc->vc_tab_stop[6] = - vc->vc_tab_stop[7] = 0; - } + if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT) + set_bit(vc->state.x, vc->vc_tab_stop); + else if (vc->vc_par[0] == 3) + bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT); return; case 'm': csi_m(vc); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 623e86689c3a..81f7afcd061a 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,7 @@ struct uni_pagedir; struct uni_screen; #define NPAR 16 +#define VC_TABSTOPS_COUNT 256U enum vc_intensity { VCI_HALF_BRIGHT, @@ -147,7 +148,7 @@ struct vc_data { unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ unsigned char vc_utf_count; int vc_utf_char; - unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ + DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. 
*/ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; unsigned int vc_resize_user; /* resize request from user */ -- cgit v1.2.3-59-g8ed1b From 7d4a3112f07878ba9c6bffbcdb2dea2dcfc5c1f9 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:39 +0200 Subject: vt: remove 25 years stale comment vc_cons was made global (non-static) in 1.3.38, almost 25 years ago. Remove a comment which says that it would be a disadvantage to do so :P. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-7-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- include/linux/console_struct.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 81f7afcd061a..40ed52f67bc5 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -167,8 +167,7 @@ struct vc { struct work_struct SAK_work; /* might add scrmem, kbd at some time, - to have everything in one place - the disadvantage - would be that vc_cons etc can no longer be static */ + to have everything in one place */ }; extern struct vc vc_cons [MAX_NR_CONSOLES]; -- cgit v1.2.3-59-g8ed1b From 9a6f72d9b6c121415bc2867329fe77b0d2a52dc1 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:41 +0200 Subject: vt: get rid of VT10.ID macros VT100ID is unused, but defined twice. Kill it. VT102ID is used only in respond_ID. Define there a variable with proper type and use that instead. Then drop both defines of VT102ID too. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-9-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 11 ++++------- include/linux/console.h | 6 ------ 2 files changed, 4 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 49c9d1e4067c..8d9e532f050a 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1397,12 +1397,6 @@ enum { EPecma = 0, EPdec, EPeq, EPgt, EPlt}; #define kbdapplic VC_APPLIC #define lnm VC_CRLF -/* - * this is what the terminal answers to a ESC-Z or csi0c query. - */ -#define VT100ID "\033[?1;2c" -#define VT102ID "\033[?6c" - const unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7, 8,12,10,14, 9,13,11,15 }; @@ -1835,7 +1829,10 @@ static inline void status_report(struct tty_struct *tty) static inline void respond_ID(struct tty_struct *tty) { - respond_string(VT102ID, strlen(VT102ID), tty->port); + /* terminal answer to an ESC-Z or csi0c query. */ + static const char vt102_id[] = "\033[?6c"; + + respond_string(vt102_id, strlen(vt102_id), tty->port); } void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry) diff --git a/include/linux/console.h b/include/linux/console.h index 964b67912b04..0670d3491e0e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -24,12 +24,6 @@ struct module; struct tty_struct; struct notifier_block; -/* - * this is what the terminal answers to a ESC-Z or csi0c query. - */ -#define VT100ID "\033[?1;2c" -#define VT102ID "\033[?6c" - enum con_scroll { SM_UP, SM_DOWN, -- cgit v1.2.3-59-g8ed1b From a018180cc3485e71f7912e36bf93caa635e0e4af Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:42 +0200 Subject: vt: move vc_translate to vt.c and rename it vc_translate is used only in vt.c, so move the definition from a header there. Also, it used to be a macro, so be modern and make a static inline from it. 
This makes the code actually readable. And as a preparation for next patches, rename it to vc_translate_ascii. vc_translate will be a wrapper for both unicode and this one. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-10-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 14 +++++++++++++- include/linux/vt_kern.h | 3 --- 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8d9e532f050a..b86639351dd2 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2560,6 +2560,18 @@ static void con_flush(struct vc_data *vc, unsigned long draw_from, *draw_x = -1; } +static inline int vc_translate_ascii(const struct vc_data *vc, int c) +{ + if (IS_ENABLED(CONFIG_CONSOLE_TRANSLATIONS)) { + if (vc->vc_toggle_meta) + c |= 0x80; + + return vc->vc_translate[c]; + } + + return c; +} + /* acquires console_lock */ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count) { @@ -2687,7 +2699,7 @@ rescan_last_byte: c = 0xfffd; tc = c; } else { /* no utf or alternate charset mode */ - tc = vc_translate(vc, c); + tc = vc_translate_ascii(vc, c); } param.c = tc; diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index abf5bccf906a..349e39c3ab60 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -74,8 +74,6 @@ int con_set_default_unimap(struct vc_data *vc); void con_free_unimap(struct vc_data *vc); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); -#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ - ((vc)->vc_toggle_meta ? 0x80 : 0)]) #else static inline int con_set_trans_old(unsigned char __user *table) { @@ -124,7 +122,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) return 0; } -#define vc_translate(vc, c) (c) #endif /* vt.c */ -- cgit v1.2.3-59-g8ed1b From 4dfa3c54f908d7ec20b88671329d6a3205d37d36 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:57 +0200 Subject: vt: redefine world of cursor macros The cursor code used to use magic constants, ANDs, ORs, and some macros. Redefine all this to make some sense. In particular: * Drop CUR_DEFAULT, which is CUR_UNDERLINE. CUR_DEFAULT was used only for cur_default variable initialization, so use CUR_UNDERLINE there to make obvious what's the default. * Drop CUR_HWMASK. Instead, define CUR_SIZE() which explains it more. And use it all over the places. * Define few more masks and bits which will be used in next patches instead of magic constants. * Define CUR_MAKE to build up cursor value. 
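For illustration only, a usage sketch of the macros introduced by the console_struct.h hunk below; the concrete shape value and the 0x10 "set" byte are made-up inputs, not values taken from the kernel:

/* Assumes the CUR_* macros added further down in console_struct.h. */
static int is_block_cursor(int shape)
{
	return CUR_SIZE(shape) == CUR_BLOCK;	/* was: (shape & CUR_HWMASK) == CUR_BLOCK */
}

static void cursor_macros_example(void)
{
	/* size CUR_BLOCK, "change" byte 0, "set" byte 0x10 (made-up values) */
	int shape = CUR_MAKE(CUR_BLOCK, 0x00, 0x10);

	/* With the definitions below: CUR_SIZE(shape) == CUR_BLOCK,
	 * CUR_CHANGE(shape) == 0x0000, CUR_SET(shape) == 0x1000. */
	(void)is_block_cursor(shape);
}
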
Signed-off-by: Jiri Slaby Cc: Bartlomiej Zolnierkiewicz Cc: dri-devel@lists.freedesktop.org Cc: linux-fbdev@vger.kernel.org Link: https://lore.kernel.org/r/20200615074910.19267-25-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 2 +- drivers/video/fbdev/core/bitblit.c | 2 +- drivers/video/fbdev/core/fbcon_ccw.c | 2 +- drivers/video/fbdev/core/fbcon_cw.c | 2 +- drivers/video/fbdev/core/fbcon_ud.c | 2 +- include/linux/console_struct.h | 28 +++++++++++++++++----------- 6 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index f7d5a3c3845f..af1ef717f416 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -163,7 +163,7 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR); int global_cursor_default = -1; module_param(global_cursor_default, int, S_IRUGO | S_IWUSR); -static int cur_default = CUR_DEFAULT; +static int cur_default = CUR_UNDERLINE; module_param(cur_default, int, S_IRUGO | S_IWUSR); /* diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index c750470a31ec..3b002b365a5a 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -325,7 +325,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 9d06446a1a3b..5b67bcebe34c 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -325,7 +325,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 4b5f76bb01e5..f1aab3ae3bc9 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -308,7 +308,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 7e0ae3549dc7..81ed6f6bed67 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -348,7 +348,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 40ed52f67bc5..153734816b49 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -173,17 +173,23 @@ struct vc { extern struct vc vc_cons [MAX_NR_CONSOLES]; extern void vc_SAK(struct work_struct *work); -#define CUR_DEF 0 -#define CUR_NONE 1 -#define CUR_UNDERLINE 2 -#define CUR_LOWER_THIRD 3 -#define CUR_LOWER_HALF 4 -#define CUR_TWO_THIRDS 5 -#define CUR_BLOCK 6 -#define CUR_HWMASK 0x0f 
-#define CUR_SWMASK 0xfff0 - -#define CUR_DEFAULT CUR_UNDERLINE +#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \ + ((set) << 16)) +#define CUR_SIZE(c) ((c) & 0x00000f) +# define CUR_DEF 0 +# define CUR_NONE 1 +# define CUR_UNDERLINE 2 +# define CUR_LOWER_THIRD 3 +# define CUR_LOWER_HALF 4 +# define CUR_TWO_THIRDS 5 +# define CUR_BLOCK 6 +#define CUR_SW 0x000010 +#define CUR_ALWAYS_BG 0x000020 +#define CUR_INVERT_FG_BG 0x000040 +#define CUR_FG 0x000700 +#define CUR_BG 0x007000 +#define CUR_CHANGE(c) ((c) & 0x00ff00) +#define CUR_SET(c) (((c) & 0xff0000) >> 8) bool con_is_visible(const struct vc_data *vc); -- cgit v1.2.3-59-g8ed1b From 521b512b157a1315ff2bf11c11ab184c79515aea Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:47 +0100 Subject: PM / EM: change naming convention from 'capacity' to 'performance' The Energy Model uses concept of performance domain and capacity states in order to calculate power used by CPUs. Change naming convention from capacity to performance state would enable wider usage in future, e.g. upcoming support for other devices other than CPUs. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- drivers/thermal/cpufreq_cooling.c | 12 +++--- include/linux/energy_model.h | 86 +++++++++++++++++++++------------------ kernel/power/energy_model.c | 44 ++++++++++---------- kernel/sched/topology.c | 20 ++++----- 4 files changed, 84 insertions(+), 78 deletions(-) (limited to 'include') diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 9e124020519f..641995ebc107 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -333,18 +333,18 @@ static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev, return false; policy = cpufreq_cdev->policy; - if (!cpumask_equal(policy->related_cpus, to_cpumask(em->cpus))) { + if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) { pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n", - cpumask_pr_args(to_cpumask(em->cpus)), + cpumask_pr_args(em_span_cpus(em)), cpumask_pr_args(policy->related_cpus)); return false; } nr_levels = cpufreq_cdev->max_level + 1; - if (em->nr_cap_states != nr_levels) { - pr_err("The number of cap states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n", - cpumask_pr_args(to_cpumask(em->cpus)), - em->nr_cap_states, nr_levels); + if (em_pd_nr_perf_states(em) != nr_levels) { + pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n", + cpumask_pr_args(em_span_cpus(em)), + em_pd_nr_perf_states(em), nr_levels); return false; } diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index ade6486a3382..fe336a9eb5d4 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -10,13 +10,13 @@ #include /** - * em_cap_state - Capacity state of a performance domain + * em_perf_state - Performance state of a performance domain * @frequency: The CPU frequency in KHz, for consistency with CPUFreq * @power: The power consumed by 1 CPU at this level, in milli-watts * @cost: The cost coefficient associated with this level, used during * energy calculation. 
Equal to: power * max_frequency / frequency */ -struct em_cap_state { +struct em_perf_state { unsigned long frequency; unsigned long power; unsigned long cost; @@ -24,8 +24,8 @@ struct em_cap_state { /** * em_perf_domain - Performance domain - * @table: List of capacity states, in ascending order - * @nr_cap_states: Number of capacity states + * @table: List of performance states, in ascending order + * @nr_perf_states: Number of performance states * @cpus: Cpumask covering the CPUs of the domain * * A "performance domain" represents a group of CPUs whose performance is @@ -34,22 +34,27 @@ struct em_cap_state { * CPUFreq policies. */ struct em_perf_domain { - struct em_cap_state *table; - int nr_cap_states; + struct em_perf_state *table; + int nr_perf_states; unsigned long cpus[]; }; +#define em_span_cpus(em) (to_cpumask((em)->cpus)) + #ifdef CONFIG_ENERGY_MODEL #define EM_CPU_MAX_POWER 0xFFFF struct em_data_callback { /** - * active_power() - Provide power at the next capacity state of a CPU - * @power : Active power at the capacity state in mW (modified) - * @freq : Frequency at the capacity state in kHz (modified) + * active_power() - Provide power at the next performance state of + * a CPU + * @power : Active power at the performance state in mW + * (modified) + * @freq : Frequency at the performance state in kHz + * (modified) * @cpu : CPU for which we do this operation * - * active_power() must find the lowest capacity state of 'cpu' above + * active_power() must find the lowest performance state of 'cpu' above * 'freq' and update 'power' and 'freq' to the matching active power * and frequency. * @@ -80,46 +85,46 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { unsigned long freq, scale_cpu; - struct em_cap_state *cs; + struct em_perf_state *ps; int i, cpu; /* - * In order to predict the capacity state, map the utilization of the - * most utilized CPU of the performance domain to a requested frequency, - * like schedutil. + * In order to predict the performance state, map the utilization of + * the most utilized CPU of the performance domain to a requested + * frequency, like schedutil. */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); - cs = &pd->table[pd->nr_cap_states - 1]; - freq = map_util_freq(max_util, cs->frequency, scale_cpu); + ps = &pd->table[pd->nr_perf_states - 1]; + freq = map_util_freq(max_util, ps->frequency, scale_cpu); /* - * Find the lowest capacity state of the Energy Model above the + * Find the lowest performance state of the Energy Model above the * requested frequency. 
*/ - for (i = 0; i < pd->nr_cap_states; i++) { - cs = &pd->table[i]; - if (cs->frequency >= freq) + for (i = 0; i < pd->nr_perf_states; i++) { + ps = &pd->table[i]; + if (ps->frequency >= freq) break; } /* - * The capacity of a CPU in the domain at that capacity state (cs) + * The capacity of a CPU in the domain at the performance state (ps) * can be computed as: * - * cs->freq * scale_cpu - * cs->cap = -------------------- (1) + * ps->freq * scale_cpu + * ps->cap = -------------------- (1) * cpu_max_freq * * So, ignoring the costs of idle states (which are not available in - * the EM), the energy consumed by this CPU at that capacity state is - * estimated as: + * the EM), the energy consumed by this CPU at that performance state + * is estimated as: * - * cs->power * cpu_util + * ps->power * cpu_util * cpu_nrg = -------------------- (2) - * cs->cap + * ps->cap * - * since 'cpu_util / cs->cap' represents its percentage of busy time. + * since 'cpu_util / ps->cap' represents its percentage of busy time. * * NOTE: Although the result of this computation actually is in * units of power, it can be manipulated as an energy value @@ -129,34 +134,35 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product * of two terms: * - * cs->power * cpu_max_freq cpu_util + * ps->power * cpu_max_freq cpu_util * cpu_nrg = ------------------------ * --------- (3) - * cs->freq scale_cpu + * ps->freq scale_cpu * - * The first term is static, and is stored in the em_cap_state struct - * as 'cs->cost'. + * The first term is static, and is stored in the em_perf_state struct + * as 'ps->cost'. * * Since all CPUs of the domain have the same micro-architecture, they - * share the same 'cs->cost', and the same CPU capacity. Hence, the + * share the same 'ps->cost', and the same CPU capacity. Hence, the * total energy of the domain (which is the simple sum of the energy of * all of its CPUs) can be factorized as: * - * cs->cost * \Sum cpu_util + * ps->cost * \Sum cpu_util * pd_nrg = ------------------------ (4) * scale_cpu */ - return cs->cost * sum_util / scale_cpu; + return ps->cost * sum_util / scale_cpu; } /** - * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain + * em_pd_nr_perf_states() - Get the number of performance states of a perf. 
+ * domain * @pd : performance domain for which this must be done * - * Return: the number of capacity states in the performance domain table + * Return: the number of performance states in the performance domain table */ -static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) { - return pd->nr_cap_states; + return pd->nr_perf_states; } #else @@ -177,7 +183,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, { return 0; } -static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) { return 0; } diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 0a9326f5f421..9892d548a0fa 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -27,18 +27,18 @@ static DEFINE_MUTEX(em_pd_mutex); #ifdef CONFIG_DEBUG_FS static struct dentry *rootdir; -static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd) +static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd) { struct dentry *d; char name[24]; - snprintf(name, sizeof(name), "cs:%lu", cs->frequency); + snprintf(name, sizeof(name), "ps:%lu", ps->frequency); - /* Create per-cs directory */ + /* Create per-ps directory */ d = debugfs_create_dir(name, pd); - debugfs_create_ulong("frequency", 0444, d, &cs->frequency); - debugfs_create_ulong("power", 0444, d, &cs->power); - debugfs_create_ulong("cost", 0444, d, &cs->cost); + debugfs_create_ulong("frequency", 0444, d, &ps->frequency); + debugfs_create_ulong("power", 0444, d, &ps->power); + debugfs_create_ulong("cost", 0444, d, &ps->cost); } static int em_debug_cpus_show(struct seq_file *s, void *unused) @@ -62,9 +62,9 @@ static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops); - /* Create a sub-directory for each capacity state */ - for (i = 0; i < pd->nr_cap_states; i++) - em_debug_create_cs(&pd->table[i], d); + /* Create a sub-directory for each performance state */ + for (i = 0; i < pd->nr_perf_states; i++) + em_debug_create_ps(&pd->table[i], d); } static int __init em_debug_init(void) @@ -84,7 +84,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, unsigned long opp_eff, prev_opp_eff = ULONG_MAX; unsigned long power, freq, prev_freq = 0; int i, ret, cpu = cpumask_first(span); - struct em_cap_state *table; + struct em_perf_state *table; struct em_perf_domain *pd; u64 fmax; @@ -99,26 +99,26 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, if (!table) goto free_pd; - /* Build the list of capacity states for this performance domain */ + /* Build the list of performance states for this performance domain */ for (i = 0, freq = 0; i < nr_states; i++, freq++) { /* * active_power() is a driver callback which ceils 'freq' to - * lowest capacity state of 'cpu' above 'freq' and updates + * lowest performance state of 'cpu' above 'freq' and updates * 'power' and 'freq' accordingly. */ ret = cb->active_power(&power, &freq, cpu); if (ret) { - pr_err("pd%d: invalid cap. state: %d\n", cpu, ret); - goto free_cs_table; + pr_err("pd%d: invalid perf. state: %d\n", cpu, ret); + goto free_ps_table; } /* * We expect the driver callback to increase the frequency for - * higher capacity states. + * higher performance states. 
*/ if (freq <= prev_freq) { pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq); - goto free_cs_table; + goto free_ps_table; } /* @@ -127,7 +127,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, */ if (!power || power > EM_CPU_MAX_POWER) { pr_err("pd%d: invalid power: %lu\n", cpu, power); - goto free_cs_table; + goto free_ps_table; } table[i].power = power; @@ -141,12 +141,12 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, */ opp_eff = freq / power; if (opp_eff >= prev_opp_eff) - pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n", + pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", cpu, i, i - 1); prev_opp_eff = opp_eff; } - /* Compute the cost of each capacity_state. */ + /* Compute the cost of each performance state. */ fmax = (u64) table[nr_states - 1].frequency; for (i = 0; i < nr_states; i++) { table[i].cost = div64_u64(fmax * table[i].power, @@ -154,14 +154,14 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, } pd->table = table; - pd->nr_cap_states = nr_states; + pd->nr_perf_states = nr_states; cpumask_copy(to_cpumask(pd->cpus), span); em_debug_create_pd(pd, cpu); return pd; -free_cs_table: +free_ps_table: kfree(table); free_pd: kfree(pd); @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(em_cpu_get); /** * em_register_perf_domain() - Register the Energy Model of a performance domain * @span : Mask of CPUs in the performance domain - * @nr_states : Number of capacity states to register + * @nr_states : Number of performance states to register * @cb : Callback functions providing the data of the Energy Model * * Create Energy Model tables for a performance domain using the callbacks diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index ba81187bb7af..2f91d3126365 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -272,10 +272,10 @@ static void perf_domain_debug(const struct cpumask *cpu_map, printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); while (pd) { - printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }", + printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }", cpumask_first(perf_domain_span(pd)), cpumask_pr_args(perf_domain_span(pd)), - em_pd_nr_cap_states(pd->em_pd)); + em_pd_nr_perf_states(pd->em_pd)); pd = pd->next; } @@ -313,26 +313,26 @@ static void sched_energy_set(bool has_eas) * * The complexity of the Energy Model is defined as: * - * C = nr_pd * (nr_cpus + nr_cs) + * C = nr_pd * (nr_cpus + nr_ps) * * with parameters defined as: * - nr_pd: the number of performance domains * - nr_cpus: the number of CPUs - * - nr_cs: the sum of the number of capacity states of all performance + * - nr_ps: the sum of the number of performance states of all performance * domains (for example, on a system with 2 performance domains, - * with 10 capacity states each, nr_cs = 2 * 10 = 20). + * with 10 performance states each, nr_ps = 2 * 10 = 20). * * It is generally not a good idea to use such a model in the wake-up path on * very complex platforms because of the associated scheduling overheads. The * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs - * with per-CPU DVFS and less than 8 capacity states each, for example. + * with per-CPU DVFS and less than 8 performance states each, for example. 
*/ #define EM_MAX_COMPLEXITY 2048 extern struct cpufreq_governor schedutil_gov; static bool build_perf_domains(const struct cpumask *cpu_map) { - int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map); + int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); struct perf_domain *pd = NULL, *tmp; int cpu = cpumask_first(cpu_map); struct root_domain *rd = cpu_rq(cpu)->rd; @@ -384,15 +384,15 @@ static bool build_perf_domains(const struct cpumask *cpu_map) pd = tmp; /* - * Count performance domains and capacity states for the + * Count performance domains and performance states for the * complexity check. */ nr_pd++; - nr_cs += em_pd_nr_cap_states(pd->em_pd); + nr_ps += em_pd_nr_perf_states(pd->em_pd); } /* Bail out if the Energy Model complexity is too high. */ - if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) { + if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) { WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n", cpumask_pr_args(cpu_map)); goto free; -- cgit v1.2.3-59-g8ed1b From 7d9895c7fbfc9c70afce7029b7de0f3f974adb88 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:48 +0100 Subject: PM / EM: introduce em_dev_register_perf_domain function Add now function in the Energy Model framework which is going to support new devices. This function will help in transition and make it smoother. For now it still checks if the cpumask is a valid pointer, which will be removed later when the new structures and infrastructure will be ready. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 13 +++++++++++-- kernel/power/energy_model.c | 40 ++++++++++++++++++++++++++++++++++------ 2 files changed, 45 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index fe336a9eb5d4..7c048df98447 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -2,6 +2,7 @@ #ifndef _LINUX_ENERGY_MODEL_H #define _LINUX_ENERGY_MODEL_H #include +#include #include #include #include @@ -42,7 +43,7 @@ struct em_perf_domain { #define em_span_cpus(em) (to_cpumask((em)->cpus)) #ifdef CONFIG_ENERGY_MODEL -#define EM_CPU_MAX_POWER 0xFFFF +#define EM_MAX_POWER 0xFFFF struct em_data_callback { /** @@ -59,7 +60,7 @@ struct em_data_callback { * and frequency. * * The power is the one of a single CPU in the domain, expressed in - * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER] + * milli-watts. It is expected to fit in the [0, EM_MAX_POWER] * range. * * Return 0 on success. @@ -71,6 +72,8 @@ struct em_data_callback { struct em_perf_domain *em_cpu_get(int cpu); int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, struct em_data_callback *cb); +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span); /** * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. 
domain @@ -174,6 +177,12 @@ static inline int em_register_perf_domain(cpumask_t *span, { return -EINVAL; } +static inline +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span) +{ + return -EINVAL; +} static inline struct em_perf_domain *em_cpu_get(int cpu) { return NULL; diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 9892d548a0fa..875b163e54ab 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -125,7 +125,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, * The power returned by active_state() is expected to be * positive, in milli-watts and to fit into 16 bits. */ - if (!power || power > EM_CPU_MAX_POWER) { + if (!power || power > EM_MAX_POWER) { pr_err("pd%d: invalid power: %lu\n", cpu, power); goto free_ps_table; } @@ -183,10 +183,13 @@ struct em_perf_domain *em_cpu_get(int cpu) EXPORT_SYMBOL_GPL(em_cpu_get); /** - * em_register_perf_domain() - Register the Energy Model of a performance domain - * @span : Mask of CPUs in the performance domain + * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device + * @dev : Device for which the EM is to register * @nr_states : Number of performance states to register * @cb : Callback functions providing the data of the Energy Model + * @span : Pointer to cpumask_t, which in case of a CPU device is + * obligatory. It can be taken from i.e. 'policy->cpus'. For other + * type of devices this should be set to NULL. * * Create Energy Model tables for a performance domain using the callbacks * defined in cb. @@ -196,14 +199,14 @@ EXPORT_SYMBOL_GPL(em_cpu_get); * * Return 0 on success */ -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb) +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span) { unsigned long cap, prev_cap = 0; struct em_perf_domain *pd; int cpu, ret = 0; - if (!span || !nr_states || !cb) + if (!dev || !span || !nr_states || !cb) return -EINVAL; /* @@ -255,4 +258,29 @@ unlock: return ret; } +EXPORT_SYMBOL_GPL(em_dev_register_perf_domain); + +/** + * em_register_perf_domain() - Register the Energy Model of a performance domain + * @span : Mask of CPUs in the performance domain + * @nr_states : Number of capacity states to register + * @cb : Callback functions providing the data of the Energy Model + * + * Create Energy Model tables for a performance domain using the callbacks + * defined in cb. + * + * If multiple clients register the same performance domain, all but the first + * registration will be ignored. + * + * Return 0 on success + */ +int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, + struct em_data_callback *cb) +{ + struct device *cpu_dev; + + cpu_dev = get_cpu_device(cpumask_first(span)); + + return em_dev_register_perf_domain(cpu_dev, nr_states, cb, span); +} EXPORT_SYMBOL_GPL(em_register_perf_domain); -- cgit v1.2.3-59-g8ed1b From d0351cc3b0f57214d157e4d589564730af2aedae Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:49 +0100 Subject: PM / EM: update callback structure and add device pointer The Energy Model framework is going to support devices other that CPUs. In order to make this happen change the callback function and add pointer to a device as an argument. Update the related users to use new function and new callback from the Energy Model. 
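For illustration only, a sketch of what a client callback looks like once it receives a struct device * instead of a CPU number; only the prototype and the EM_DATA_CB() hookup follow the header, while the two-state table and its power numbers are invented:

/* Hypothetical two-state device: 500 MHz / 100 mW and 1 GHz / 300 mW. */
static int demo_active_power(unsigned long *power, unsigned long *freq,
			     struct device *dev)
{
	/* dev is unused in this toy example; a real driver would query it. */
	if (*freq <= 500000) {		/* kHz, as in the EM header */
		*freq = 500000;
		*power = 100;		/* mW, made-up number */
	} else {
		*freq = 1000000;
		*power = 300;
	}
	return 0;
}

static struct em_data_callback demo_em_cb = EM_DATA_CB(demo_active_power);

/* A CPU-device client would then register roughly as:
 *	em_dev_register_perf_domain(cpu_dev, 2, &demo_em_cb, policy->cpus);
 */
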
Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Acked-by: Daniel Lezcano Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/scmi-cpufreq.c | 11 +++-------- drivers/opp/of.c | 9 ++------- include/linux/energy_model.h | 15 ++++++++------- kernel/power/energy_model.c | 9 +++++---- 4 files changed, 18 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 61623e2ff149..11ee24e06d12 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -103,17 +103,12 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) } static int __maybe_unused -scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu) +scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, + struct device *cpu_dev) { - struct device *cpu_dev = get_cpu_device(cpu); unsigned long Hz; int ret, domain; - if (!cpu_dev) { - pr_err("failed to get cpu%d device\n", cpu); - return -ENODEV; - } - domain = handle->perf_ops->device_domain_id(cpu_dev); if (domain < 0) return domain; @@ -200,7 +195,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = true; - em_register_perf_domain(policy->cpus, nr_opp, &em_cb); + em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus); return 0; diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 9a5873591a40..e273f419a4bf 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1216,9 +1216,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); * calculation failed because of missing parameters, 0 otherwise. */ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, - int cpu) + struct device *cpu_dev) { - struct device *cpu_dev; struct dev_pm_opp *opp; struct device_node *np; unsigned long mV, Hz; @@ -1226,10 +1225,6 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, u64 tmp; int ret; - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) - return -ENODEV; - np = of_node_get(cpu_dev->of_node); if (!np) return -EINVAL; @@ -1297,6 +1292,6 @@ void dev_pm_opp_of_register_em(struct cpumask *cpus) if (ret || !cap) return; - em_register_perf_domain(cpus, nr_opp, &em_cb); + em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, cpus); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em); diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 7c048df98447..7076cb22b247 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -48,24 +48,25 @@ struct em_perf_domain { struct em_data_callback { /** * active_power() - Provide power at the next performance state of - * a CPU + * a device * @power : Active power at the performance state in mW * (modified) * @freq : Frequency at the performance state in kHz * (modified) - * @cpu : CPU for which we do this operation + * @dev : Device for which we do this operation (can be a CPU) * - * active_power() must find the lowest performance state of 'cpu' above + * active_power() must find the lowest performance state of 'dev' above * 'freq' and update 'power' and 'freq' to the matching active power * and frequency. * - * The power is the one of a single CPU in the domain, expressed in - * milli-watts. It is expected to fit in the [0, EM_MAX_POWER] - * range. + * In case of CPUs, the power is the one of a single CPU in the domain, + * expressed in milli-watts. It is expected to fit in the + * [0, EM_MAX_POWER] range. * * Return 0 on success. 
*/ - int (*active_power)(unsigned long *power, unsigned long *freq, int cpu); + int (*active_power)(unsigned long *power, unsigned long *freq, + struct device *dev); }; #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 875b163e54ab..5b8a1566526a 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -78,8 +78,9 @@ core_initcall(em_debug_init); #else /* CONFIG_DEBUG_FS */ static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {} #endif -static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, - struct em_data_callback *cb) +static struct em_perf_domain * +em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, + cpumask_t *span) { unsigned long opp_eff, prev_opp_eff = ULONG_MAX; unsigned long power, freq, prev_freq = 0; @@ -106,7 +107,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, * lowest performance state of 'cpu' above 'freq' and updates * 'power' and 'freq' accordingly. */ - ret = cb->active_power(&power, &freq, cpu); + ret = cb->active_power(&power, &freq, dev); if (ret) { pr_err("pd%d: invalid perf. state: %d\n", cpu, ret); goto free_ps_table; @@ -237,7 +238,7 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, } /* Create the performance domain and add it to the Energy Model. */ - pd = em_create_pd(span, nr_states, cb); + pd = em_create_pd(dev, nr_states, cb, span); if (!pd) { ret = -EINVAL; goto unlock; -- cgit v1.2.3-59-g8ed1b From c3077b5d97a39223a2d4b95a21ccff660836170f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Jun 2020 08:44:41 +0200 Subject: blk-mq: merge blk-softirq.c into blk-mq.c __blk_complete_request is only called from the blk-mq code, and duplicates a lot of code from blk-mq.c. Move it there to prepare for better code sharing and simplifications. Signed-off-by: Christoph Hellwig Reviewed-by: Daniel Wagner Signed-off-by: Jens Axboe --- block/Makefile | 2 +- block/blk-mq.c | 135 ++++++++++++++++++++++++++++++++++++++++++ block/blk-softirq.c | 156 ------------------------------------------------- include/linux/blkdev.h | 1 - 4 files changed, 136 insertions(+), 158 deletions(-) delete mode 100644 block/blk-softirq.c (limited to 'include') diff --git a/block/Makefile b/block/Makefile index 78719169fb2a..8d841f5f986f 100644 --- a/block/Makefile +++ b/block/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ - blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ + blk-exec.o blk-merge.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \ genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o diff --git a/block/blk-mq.c b/block/blk-mq.c index a9aa6d1e44cf..60febbf6f8d9 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -41,6 +41,8 @@ #include "blk-mq-sched.h" #include "blk-rq-qos.h" +static DEFINE_PER_CPU(struct list_head, blk_cpu_done); + static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); @@ -574,6 +576,130 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); +/* + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. 
+ */ +static __latent_entropy void blk_done_softirq(struct softirq_action *h) +{ + struct list_head *cpu_list, local_list; + + local_irq_disable(); + cpu_list = this_cpu_ptr(&blk_cpu_done); + list_replace_init(cpu_list, &local_list); + local_irq_enable(); + + while (!list_empty(&local_list)) { + struct request *rq; + + rq = list_entry(local_list.next, struct request, ipi_list); + list_del_init(&rq->ipi_list); + rq->q->mq_ops->complete(rq); + } +} + +#ifdef CONFIG_SMP +static void trigger_softirq(void *data) +{ + struct request *rq = data; + struct list_head *list; + + list = this_cpu_ptr(&blk_cpu_done); + list_add_tail(&rq->ipi_list, list); + + if (list->next == &rq->ipi_list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); +} + +/* + * Setup and invoke a run of 'trigger_softirq' on the given cpu. + */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + if (cpu_online(cpu)) { + call_single_data_t *data = &rq->csd; + + data->func = trigger_softirq; + data->info = rq; + data->flags = 0; + + smp_call_function_single_async(cpu, data); + return 0; + } + + return 1; +} +#else /* CONFIG_SMP */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + return 1; +} +#endif + +static int blk_softirq_cpu_dead(unsigned int cpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_done, cpu), + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); + + return 0; +} + +static void __blk_complete_request(struct request *req) +{ + struct request_queue *q = req->q; + int cpu, ccpu = req->mq_ctx->cpu; + unsigned long flags; + bool shared = false; + + BUG_ON(!q->mq_ops->complete); + + local_irq_save(flags); + cpu = smp_processor_id(); + + /* + * Select completion CPU + */ + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) { + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) + shared = cpus_share_cache(cpu, ccpu); + } else + ccpu = cpu; + + /* + * If current CPU and requested CPU share a cache, run the softirq on + * the current CPU. One might concern this is just like + * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is + * running in interrupt handler, and currently I/O controller doesn't + * support multiple interrupts, so current CPU is unique actually. This + * avoids IPI sending from current CPU to the first CPU of a group. + */ + if (ccpu == cpu || shared) { + struct list_head *list; +do_local: + list = this_cpu_ptr(&blk_cpu_done); + list_add_tail(&req->ipi_list, list); + + /* + * if the list only contains our just added request, + * signal a raise of the softirq. If there are already + * entries there, someone already raised the irq but it + * hasn't run yet. 
+ */ + if (list->next == &req->ipi_list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + } else if (raise_blk_irq(ccpu, req)) + goto do_local; + + local_irq_restore(flags); +} + static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; @@ -3760,6 +3886,15 @@ EXPORT_SYMBOL(blk_mq_rq_cpu); static int __init blk_mq_init(void) { + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); + open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); + + cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, + "block/softirq:dead", NULL, + blk_softirq_cpu_dead); cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, blk_mq_hctx_notify_dead); cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", diff --git a/block/blk-softirq.c b/block/blk-softirq.c deleted file mode 100644 index 6e7ec87d49fa..000000000000 --- a/block/blk-softirq.c +++ /dev/null @@ -1,156 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Functions related to softirq rq completions - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "blk.h" - -static DEFINE_PER_CPU(struct list_head, blk_cpu_done); - -/* - * Softirq action handler - move entries to local list and loop over them - * while passing them to the queue registered handler. - */ -static __latent_entropy void blk_done_softirq(struct softirq_action *h) -{ - struct list_head *cpu_list, local_list; - - local_irq_disable(); - cpu_list = this_cpu_ptr(&blk_cpu_done); - list_replace_init(cpu_list, &local_list); - local_irq_enable(); - - while (!list_empty(&local_list)) { - struct request *rq; - - rq = list_entry(local_list.next, struct request, ipi_list); - list_del_init(&rq->ipi_list); - rq->q->mq_ops->complete(rq); - } -} - -#ifdef CONFIG_SMP -static void trigger_softirq(void *data) -{ - struct request *rq = data; - struct list_head *list; - - list = this_cpu_ptr(&blk_cpu_done); - list_add_tail(&rq->ipi_list, list); - - if (list->next == &rq->ipi_list) - raise_softirq_irqoff(BLOCK_SOFTIRQ); -} - -/* - * Setup and invoke a run of 'trigger_softirq' on the given cpu. - */ -static int raise_blk_irq(int cpu, struct request *rq) -{ - if (cpu_online(cpu)) { - call_single_data_t *data = &rq->csd; - - data->func = trigger_softirq; - data->info = rq; - data->flags = 0; - - smp_call_function_single_async(cpu, data); - return 0; - } - - return 1; -} -#else /* CONFIG_SMP */ -static int raise_blk_irq(int cpu, struct request *rq) -{ - return 1; -} -#endif - -static int blk_softirq_cpu_dead(unsigned int cpu) -{ - /* - * If a CPU goes away, splice its entries to the current CPU - * and trigger a run of the softirq - */ - local_irq_disable(); - list_splice_init(&per_cpu(blk_cpu_done, cpu), - this_cpu_ptr(&blk_cpu_done)); - raise_softirq_irqoff(BLOCK_SOFTIRQ); - local_irq_enable(); - - return 0; -} - -void __blk_complete_request(struct request *req) -{ - struct request_queue *q = req->q; - int cpu, ccpu = req->mq_ctx->cpu; - unsigned long flags; - bool shared = false; - - BUG_ON(!q->mq_ops->complete); - - local_irq_save(flags); - cpu = smp_processor_id(); - - /* - * Select completion CPU - */ - if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) { - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) - shared = cpus_share_cache(cpu, ccpu); - } else - ccpu = cpu; - - /* - * If current CPU and requested CPU share a cache, run the softirq on - * the current CPU. One might concern this is just like - * QUEUE_FLAG_SAME_FORCE, but actually not. 
blk_complete_request() is - * running in interrupt handler, and currently I/O controller doesn't - * support multiple interrupts, so current CPU is unique actually. This - * avoids IPI sending from current CPU to the first CPU of a group. - */ - if (ccpu == cpu || shared) { - struct list_head *list; -do_local: - list = this_cpu_ptr(&blk_cpu_done); - list_add_tail(&req->ipi_list, list); - - /* - * if the list only contains our just added request, - * signal a raise of the softirq. If there are already - * entries there, someone already raised the irq but it - * hasn't run yet. - */ - if (list->next == &req->ipi_list) - raise_softirq_irqoff(BLOCK_SOFTIRQ); - } else if (raise_blk_irq(ccpu, req)) - goto do_local; - - local_irq_restore(flags); -} - -static __init int blk_softirq_init(void) -{ - int i; - - for_each_possible_cpu(i) - INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); - - open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); - cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, - "block/softirq:dead", NULL, - blk_softirq_cpu_dead); - return 0; -} -subsys_initcall(blk_softirq_init); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8fd900998b4e..98712cfc7a34 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1078,7 +1078,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq); extern bool blk_update_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); -extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); /* -- cgit v1.2.3-59-g8ed1b From 15f73f5b3e5958f2d169fe13c420eeeeae07bbf2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Jun 2020 08:44:47 +0200 Subject: blk-mq: move failure injection out of blk_mq_complete_request Move the call to blk_should_fake_timeout out of blk_mq_complete_request and into the drivers, skipping call sites that are obvious error handlers, and remove the now superflous blk_mq_force_complete_rq helper. This ensures we don't keep injecting errors into completions that just terminate the Linux request after the hardware has been reset or the command has been aborted. Reviewed-by: Daniel Wagner Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 34 +++++++--------------------------- block/blk-timeout.c | 6 ++---- block/blk.h | 9 --------- block/bsg-lib.c | 5 ++++- drivers/block/loop.c | 6 ++++-- drivers/block/mtip32xx/mtip32xx.c | 3 ++- drivers/block/nbd.c | 5 ++++- drivers/block/null_blk_main.c | 5 +++-- drivers/block/skd_main.c | 9 ++++++--- drivers/block/virtio_blk.c | 3 ++- drivers/block/xen-blkfront.c | 3 ++- drivers/md/dm-rq.c | 3 ++- drivers/mmc/core/block.c | 8 ++++---- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/nvme.h | 3 ++- drivers/s390/block/dasd.c | 2 +- drivers/s390/block/scm_blk.c | 3 ++- drivers/scsi/scsi_lib.c | 12 +++--------- include/linux/blk-mq.h | 12 ++++++++++-- 19 files changed, 61 insertions(+), 72 deletions(-) (limited to 'include') diff --git a/block/blk-mq.c b/block/blk-mq.c index ce772ab19188..3f4f227cf830 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -655,16 +655,13 @@ static void __blk_mq_complete_request_remote(void *data) } /** - * blk_mq_force_complete_rq() - Force complete the request, bypassing any error - * injection that could drop the completion. - * @rq: Request to be force completed + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed * - * Drivers should use blk_mq_complete_request() to complete requests in their - * normal IO path. 
For timeout error recovery, drivers may call this forced - * completion routine after they've reclaimed timed out requests to bypass - * potentially subsequent fake timeouts. - */ -void blk_mq_force_complete_rq(struct request *rq) + * Description: + * Complete a request by scheduling the ->complete_rq operation. + **/ +void blk_mq_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; struct request_queue *q = rq->q; @@ -702,7 +699,7 @@ void blk_mq_force_complete_rq(struct request *rq) } put_cpu(); } -EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq); +EXPORT_SYMBOL(blk_mq_complete_request); static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) __releases(hctx->srcu) @@ -724,23 +721,6 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) *srcu_idx = srcu_read_lock(hctx->srcu); } -/** - * blk_mq_complete_request - end I/O on a request - * @rq: the request being processed - * - * Description: - * Ends all I/O on a request. It does not handle partial completions. - * The actual completion happens out-of-order, through a IPI handler. - **/ -bool blk_mq_complete_request(struct request *rq) -{ - if (unlikely(blk_should_fake_timeout(rq->q))) - return false; - blk_mq_force_complete_rq(rq); - return true; -} -EXPORT_SYMBOL(blk_mq_complete_request); - /** * blk_mq_start_request - Start processing a request * @rq: Pointer to request to be started diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 8aa68fae96ad..3a1ac6434758 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -20,13 +20,11 @@ static int __init setup_fail_io_timeout(char *str) } __setup("fail_io_timeout=", setup_fail_io_timeout); -int blk_should_fake_timeout(struct request_queue *q) +bool __blk_should_fake_timeout(struct request_queue *q) { - if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) - return 0; - return should_fail(&fail_io_timeout, 1); } +EXPORT_SYMBOL_GPL(__blk_should_fake_timeout); static int __init fail_io_timeout_debugfs(void) { diff --git a/block/blk.h b/block/blk.h index b5d1f0fc6547..8ba4a5e4fe07 100644 --- a/block/blk.h +++ b/block/blk.h @@ -223,18 +223,9 @@ ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); - -#ifdef CONFIG_FAIL_IO_TIMEOUT -int blk_should_fake_timeout(struct request_queue *); ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ssize_t part_timeout_store(struct device *, struct device_attribute *, const char *, size_t); -#else -static inline int blk_should_fake_timeout(struct request_queue *q) -{ - return 0; -} -#endif void __blk_queue_split(struct request_queue *q, struct bio **bio, unsigned int *nr_segs); diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 6cbb7926534c..fb7b347f8010 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -181,9 +181,12 @@ EXPORT_SYMBOL_GPL(bsg_job_get); void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) { + struct request *rq = blk_mq_rq_from_pdu(job); + job->result = result; job->reply_payload_rcv_len = reply_payload_rcv_len; - blk_mq_complete_request(blk_mq_rq_from_pdu(job)); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } EXPORT_SYMBOL_GPL(bsg_job_done); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 475e1a738560..4acae248790c 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -509,7 +509,8 @@ static void lo_rw_aio_do_completion(struct 
loop_cmd *cmd) return; kfree(cmd->bvec); cmd->bvec = NULL; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) @@ -2048,7 +2049,8 @@ static void loop_handle_cmd(struct loop_cmd *cmd) cmd->ret = ret; else cmd->ret = ret ? -EIO : 0; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index f6bafa9a68b9..153e2cdecb4d 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -492,7 +492,8 @@ static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status) struct request *req = blk_mq_rq_from_pdu(cmd); cmd->status = status; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } /* diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 43cff01a5a67..01794cd2b6ca 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -784,6 +784,7 @@ static void recv_work(struct work_struct *work) struct nbd_device *nbd = args->nbd; struct nbd_config *config = nbd->config; struct nbd_cmd *cmd; + struct request *rq; while (1) { cmd = nbd_read_stat(nbd, args->index); @@ -796,7 +797,9 @@ static void recv_work(struct work_struct *work) break; } - blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); + rq = blk_mq_rq_from_pdu(cmd); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } atomic_dec(&config->recv_threads); wake_up(&config->recv_wq); diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 87b31f9ca362..82259242b9b5 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1283,7 +1283,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (cmd->nq->dev->queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + if (likely(!blk_should_fake_timeout(cmd->rq->q))) + blk_mq_complete_request(cmd->rq); break; case NULL_Q_BIO: /* @@ -1423,7 +1424,7 @@ static bool should_requeue_request(struct request *rq) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) { pr_info("rq %p timed out\n", rq); - blk_mq_force_complete_rq(rq); + blk_mq_complete_request(rq); return BLK_EH_DONE; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 51569c199a6c..3a476dc1d14f 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1417,7 +1417,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REPORT_GOOD: case SKD_CHECK_STATUS_REPORT_SMART_ALERT: skreq->status = BLK_STS_OK; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); break; case SKD_CHECK_STATUS_BUSY_IMMINENT: @@ -1440,7 +1441,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REPORT_ERROR: default: skreq->status = BLK_STS_IOERR; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); break; } } @@ -1560,7 +1562,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev, */ if (likely(cmp_status == SAM_STAT_GOOD)) { skreq->status = BLK_STS_OK; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } else { skd_resolve_req_exception(skdev, skreq, rq); } diff 
--git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 9d21bf0f155e..741804bd8a14 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -171,7 +171,8 @@ static void virtblk_done(struct virtqueue *vq) while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { struct request *req = blk_mq_rq_from_pdu(vbr); - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3b889ea950c2..3bb3dd8da9b0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1655,7 +1655,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) BUG(); } - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } rinfo->ring.rsp_cons = i; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index f60c02512121..5aec1cd09348 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -288,7 +288,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error) struct dm_rq_target_io *tio = tio_from_request(rq); tio->error = error; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } /* diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7896952de1ac..4791c82f8f7c 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1446,7 +1446,7 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq) */ if (mq->in_recovery) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); } @@ -1926,7 +1926,7 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq) */ if (mq->in_recovery) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); } @@ -1936,7 +1936,7 @@ void mmc_blk_mq_complete(struct request *req) if (mq->use_cqe) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) mmc_blk_mq_complete_rq(mq, req); } @@ -1988,7 +1988,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) */ if (mq->in_recovery) mmc_blk_mq_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); mmc_blk_mq_dec_in_flight(mq, req); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2c5bc4fb702..6810c8812aed 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -304,7 +304,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; - blk_mq_force_complete_rq(req); + blk_mq_complete_request(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index c0f4226d3299..034613205701 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -481,7 +481,8 @@ static inline void nvme_end_request(struct request *req, __le16 status, rq->result = result; /* inject error when permitted by fault injection framework */ nvme_should_fail(req); - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl) diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 
cf87eb27879f..eb17fea8075c 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2802,7 +2802,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) blk_update_request(req, BLK_STS_OK, blk_rq_bytes(req) - proc_bytes); blk_mq_requeue_request(req, true); - } else { + } else if (likely(!blk_should_fake_timeout(req->q))) { blk_mq_complete_request(req); } } diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index e01889394c84..a4f6f2e62b1d 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -256,7 +256,8 @@ static void scm_request_finish(struct scm_request *scmrq) for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { error = blk_mq_rq_to_pdu(scmrq->request[i]); *error = scmrq->error; - blk_mq_complete_request(scmrq->request[i]); + if (likely(!blk_should_fake_timeout(scmrq->request[i]->q))) + blk_mq_complete_request(scmrq->request[i]); } atomic_dec(&bdev->queued_reqs); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d..6ca91d09eca1 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1589,18 +1589,12 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { + if (unlikely(blk_should_fake_timeout(cmd->request->q))) + return; if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) return; trace_scsi_dispatch_cmd_done(cmd); - - /* - * If the block layer didn't complete the request due to a timeout - * injection, scsi must clear its internal completed state so that the - * timeout handler will see it needs to escalate its own error - * recovery. - */ - if (unlikely(!blk_mq_complete_request(cmd->request))) - clear_bit(SCMD_STATE_COMPLETE, &cmd->state); + blk_mq_complete_request(cmd->request); } static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d6fcae17da5a..8e6ab766aef7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -503,8 +503,7 @@ void __blk_mq_end_request(struct request *rq, blk_status_t error); void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -bool blk_mq_complete_request(struct request *rq); -void blk_mq_force_complete_rq(struct request *rq); +void blk_mq_complete_request(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); @@ -537,6 +536,15 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q); unsigned int blk_mq_rq_cpu(struct request *rq); +bool __blk_should_fake_timeout(struct request_queue *q); +static inline bool blk_should_fake_timeout(struct request_queue *q) +{ + if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && + test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) + return __blk_should_fake_timeout(q); + return false; +} + /** * blk_mq_rq_from_pdu - cast a PDU to a request * @pdu: the PDU (Protocol Data Unit) to be casted -- cgit v1.2.3-59-g8ed1b From 40d09b53bfc557af7481b9d80f060a7ac9c7d314 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Jun 2020 08:44:50 +0200 Subject: blk-mq: add a new blk_mq_complete_request_remote API This is a variant of blk_mq_complete_request_remote that only completes the request if it needs to be bounced to another CPU or a softirq. 
If the request can be completed locally the function returns false and lets the driver complete it without requring and indirect function call. Reviewed-by: Daniel Wagner Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 45 ++++++++++++++++++++++++++------------------- include/linux/blk-mq.h | 1 + 2 files changed, 27 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/block/blk-mq.c b/block/blk-mq.c index 961635b40999..b8738b3c6d06 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu) return 0; } -static void __blk_mq_complete_request(struct request *rq) + +static void __blk_mq_complete_request_remote(void *data) { + struct request *rq = data; + /* * For most of single queue controllers, there is only one irq vector * for handling I/O completion, and the only irq's affinity is set @@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq) rq->q->mq_ops->complete(rq); } -static void __blk_mq_complete_request_remote(void *data) -{ - __blk_mq_complete_request(data); -} - static inline bool blk_mq_complete_need_ipi(struct request *rq) { int cpu = raw_smp_processor_id(); @@ -672,14 +670,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) return cpu_online(rq->mq_ctx->cpu); } -/** - * blk_mq_complete_request - end I/O on a request - * @rq: the request being processed - * - * Description: - * Complete a request by scheduling the ->complete_rq operation. - **/ -void blk_mq_complete_request(struct request *rq) +bool blk_mq_complete_request_remote(struct request *rq) { WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); @@ -687,10 +678,8 @@ void blk_mq_complete_request(struct request *rq) * For a polled request, always complete locallly, it's pointless * to redirect the completion. */ - if (rq->cmd_flags & REQ_HIPRI) { - rq->q->mq_ops->complete(rq); - return; - } + if (rq->cmd_flags & REQ_HIPRI) + return false; if (blk_mq_complete_need_ipi(rq)) { rq->csd.func = __blk_mq_complete_request_remote; @@ -698,8 +687,26 @@ void blk_mq_complete_request(struct request *rq) rq->csd.flags = 0; smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd); } else { - __blk_mq_complete_request(rq); + if (rq->q->nr_hw_queues > 1) + return false; + blk_mq_trigger_softirq(rq); } + + return true; +} +EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); + +/** + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed + * + * Description: + * Complete a request by scheduling the ->complete_rq operation. 
+ **/ +void blk_mq_complete_request(struct request *rq) +{ + if (!blk_mq_complete_request_remote(rq)) + rq->q->mq_ops->complete(rq); } EXPORT_SYMBOL(blk_mq_complete_request); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8e6ab766aef7..1641ec6cd7e5 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -504,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_complete_request(struct request *rq); +bool blk_mq_complete_request_remote(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); -- cgit v1.2.3-59-g8ed1b From e8c7d14ac6c37c173ec606907d38802b00302988 Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Fri, 19 Jun 2020 20:47:25 +0000 Subject: block: revert back to synchronous request_queue removal Commit dc9edc44de6c ("block: Fix a blk_exit_rl() regression") merged on v4.12 moved the work behind blk_release_queue() into a workqueue after a splat floated around which indicated some work on blk_release_queue() could sleep in blk_exit_rl(). This splat would be possible when a driver called blk_put_queue() or blk_cleanup_queue() (which calls blk_put_queue() as its final call) from an atomic context. blk_put_queue() decrements the refcount for the request_queue kobject, and upon reaching 0 blk_release_queue() is called. Although blk_exit_rl() is now removed through commit db6d99523560 ("block: remove request_list code") on v5.0, we reserve the right to be able to sleep within blk_release_queue() context. The last reference for the request_queue must not be called from atomic context. *When* the last reference to the request_queue reaches 0 varies, and so let's take the opportunity to document when that is expected to happen and also document the context of the related calls as best as possible so we can avoid future issues, and with the hopes that the synchronous request_queue removal sticks. We revert back to synchronous request_queue removal because asynchronous removal creates a regression with expected userspace interaction with several drivers. An example is when removing the loopback driver, one uses ioctls from userspace to do so, but upon return and if successful, one expects the device to be removed. Likewise if one races to add another device the new one may not be added as it is still being removed. This was expected behavior before and it now fails as the device is still present and busy still. Moving to asynchronous request_queue removal could have broken many scripts which relied on the removal to have been completed if there was no error. Document this expectation as well so that this doesn't regress userspace again. Using asynchronous request_queue removal however has helped us find other bugs. In the future we can test what could break with this arrangement by enabling CONFIG_DEBUG_KOBJECT_RELEASE. While at it, update the docs with the context expectations for the request_queue / gendisk refcount decrement, and make these expectations explicit by using might_sleep(). 
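A rough sketch of what the documented context rules mean for a driver's teardown path; struct example_dev and example_remove() are hypothetical, and only the constraint that these calls may sleep comes from this patch.

#include <linux/blkdev.h>
#include <linux/genhd.h>

struct example_dev {			/* hypothetical driver state */
	struct gendisk *disk;
	struct request_queue *queue;
};

/*
 * Teardown must run in process context. With the might_sleep() annotations
 * this patch adds, dropping the last reference under a spinlock or from IRQ
 * context now warns instead of silently risking a sleep in atomic context.
 */
static void example_remove(struct example_dev *edev)
{
	del_gendisk(edev->disk);		/* can sleep */
	blk_cleanup_queue(edev->queue);		/* can sleep */
	put_disk(edev->disk);			/* last reference: not from atomic context */
}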
Fixes: dc9edc44de6c ("block: Fix a blk_exit_rl() regression") Suggested-by: Nicolai Stange Signed-off-by: Luis Chamberlain Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Cc: Bart Van Assche Cc: Omar Sandoval Cc: Hannes Reinecke Cc: Nicolai Stange Cc: Greg Kroah-Hartman Cc: Michal Hocko Cc: yu kuai Signed-off-by: Jens Axboe --- block/blk-core.c | 8 ++++++++ block/blk-sysfs.c | 43 ++++++++++++++++++++++--------------------- block/genhd.c | 17 +++++++++++++++++ include/linux/blkdev.h | 2 -- 4 files changed, 47 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index f68398cb2ef6..a99b22fac38a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -327,6 +327,9 @@ EXPORT_SYMBOL_GPL(blk_clear_pm_only); * * Decrements the refcount of the request_queue kobject. When this reaches 0 * we'll have blk_release_queue() called. + * + * Context: Any context, but the last reference must not be dropped from + * atomic context. */ void blk_put_queue(struct request_queue *q) { @@ -359,9 +362,14 @@ EXPORT_SYMBOL_GPL(blk_set_queue_dying); * * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and * put it. All future requests will be failed immediately with -ENODEV. + * + * Context: can sleep */ void blk_cleanup_queue(struct request_queue *q) { + /* cannot be called from atomic context */ + might_sleep(); + WARN_ON_ONCE(blk_queue_registered(q)); /* mark @q DYING, no new request or merges will be allowed afterwards */ diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 02643e149d5e..561624d4cc4e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -873,22 +873,32 @@ static void blk_exit_queue(struct request_queue *q) bdi_put(q->backing_dev_info); } - /** - * __blk_release_queue - release a request queue - * @work: pointer to the release_work member of the request queue to be released + * blk_release_queue - releases all allocated resources of the request_queue + * @kobj: pointer to a kobject, whose container is a request_queue + * + * This function releases all allocated resources of the request queue. + * + * The struct request_queue refcount is incremented with blk_get_queue() and + * decremented with blk_put_queue(). Once the refcount reaches 0 this function + * is called. + * + * For drivers that have a request_queue on a gendisk and added with + * __device_add_disk() the refcount to request_queue will reach 0 with + * the last put_disk() called by the driver. For drivers which don't use + * __device_add_disk() this happens with blk_cleanup_queue(). * - * Description: - * This function is called when a block device is being unregistered. The - * process of releasing a request queue starts with blk_cleanup_queue, which - * set the appropriate flags and then calls blk_put_queue, that decrements - * the reference counter of the request queue. Once the reference counter - * of the request queue reaches zero, blk_release_queue is called to release - * all allocated resources of the request queue. + * Drivers exist which depend on the release of the request_queue to be + * synchronous, it should not be deferred. 
+ * + * Context: can sleep */ -static void __blk_release_queue(struct work_struct *work) +static void blk_release_queue(struct kobject *kobj) { - struct request_queue *q = container_of(work, typeof(*q), release_work); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + + might_sleep(); if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) blk_stat_remove_callback(q, q->poll_cb); @@ -917,15 +927,6 @@ static void __blk_release_queue(struct work_struct *work) call_rcu(&q->rcu_head, blk_free_queue_rcu); } -static void blk_release_queue(struct kobject *kobj) -{ - struct request_queue *q = - container_of(kobj, struct request_queue, kobj); - - INIT_WORK(&q->release_work, __blk_release_queue); - schedule_work(&q->release_work); -} - static const struct sysfs_ops queue_sysfs_ops = { .show = queue_attr_show, .store = queue_attr_store, diff --git a/block/genhd.c b/block/genhd.c index 1be86b1f43ec..60ae4e1b4d38 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -889,12 +889,19 @@ static void invalidate_partition(struct gendisk *disk, int partno) * The final removal of the struct gendisk happens when its refcount reaches 0 * with put_disk(), which should be called after del_gendisk(), if * __device_add_disk() was used. + * + * Drivers exist which depend on the release of the gendisk to be synchronous, + * it should not be deferred. + * + * Context: can sleep */ void del_gendisk(struct gendisk *disk) { struct disk_part_iter piter; struct hd_struct *part; + might_sleep(); + blk_integrity_del(disk); disk_del_events(disk); @@ -1548,11 +1555,15 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno) * drivers we also call blk_put_queue() for them, and we expect the * request_queue refcount to reach 0 at this point, and so the request_queue * will also be freed prior to the disk. + * + * Context: can sleep */ static void disk_release(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); + might_sleep(); + blk_free_devt(dev->devt); disk_release_events(disk); kfree(disk->random); @@ -1797,6 +1808,9 @@ EXPORT_SYMBOL(get_disk_and_module); * * This decrements the refcount for the struct gendisk. When this reaches 0 * we'll have disk_release() called. + * + * Context: Any context, but the last reference must not be dropped from + * atomic context. */ void put_disk(struct gendisk *disk) { @@ -1811,6 +1825,9 @@ EXPORT_SYMBOL(put_disk); * * This is a counterpart of get_disk_and_module() and thus also of * get_gendisk(). + * + * Context: Any context, but the last reference must not be dropped from + * atomic context. */ void put_disk_and_module(struct gendisk *disk) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 98712cfc7a34..e214e0e9f868 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -584,8 +584,6 @@ struct request_queue { size_t cmd_size; - struct work_struct release_work; - #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; }; -- cgit v1.2.3-59-g8ed1b From 85e0cbbb8a79537dbc465e9deb449a08b2b092a6 Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Fri, 19 Jun 2020 20:47:30 +0000 Subject: block: create the request_queue debugfs_dir on registration We were only creating the request_queue debugfs_dir only for make_request block drivers (multiqueue), but never for request-based block drivers. We did this as we were only creating non-blktrace additional debugfs files on that directory for make_request drivers. 
However, since blktrace *always* creates that directory anyway, we special-case the use of that directory on blktrace. Other than this being an eye-sore, this exposes request-based block drivers to the same debugfs fragile race that used to exist with make_request block drivers where if we start adding files onto that directory we can later run a race with a double removal of dentries on the directory if we don't deal with this carefully on blktrace. Instead, just simplify things by always creating the request_queue debugfs_dir on request_queue registration. Rename the mutex also to reflect the fact that this is used outside of the blktrace context. Signed-off-by: Luis Chamberlain Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 8 +------ block/blk-mq-debugfs.c | 5 ----- block/blk-sysfs.c | 9 ++++++++ block/blk.h | 2 -- include/linux/blkdev.h | 5 +++-- kernel/trace/blktrace.c | 58 ++++++++++++++++++++++--------------------------- 6 files changed, 39 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index a99b22fac38a..a9769c1a2875 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -51,9 +51,7 @@ #include "blk-pm.h" #include "blk-rq-qos.h" -#ifdef CONFIG_DEBUG_FS struct dentry *blk_debugfs_root; -#endif EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); @@ -555,9 +553,7 @@ struct request_queue *__blk_alloc_queue(int node_id) kobject_init(&q->kobj, &blk_queue_ktype); -#ifdef CONFIG_BLK_DEV_IO_TRACE - mutex_init(&q->blk_trace_mutex); -#endif + mutex_init(&q->debugfs_mutex); mutex_init(&q->sysfs_lock); mutex_init(&q->sysfs_dir_lock); spin_lock_init(&q->queue_lock); @@ -1931,9 +1927,7 @@ int __init blk_dev_init(void) blk_requestq_cachep = kmem_cache_create("request_queue", sizeof(struct request_queue), 0, SLAB_PANIC, NULL); -#ifdef CONFIG_DEBUG_FS blk_debugfs_root = debugfs_create_dir("block", NULL); -#endif return 0; } diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 15df3a36e9fa..a2800bc56fb4 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -824,9 +824,6 @@ void blk_mq_debugfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), - blk_debugfs_root); - debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); /* @@ -857,9 +854,7 @@ void blk_mq_debugfs_register(struct request_queue *q) void blk_mq_debugfs_unregister(struct request_queue *q) { - debugfs_remove_recursive(q->debugfs_dir); q->sched_debugfs_dir = NULL; - q->debugfs_dir = NULL; } static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 561624d4cc4e..be67952e7be2 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "blk.h" #include "blk-mq.h" @@ -917,6 +918,9 @@ static void blk_release_queue(struct kobject *kobj) blk_mq_release(q); blk_trace_shutdown(q); + mutex_lock(&q->debugfs_mutex); + debugfs_remove_recursive(q->debugfs_dir); + mutex_unlock(&q->debugfs_mutex); if (queue_is_mq(q)) blk_mq_debugfs_unregister(q); @@ -989,6 +993,11 @@ int blk_register_queue(struct gendisk *disk) goto unlock; } + mutex_lock(&q->debugfs_mutex); + q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), + blk_debugfs_root); + mutex_unlock(&q->debugfs_mutex); + if (queue_is_mq(q)) { __blk_mq_register_dev(dev, q); blk_mq_debugfs_register(q); diff --git 
a/block/blk.h b/block/blk.h index 8ba4a5e4fe07..3a120a070dac 100644 --- a/block/blk.h +++ b/block/blk.h @@ -14,9 +14,7 @@ /* Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) -#ifdef CONFIG_DEBUG_FS extern struct dentry *blk_debugfs_root; -#endif struct blk_flush_queue { unsigned int flush_pending_idx:1; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e214e0e9f868..c0701237116d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -528,9 +528,9 @@ struct request_queue { unsigned int sg_timeout; unsigned int sg_reserved_size; int node; + struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; - struct mutex blk_trace_mutex; #endif /* * for flush operations @@ -574,8 +574,9 @@ struct request_queue { struct list_head tag_set_list; struct bio_set bio_split; -#ifdef CONFIG_BLK_DEBUG_FS struct dentry *debugfs_dir; + +#ifdef CONFIG_BLK_DEBUG_FS struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; #endif diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 46c273f4dec5..c086c38f4954 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -348,7 +348,7 @@ static int __blk_trace_remove(struct request_queue *q) struct blk_trace *bt; bt = rcu_replace_pointer(q->blk_trace, NULL, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (!bt) return -EINVAL; @@ -362,9 +362,9 @@ int blk_trace_remove(struct request_queue *q) { int ret; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); ret = __blk_trace_remove(q); - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -483,14 +483,11 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct dentry *dir = NULL; int ret; - lockdep_assert_held(&q->blk_trace_mutex); + lockdep_assert_held(&q->debugfs_mutex); if (!buts->buf_size || !buts->buf_nr) return -EINVAL; - if (!blk_debugfs_root) - return -ENOENT; - strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; @@ -505,7 +502,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, * we can be. */ if (rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex))) { + lockdep_is_held(&q->debugfs_mutex))) { pr_warn("Concurrent blktraces are not allowed on %s\n", buts->name); return -EBUSY; @@ -524,18 +521,15 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (!bt->msg_data) goto err; -#ifdef CONFIG_BLK_DEBUG_FS /* - * When tracing whole make_request drivers (multiqueue) block devices, - * reuse the existing debugfs directory created by the block layer on - * init. For request-based block devices, all partitions block devices, + * When tracing the whole disk reuse the existing debugfs directory + * created by the block layer on init. For partitions block devices, * and scsi-generic block devices we create a temporary new debugfs * directory that will be removed once the trace ends. 
*/ - if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains) + if (bdev && bdev == bdev->bd_contains) dir = q->debugfs_dir; else -#endif bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); /* @@ -617,9 +611,9 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, { int ret; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); ret = __blk_trace_setup(q, name, dev, bdev, arg); - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -665,7 +659,7 @@ static int __blk_trace_startstop(struct request_queue *q, int start) struct blk_trace *bt; bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (bt == NULL) return -EINVAL; @@ -705,9 +699,9 @@ int blk_trace_startstop(struct request_queue *q, int start) { int ret; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); ret = __blk_trace_startstop(q, start); - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -736,7 +730,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) if (!q) return -ENXIO; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); switch (cmd) { case BLKTRACESETUP: @@ -763,7 +757,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) break; } - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -774,14 +768,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) **/ void blk_trace_shutdown(struct request_queue *q) { - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); if (rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex))) { + lockdep_is_held(&q->debugfs_mutex))) { __blk_trace_startstop(q, 0); __blk_trace_remove(q); } - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); } #ifdef CONFIG_BLK_CGROUP @@ -1662,7 +1656,7 @@ static int blk_trace_remove_queue(struct request_queue *q) struct blk_trace *bt; bt = rcu_replace_pointer(q->blk_trace, NULL, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (bt == NULL) return -EINVAL; @@ -1837,10 +1831,10 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, if (q == NULL) goto out_bdput; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (attr == &dev_attr_enable) { ret = sprintf(buf, "%u\n", !!bt); goto out_unlock_bdev; @@ -1858,7 +1852,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, ret = sprintf(buf, "%llu\n", bt->end_lba); out_unlock_bdev: - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); out_bdput: bdput(bdev); out: @@ -1901,10 +1895,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, if (q == NULL) goto out_bdput; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (attr == &dev_attr_enable) { if (!!value == !!bt) { ret = 0; @@ -1921,7 +1915,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, if (bt == NULL) { ret = blk_trace_setup_queue(q, bdev); bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); } if (ret == 0) { @@ -1936,7 
+1930,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, } out_unlock_bdev: - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); out_bdput: bdput(bdev); out: -- cgit v1.2.3-59-g8ed1b From b818f09e46f9f6a66471f81bf83094ff0a477d0c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:35 +0200 Subject: tty/sysrq: emergency_thaw_all does not depend on CONFIG_BLOCK We can also thaw non-block file systems. Remove the CONFIG_BLOCK in sysrq.c after making the prototype available unconditionally. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Reviewed-by: Greg Kroah-Hartman Signed-off-by: Jens Axboe --- drivers/tty/sysrq.c | 2 -- include/linux/fs.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 7c95afa905a0..a8e39b2cdd55 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -403,7 +403,6 @@ static const struct sysrq_key_op sysrq_moom_op = { .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -#ifdef CONFIG_BLOCK static void sysrq_handle_thaw(int key) { emergency_thaw_all(); @@ -414,7 +413,6 @@ static const struct sysrq_key_op sysrq_thaw_op = { .action_msg = "Emergency Thaw of all frozen filesystems", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -#endif static void sysrq_handle_kill(int key) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f881a892ea7..7f40dbafbf6d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2593,7 +2593,6 @@ extern void invalidate_bdev(struct block_device *); extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); extern struct super_block *freeze_bdev(struct block_device *); -extern void emergency_thaw_all(void); extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); @@ -2633,6 +2632,7 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) return false; } #endif +void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; -- cgit v1.2.3-59-g8ed1b From 764b23bd9af8ff8ecc664816e39d4791b6a72bfd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:36 +0200 Subject: block: mark bd_finish_claiming static Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- fs/block_dev.c | 5 ++--- include/linux/fs.h | 2 -- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/block_dev.c b/fs/block_dev.c index 0ae656e022fd..0e0d43dc27d3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1187,8 +1187,8 @@ static void bd_clear_claiming(struct block_device *whole, void *holder) * Finish exclusive open of a block device. Mark the device as exlusively * open by the holder and wake up all waiters for exclusive open to finish. 
*/ -void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, - void *holder) +static void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder) { spin_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, whole, holder)); @@ -1203,7 +1203,6 @@ void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, bd_clear_claiming(whole, holder); spin_unlock(&bdev_lock); } -EXPORT_SYMBOL(bd_finish_claiming); /** * bd_abort_claiming - abort claiming of a block device diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f40dbafbf6d..b1c960e9b84e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2646,8 +2646,6 @@ extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); extern struct block_device *bd_start_claiming(struct block_device *bdev, void *holder); -extern void bd_finish_claiming(struct block_device *bdev, - struct block_device *whole, void *holder); extern void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, void *holder); extern void blkdev_put(struct block_device *bdev, fmode_t mode); -- cgit v1.2.3-59-g8ed1b From 7dbac5baa887facf80373825b8f66c626703621f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:37 +0200 Subject: fs: remove an unused block_device_operations forward declaration Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/fs.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index b1c960e9b84e..0d282c853691 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1774,8 +1774,6 @@ struct dir_context { loff_t pos; }; -struct block_device_operations; - /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. */ -- cgit v1.2.3-59-g8ed1b From 4e24566a134ea167441a1ffa3d439a27cf400880 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:38 +0200 Subject: fs: remove the HAVE_UNLOCKED_IOCTL and HAVE_COMPAT_IOCTL defines These are not defined anywhere, and contrary to the comments we really do not care about out of tree code at all. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/fs.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 0d282c853691..224edcc5b56e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1774,12 +1774,6 @@ struct dir_context { loff_t pos; }; -/* These macros are for out of kernel modules to test that - * the kernel supports the unlocked_ioctl and compat_ioctl - * fields in struct file_operations. */ -#define HAVE_COMPAT_IOCTL 1 -#define HAVE_UNLOCKED_IOCTL 1 - /* * These flags let !MMU mmap() govern direct device mapping vs immediate * copying more easily for MAP_PRIVATE, especially for ROM filesystems. -- cgit v1.2.3-59-g8ed1b From 75362a1792d16a61f0277d3610dea2f50a16bf3e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:39 +0200 Subject: fs: remove the mount_bdev and kill_block_super stubs No one calls these functions without CONFIG_BLOCK, so don't bother stubbing them out. 
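For context, a hedged sketch of the usual callers: only block-backed filesystems, which are built under CONFIG_BLOCK anyway, reference these helpers, which is why the !CONFIG_BLOCK stubs were never reachable. 'examplefs' and its functions are hypothetical.

#include <linux/fs.h>
#include <linux/module.h>

/* Placeholder fill_super; a real filesystem would read its superblock here. */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	return -EINVAL;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};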
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/fs.h | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 224edcc5b56e..9ee09e2b5a97 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2256,18 +2256,9 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -#ifdef CONFIG_BLOCK extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); -#else -static inline struct dentry *mount_bdev(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data, - int (*fill_super)(struct super_block *, void *, int)) -{ - return ERR_PTR(-ENODEV); -} -#endif extern struct dentry *mount_single(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2276,14 +2267,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type, int (*fill_super)(struct super_block *, void *, int)); extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); void generic_shutdown_super(struct super_block *sb); -#ifdef CONFIG_BLOCK void kill_block_super(struct super_block *sb); -#else -static inline void kill_block_super(struct super_block *sb) -{ - BUG(); -} -#endif void kill_anon_super(struct super_block *sb); void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); -- cgit v1.2.3-59-g8ed1b From dd0dca223e091bbacdd3c7ce9cf06b373da59816 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:40 +0200 Subject: block: simplify sb_is_blkdev_sb Just use IS_ENABLED instead of providing a stub for !CONFIG_BLOCK. 
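The idea behind the change, sketched with an invented symbol (example_block_stat) rather than the real one: IS_ENABLED() evaluates to a compile-time constant, so the disabled branch and its symbol reference are optimized away and no !CONFIG_BLOCK stub is needed.

#include <linux/kconfig.h>

extern unsigned long example_block_stat;	/* hypothetical: defined only when CONFIG_BLOCK=y */

static inline unsigned long example_read_block_stat(void)
{
	/*
	 * IS_ENABLED(CONFIG_BLOCK) is 0 or 1 at compile time, so with
	 * CONFIG_BLOCK=n the branch below is folded away together with the
	 * reference to example_block_stat.
	 */
	if (!IS_ENABLED(CONFIG_BLOCK))
		return 0;
	return example_block_stat;
}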
Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/fs.h | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ee09e2b5a97..7f3ae38335d4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2557,6 +2557,12 @@ extern struct kmem_cache *names_cachep; #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) +extern struct super_block *blockdev_superblock; +static inline bool sb_is_blkdev_sb(struct super_block *sb) +{ + return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; +} + #ifdef CONFIG_BLOCK extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); @@ -2572,13 +2578,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); - -extern struct super_block *blockdev_superblock; - -static inline bool sb_is_blkdev_sb(struct super_block *sb) -{ - return sb == blockdev_superblock; -} #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } @@ -2602,11 +2601,6 @@ static inline int emergency_thaw_bdev(struct super_block *sb) static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) { } - -static inline bool sb_is_blkdev_sb(struct super_block *sb) -{ - return false; -} #endif void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); -- cgit v1.2.3-59-g8ed1b From 3f1266f1f82d7b8c72472a8921e80aa3e611fb62 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:41 +0200 Subject: block: move block-related definitions out of fs.h Move most of the block related definition out of fs.h into more suitable headers. 
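The practical fallout is that code which used to pick these prototypes
up for free via <linux/fs.h> now has to include the dedicated headers
itself, which is what the hunks for affs, hfs, ntfs, quota and friends
do.  A sketch only (not a complete driver, example_fill_super() is made
up) of what a filesystem's .c file needs after this change:

	#include <linux/fs.h>
	#include <linux/blkdev.h>	/* bdevname(), set_blocksize(), ... */
	#include <linux/genhd.h>	/* register_blkdev(), bd_set_size(), ... */

	static int example_fill_super(struct super_block *sb, void *data,
				      int silent)
	{
		char b[BDEVNAME_SIZE];

		pr_info("example: mounting %s\n", bdevname(sb->s_bdev, b));
		return set_blocksize(sb->s_bdev, 4096);
	}

The point is only that bdevname(), BDEVNAME_SIZE and set_blocksize()
now come from blkdev.h rather than fs.h.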
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/affs/file.c | 1 + fs/hfs/inode.c | 1 + fs/internal.h | 17 ++++++++- fs/ntfs/dir.c | 1 + fs/proc/devices.c | 1 + fs/quota/dquot.c | 1 + fs/reiserfs/procfs.c | 1 + include/linux/blkdev.h | 46 +++++++++++++++++++++++ include/linux/fs.h | 92 ---------------------------------------------- include/linux/genhd.h | 27 ++++++++++++++ include/linux/jbd2.h | 1 + security/loadpin/loadpin.c | 1 + 12 files changed, 96 insertions(+), 94 deletions(-) (limited to 'include') diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483..a26a0f96c119 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -14,6 +14,7 @@ */ #include +#include #include "affs.h" static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 2f224b98ee94..f35a37c65e5f 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "hfs_fs.h" #include "btree.h" diff --git a/fs/internal.h b/fs/internal.h index 9b863a7bd708..969988d3d397 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -23,7 +23,9 @@ struct user_namespace; extern void __init bdev_cache_init(void); extern int __sync_blockdev(struct block_device *bdev, int wait); - +void iterate_bdevs(void (*)(struct block_device *, void *), void *); +void emergency_thaw_bdev(struct super_block *sb); +void bd_forget(struct inode *inode); #else static inline void bdev_cache_init(void) { @@ -33,7 +35,18 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) { return 0; } -#endif +static inline void iterate_bdevs(void (*f)(struct block_device *, void *), + void *arg) +{ +} +static inline int emergency_thaw_bdev(struct super_block *sb) +{ + return 0; +} +static inline void bd_forget(struct inode *inode) +{ +} +#endif /* CONFIG_BLOCK */ /* * buffer.c diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 3c4811469ae8..a87d4391e6b5 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -8,6 +8,7 @@ #include #include +#include #include "dir.h" #include "aops.h" diff --git a/fs/proc/devices.c b/fs/proc/devices.c index 37d38697eaf8..837971e74109 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c @@ -3,6 +3,7 @@ #include #include #include +#include static int devinfo_show(struct seq_file *f, void *v) { diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7b4bac91146b..bb02989d92b6 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -78,6 +78,7 @@ #include #include #include +#include #include "../internal.h" /* ugh */ #include diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index ff336513c254..155b82870333 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -15,6 +15,7 @@ #include "reiserfs.h" #include #include +#include /* * LOCKING: diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c0701237116d..cf8f692f62a9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1918,4 +1918,50 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) } #endif /* CONFIG_BLOCK */ +int bdev_read_only(struct block_device *bdev); +int set_blocksize(struct block_device *bdev, int size); + +const char *bdevname(struct block_device *bdev, char *buffer); +struct block_device *lookup_bdev(const char *); + +void blkdev_show(struct seq_file *seqf, off_t offset); + +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ +#ifdef CONFIG_BLOCK +#define BLKDEV_MAJOR_MAX 
512 +#else +#define BLKDEV_MAJOR_MAX 0 +#endif + +int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); +struct block_device *bd_start_claiming(struct block_device *bdev, void *holder); +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, + void *holder); +void blkdev_put(struct block_device *bdev, fmode_t mode); + +struct block_device *bdget(dev_t); +struct block_device *bdgrab(struct block_device *bdev); +void bdput(struct block_device *); + +#ifdef CONFIG_BLOCK +void invalidate_bdev(struct block_device *bdev); +int sync_blockdev(struct block_device *bdev); +#else +static inline void invalidate_bdev(struct block_device *bdev) +{ +} +static inline int sync_blockdev(struct block_device *bdev) +{ + return 0; +} #endif +int fsync_bdev(struct block_device *bdev); + +struct super_block *freeze_bdev(struct block_device *bdev); +int thaw_bdev(struct block_device *bdev, struct super_block *sb); + +#endif /* _LINUX_BLKDEV_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f3ae38335d4..add30c3bdf9a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2563,79 +2563,10 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; } -#ifdef CONFIG_BLOCK -extern int register_blkdev(unsigned int, const char *); -extern void unregister_blkdev(unsigned int, const char *); -extern struct block_device *bdget(dev_t); -extern struct block_device *bdgrab(struct block_device *bdev); -extern void bd_set_size(struct block_device *, loff_t size); -extern void bd_forget(struct inode *inode); -extern void bdput(struct block_device *); -extern void invalidate_bdev(struct block_device *); -extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); -extern int sync_blockdev(struct block_device *bdev); -extern struct super_block *freeze_bdev(struct block_device *); -extern void emergency_thaw_bdev(struct super_block *sb); -extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); -extern int fsync_bdev(struct block_device *); -#else -static inline void bd_forget(struct inode *inode) {} -static inline int sync_blockdev(struct block_device *bdev) { return 0; } -static inline void invalidate_bdev(struct block_device *bdev) {} - -static inline struct super_block *freeze_bdev(struct block_device *sb) -{ - return NULL; -} - -static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) -{ - return 0; -} - -static inline int emergency_thaw_bdev(struct super_block *sb) -{ - return 0; -} - -static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) -{ -} -#endif void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; -#ifdef CONFIG_BLOCK -extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); -extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); -extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, - void *holder); -extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, - void *holder); -extern struct block_device *bd_start_claiming(struct block_device *bdev, - void *holder); 
-extern void bd_abort_claiming(struct block_device *bdev, - struct block_device *whole, void *holder); -extern void blkdev_put(struct block_device *bdev, fmode_t mode); - -#ifdef CONFIG_SYSFS -extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); -extern void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk); -#else -static inline int bd_link_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ - return 0; -} -static inline void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ -} -#endif -#endif /* fs/char_dev.c */ #define CHRDEV_MAJOR_MAX 512 @@ -2666,31 +2597,12 @@ static inline void unregister_chrdev(unsigned int major, const char *name) __unregister_chrdev(major, 0, 256, name); } -/* fs/block_dev.c */ -#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ -#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ - -#ifdef CONFIG_BLOCK -#define BLKDEV_MAJOR_MAX 512 -extern const char *bdevname(struct block_device *bdev, char *buffer); -extern struct block_device *lookup_bdev(const char *); -extern void blkdev_show(struct seq_file *,off_t); - -#else -#define BLKDEV_MAJOR_MAX 0 -#endif - extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); -#ifdef CONFIG_BLOCK -extern int revalidate_disk(struct gendisk *); -extern int check_disk_change(struct block_device *); -extern int __invalidate_device(struct block_device *, bool); -#endif unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -3090,10 +3002,6 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); -#ifdef CONFIG_BLOCK -extern int bdev_read_only(struct block_device *); -#endif -extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 392aad5e29a2..83f8e0d83228 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -373,6 +373,33 @@ extern void blk_unregister_region(dev_t devt, unsigned long range); #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) +int register_blkdev(unsigned int major, const char *name); +void unregister_blkdev(unsigned int major, const char *name); + +int revalidate_disk(struct gendisk *disk); +int check_disk_change(struct block_device *bdev); +int __invalidate_device(struct block_device *bdev, bool kill_dirty); +void bd_set_size(struct block_device *bdev, loff_t size); + +/* for drivers/char/raw.c: */ +int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); + +#ifdef CONFIG_SYSFS +int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +#endif /* CONFIG_SYSFS */ + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d56128df2aff..4aaa29772bb0 100644 --- a/include/linux/jbd2.h +++ 
b/include/linux/jbd2.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #endif diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c index ee5cb944f4ad..670a1aebb8a1 100644 --- a/security/loadpin/loadpin.c +++ b/security/loadpin/loadpin.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include /* current */ #include -- cgit v1.2.3-59-g8ed1b From d2de7ea48d83195ef1310555f1fdd9e8e1bab0d3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:42 +0200 Subject: fs: move the buffer_heads_over_limit stub to buffer_head.h Move the !CONFIG_BLOCK stub to the same place as the non-stub declaration. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 - include/linux/buffer_head.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cf8f692f62a9..c824c6fee35d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1838,7 +1838,6 @@ struct block_device; /* * stubs for when the block layer is configured out */ -#define buffer_heads_over_limit 0 static inline long nr_blockdev_pages(void) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 22fb11e2d2e0..6b47f94378c5 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -406,6 +406,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } +#define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ #endif /* _LINUX_BUFFER_HEAD_H */ -- cgit v1.2.3-59-g8ed1b From 1a4dcfa8bc10d6bf4f94ac20adc2b30a1da72cfd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:43 +0200 Subject: block: reduce ifdef CONFIG_BLOCK madness in headers Large part of bio.h, blkdev.h and genhd.h are under ifdef CONFIG_BLOCK for no good reason. Only stub out function that are called from code that is not dependent on CONFIG_BLOCK and leave the harmless other declarations around. 
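The rule of thumb after this patch: a prototype that nothing references
costs nothing, so only helpers actually called from code built without
CONFIG_BLOCK (the blk_plug handling used by the scheduler,
blkdev_issue_flush(), nr_blockdev_pages()) keep their static inline
stubs.  A tiny userspace illustration of why the remaining bare
declarations are harmless (the function name is made up):

	#include <stdio.h>

	/* Prototype with no definition anywhere in the program. */
	int never_defined_anywhere(int x);

	int main(void)
	{
		/*
		 * Nothing ever calls never_defined_anywhere(), so no
		 * undefined reference is emitted and this links and
		 * runs just fine.
		 */
		printf("built and linked without a definition\n");
		return 0;
	}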
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/bio.h | 3 -- include/linux/blkdev.h | 92 ++++++++++++++++++++++---------------------------- include/linux/genhd.h | 14 ++++---- 3 files changed, 46 insertions(+), 63 deletions(-) (limited to 'include') diff --git a/include/linux/bio.h b/include/linux/bio.h index 91676d4b2dfe..0282f8aa8593 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -8,8 +8,6 @@ #include #include #include - -#ifdef CONFIG_BLOCK /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include @@ -824,5 +822,4 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } -#endif /* CONFIG_BLOCK */ #endif /* __LINUX_BIO_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c824c6fee35d..f788bddc9219 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -4,9 +4,6 @@ #include #include - -#ifdef CONFIG_BLOCK - #include #include #include @@ -1163,13 +1160,13 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, return __blk_rq_map_sg(q, rq, sglist, &last_sg); } extern void blk_dump_rq_flags(struct request *, char *); -extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); +#ifdef CONFIG_BLOCK /* * blk_plug permits building a queue of related requests by holding the I/O * fragments for a short period. This allows merging of sequential requests @@ -1229,9 +1226,47 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) !list_empty(&plug->cb_list)); } +int blkdev_issue_flush(struct block_device *, gfp_t); +long nr_blockdev_pages(void); +#else /* CONFIG_BLOCK */ +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) +{ + return 0; +} + +static inline long nr_blockdev_pages(void) +{ + return 0; +} +#endif /* CONFIG_BLOCK */ + extern void blk_io_schedule(void); -int blkdev_issue_flush(struct block_device *, gfp_t); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); @@ -1831,51 +1866,6 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq) } #endif /* CONFIG_BLK_DEV_ZONED */ -#else /* CONFIG_BLOCK */ - -struct block_device; - -/* - * stubs for when the block layer is configured out - */ - -static inline long nr_blockdev_pages(void) -{ - return 0; -} - -struct blk_plug { -}; - -static inline void blk_start_plug(struct blk_plug *plug) -{ -} - -static inline void blk_finish_plug(struct blk_plug *plug) -{ -} - -static inline void blk_flush_plug(struct task_struct *task) -{ -} - -static inline void blk_schedule_flush_plug(struct task_struct *task) -{ -} - - -static inline bool blk_needs_flush_plug(struct task_struct *tsk) -{ - return false; -} - -static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) -{ - return 0; -} - -#endif /* CONFIG_BLOCK */ - static inline 
void blk_wake_io_task(struct task_struct *waiter) { /* @@ -1889,7 +1879,6 @@ static inline void blk_wake_io_task(struct task_struct *waiter) wake_up_process(waiter); } -#ifdef CONFIG_BLOCK unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, unsigned int op); void disk_end_io_acct(struct gendisk *disk, unsigned int op, @@ -1915,7 +1904,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) { return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); } -#endif /* CONFIG_BLOCK */ int bdev_read_only(struct block_device *bdev); int set_blocksize(struct block_device *bdev, int size); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 83f8e0d83228..31a54072ffd6 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -19,8 +19,6 @@ #include #include -#ifdef CONFIG_BLOCK - #define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) #define dev_to_part(device) container_of((device), struct hd_struct, __dev) #define disk_to_dev(disk) (&(disk)->part0.__dev) @@ -337,12 +335,9 @@ static inline void set_capacity(struct gendisk *disk, sector_t size) disk->part0.nr_sects = size; } -extern dev_t blk_lookup_devt(const char *name, int partno); - int bdev_disk_changed(struct block_device *bdev, bool invalidate); int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); int blk_drop_partitions(struct block_device *bdev); -extern void printk_all_partitions(void); extern struct gendisk *__alloc_disk_node(int minors, int node_id); extern struct kobject *get_disk_and_module(struct gendisk *disk); @@ -400,10 +395,13 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, } #endif /* CONFIG_SYSFS */ +#ifdef CONFIG_BLOCK +void printk_all_partitions(void); +dev_t blk_lookup_devt(const char *name, int partno); #else /* CONFIG_BLOCK */ - -static inline void printk_all_partitions(void) { } - +static inline void printk_all_partitions(void) +{ +} static inline dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); -- cgit v1.2.3-59-g8ed1b From 621c1f42945e76015c3a585e7a9fe6e71665eba0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:44 +0200 Subject: block: move struct block_device to blk_types.h Move the struct block_device definition together with most of the block layer definitions, as it has nothing to do with the rest of fs.h. 
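fs.h keeps declaring pointers to struct block_device (in struct
super_block and friends), and for that an incomplete type is all the
compiler needs; only code that dereferences the members has to pull in
blk_types.h now.  A userspace analogy of the split (not kernel code):

	#include <stdio.h>

	/* "fs.h side": an incomplete type is enough for pointers. */
	struct gadget;				/* forward declaration only */
	struct holder {
		struct gadget *g;		/* pointer member: fine */
	};
	int gadget_id(struct gadget *g);	/* prototype: fine */

	/* "blk_types.h side": the full definition, needed to touch members. */
	struct gadget {
		int id;
	};

	int gadget_id(struct gadget *g)
	{
		return g->id;			/* dereference needs the definition */
	}

	int main(void)
	{
		struct gadget g = { .id = 7 };
		struct holder h = { .g = &g };

		printf("%d\n", gadget_id(h.g));
		return 0;
	}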
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/adfs/super.c | 1 + fs/befs/linuxvfs.c | 1 + fs/efs/super.c | 1 + fs/jfs/jfs_mount.c | 1 + fs/jfs/resize.c | 1 + include/linux/blk_types.h | 39 ++++++++++++++++++++++++++++++++++++++- include/linux/blkdev.h | 1 + include/linux/dasd_mod.h | 2 ++ include/linux/fs.h | 41 ----------------------------------------- 9 files changed, 46 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/fs/adfs/super.c b/fs/adfs/super.c index a3cc8ecb50da..d553bb5bc17a 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "adfs.h" #include "dir_f.h" #include "dir_fplus.h" diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 64cdf4d8e424..2482032021ca 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "befs.h" #include "btree.h" diff --git a/fs/efs/super.c b/fs/efs/super.c index 4a6ebff2af76..a4a945d0ac6a 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "efs.h" #include diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c index eb8b9e233d73..2935d4c776ec 100644 --- a/fs/jfs/jfs_mount.c +++ b/fs/jfs/jfs_mount.c @@ -36,6 +36,7 @@ #include #include +#include #include "jfs_incore.h" #include "jfs_filsys.h" diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 66acea9d878b..bde787c354fc 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ccb895f911b1..a602132cbe32 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -14,12 +14,49 @@ struct bio_set; struct bio; struct bio_integrity_payload; struct page; -struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); struct bio_crypt_ctx; +struct block_device { + dev_t bd_dev; /* not a kdev_t - it's a search key */ + int bd_openers; + struct inode * bd_inode; /* will die */ + struct super_block * bd_super; + struct mutex bd_mutex; /* open/close mutex */ + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_disks; +#endif + struct block_device * bd_contains; + unsigned bd_block_size; + u8 bd_partno; + struct hd_struct * bd_part; + /* number of times partitions within this device have been opened. */ + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct request_queue * bd_queue; + struct backing_dev_info *bd_bdi; + struct list_head bd_list; + /* + * Private data. You must have bd_claim'ed the block_device + * to use this. NOTE: bd_claim allows an owner to claim + * the same device multiple times, the owner must take special + * care to not mess up bd_private for that case. + */ + unsigned long bd_private; + + /* The counter of freeze processes */ + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +} __randomize_layout; + /* * Block error status values. See block/blk-core:blk_errors for the details. * Alpha cannot write a byte atomically, so we need to use 32-bit value. 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f788bddc9219..15497782c176 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1930,6 +1930,7 @@ void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, void *holder); void blkdev_put(struct block_device *bdev, fmode_t mode); +struct block_device *I_BDEV(struct inode *inode); struct block_device *bdget(dev_t); struct block_device *bdgrab(struct block_device *bdev); void bdput(struct block_device *); diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h index d39abad2ff6e..14e6cf8c6267 100644 --- a/include/linux/dasd_mod.h +++ b/include/linux/dasd_mod.h @@ -4,6 +4,8 @@ #include +struct gendisk; + extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info); #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index add30c3bdf9a..1d7c4f7465d2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -470,45 +470,6 @@ struct address_space { * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */ -struct request_queue; - -struct block_device { - dev_t bd_dev; /* not a kdev_t - it's a search key */ - int bd_openers; - struct inode * bd_inode; /* will die */ - struct super_block * bd_super; - struct mutex bd_mutex; /* open/close mutex */ - void * bd_claiming; - void * bd_holder; - int bd_holders; - bool bd_write_holder; -#ifdef CONFIG_SYSFS - struct list_head bd_holder_disks; -#endif - struct block_device * bd_contains; - unsigned bd_block_size; - u8 bd_partno; - struct hd_struct * bd_part; - /* number of times partitions within this device have been opened. */ - unsigned bd_part_count; - int bd_invalidated; - struct gendisk * bd_disk; - struct request_queue * bd_queue; - struct backing_dev_info *bd_bdi; - struct list_head bd_list; - /* - * Private data. You must have bd_claim'ed the block_device - * to use this. NOTE: bd_claim allows an owner to claim - * the same device multiple times, the owner must take special - * care to not mess up bd_private for that case. - */ - unsigned long bd_private; - - /* The counter of freeze processes */ - int bd_fsfreeze_count; - /* Mutex for freeze */ - struct mutex bd_fsfreeze_mutex; -} __randomize_layout; /* XArray tags, for tagging dirty and writeback pages in the pagecache. */ #define PAGECACHE_TAG_DIRTY XA_MARK_0 @@ -907,8 +868,6 @@ static inline unsigned imajor(const struct inode *inode) return MAJOR(inode->i_rdev); } -extern struct block_device *I_BDEV(struct inode *inode); - struct fown_struct { rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ -- cgit v1.2.3-59-g8ed1b From 1bc138c622959979eb547be2d3bbc6442a5c80b0 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 10 Jun 2020 11:12:23 +0100 Subject: PM / EM: add support for other devices than CPUs in Energy Model Add support for other devices than CPUs. The registration function does not require a valid cpumask pointer and is ready to handle new devices. Some of the internal structures has been reorganized in order to keep consistent view (like removing per_cpu pd pointers). Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. 
Wysocki --- include/linux/device.h | 5 + include/linux/energy_model.h | 29 +++-- kernel/power/energy_model.c | 244 +++++++++++++++++++++++++++++-------------- 3 files changed, 194 insertions(+), 84 deletions(-) (limited to 'include') diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..b72e6f9ad845 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -13,6 +13,7 @@ #define _DEVICE_H_ #include +#include #include #include #include @@ -559,6 +560,10 @@ struct device { struct dev_pm_info power; struct dev_pm_domain *pm_domain; +#ifdef CONFIG_ENERGY_MODEL + struct em_perf_domain *em_pd; +#endif + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN struct irq_domain *msi_domain; #endif diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 7076cb22b247..2d4689964029 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -12,8 +12,10 @@ /** * em_perf_state - Performance state of a performance domain - * @frequency: The CPU frequency in KHz, for consistency with CPUFreq - * @power: The power consumed by 1 CPU at this level, in milli-watts + * @frequency: The frequency in KHz, for consistency with CPUFreq + * @power: The power consumed at this level, in milli-watts (by 1 CPU or + by a registered device). It can be a total power: static and + dynamic. * @cost: The cost coefficient associated with this level, used during * energy calculation. Equal to: power * max_frequency / frequency */ @@ -27,12 +29,16 @@ struct em_perf_state { * em_perf_domain - Performance domain * @table: List of performance states, in ascending order * @nr_perf_states: Number of performance states - * @cpus: Cpumask covering the CPUs of the domain + * @cpus: Cpumask covering the CPUs of the domain. It's here + * for performance reasons to avoid potential cache + * misses during energy calculations in the scheduler + * and simplifies allocating/freeing that memory region. * - * A "performance domain" represents a group of CPUs whose performance is - * scaled together. All CPUs of a performance domain must have the same - * micro-architecture. Performance domains often have a 1-to-1 mapping with - * CPUFreq policies. + * In case of CPU device, a "performance domain" represents a group of CPUs + * whose performance is scaled together. All CPUs of a performance domain + * must have the same micro-architecture. Performance domains often have + * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus + * field is unused. */ struct em_perf_domain { struct em_perf_state *table; @@ -71,10 +77,12 @@ struct em_data_callback { #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } struct em_perf_domain *em_cpu_get(int cpu); +struct em_perf_domain *em_pd_get(struct device *dev); int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, struct em_data_callback *cb); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span); +void em_dev_unregister_perf_domain(struct device *dev); /** * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. 
domain @@ -184,10 +192,17 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, { return -EINVAL; } +static inline void em_dev_unregister_perf_domain(struct device *dev) +{ +} static inline struct em_perf_domain *em_cpu_get(int cpu) { return NULL; } +static inline struct em_perf_domain *em_pd_get(struct device *dev) +{ + return NULL; +} static inline unsigned long em_pd_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 5b8a1566526a..32d76e78f992 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Energy Model of CPUs + * Energy Model of devices * - * Copyright (c) 2018, Arm ltd. + * Copyright (c) 2018-2020, Arm ltd. * Written by: Quentin Perret, Arm ltd. + * Improvements provided by: Lukasz Luba, Arm ltd. */ #define pr_fmt(fmt) "energy_model: " fmt @@ -15,15 +16,17 @@ #include #include -/* Mapping of each CPU to the performance domain to which it belongs. */ -static DEFINE_PER_CPU(struct em_perf_domain *, em_data); - /* * Mutex serializing the registrations of performance domains and letting * callbacks defined by drivers sleep. */ static DEFINE_MUTEX(em_pd_mutex); +static bool _is_cpu_device(struct device *dev) +{ + return (dev->bus == &cpu_subsys); +} + #ifdef CONFIG_DEBUG_FS static struct dentry *rootdir; @@ -49,22 +52,30 @@ static int em_debug_cpus_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(em_debug_cpus); -static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) +static void em_debug_create_pd(struct device *dev) { struct dentry *d; - char name[8]; int i; - snprintf(name, sizeof(name), "pd%d", cpu); - /* Create the directory of the performance domain */ - d = debugfs_create_dir(name, rootdir); + d = debugfs_create_dir(dev_name(dev), rootdir); - debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops); + if (_is_cpu_device(dev)) + debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus, + &em_debug_cpus_fops); /* Create a sub-directory for each performance state */ - for (i = 0; i < pd->nr_perf_states; i++) - em_debug_create_ps(&pd->table[i], d); + for (i = 0; i < dev->em_pd->nr_perf_states; i++) + em_debug_create_ps(&dev->em_pd->table[i], d); + +} + +static void em_debug_remove_pd(struct device *dev) +{ + struct dentry *debug_dir; + + debug_dir = debugfs_lookup(dev_name(dev), rootdir); + debugfs_remove_recursive(debug_dir); } static int __init em_debug_init(void) @@ -76,40 +87,34 @@ static int __init em_debug_init(void) } core_initcall(em_debug_init); #else /* CONFIG_DEBUG_FS */ -static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {} +static void em_debug_create_pd(struct device *dev) {} +static void em_debug_remove_pd(struct device *dev) {} #endif -static struct em_perf_domain * -em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, - cpumask_t *span) + +static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd, + int nr_states, struct em_data_callback *cb) { unsigned long opp_eff, prev_opp_eff = ULONG_MAX; unsigned long power, freq, prev_freq = 0; - int i, ret, cpu = cpumask_first(span); struct em_perf_state *table; - struct em_perf_domain *pd; + int i, ret; u64 fmax; - if (!cb->active_power) - return NULL; - - pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL); - if (!pd) - return NULL; - table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL); if (!table) - goto 
free_pd; + return -ENOMEM; /* Build the list of performance states for this performance domain */ for (i = 0, freq = 0; i < nr_states; i++, freq++) { /* * active_power() is a driver callback which ceils 'freq' to - * lowest performance state of 'cpu' above 'freq' and updates + * lowest performance state of 'dev' above 'freq' and updates * 'power' and 'freq' accordingly. */ ret = cb->active_power(&power, &freq, dev); if (ret) { - pr_err("pd%d: invalid perf. state: %d\n", cpu, ret); + dev_err(dev, "EM: invalid perf. state: %d\n", + ret); goto free_ps_table; } @@ -118,7 +123,8 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, * higher performance states. */ if (freq <= prev_freq) { - pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq); + dev_err(dev, "EM: non-increasing freq: %lu\n", + freq); goto free_ps_table; } @@ -127,7 +133,8 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, * positive, in milli-watts and to fit into 16 bits. */ if (!power || power > EM_MAX_POWER) { - pr_err("pd%d: invalid power: %lu\n", cpu, power); + dev_err(dev, "EM: invalid power: %lu\n", + power); goto free_ps_table; } @@ -142,8 +149,8 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, */ opp_eff = freq / power; if (opp_eff >= prev_opp_eff) - pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", - cpu, i, i - 1); + dev_dbg(dev, "EM: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", + i, i - 1); prev_opp_eff = opp_eff; } @@ -156,30 +163,82 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, pd->table = table; pd->nr_perf_states = nr_states; - cpumask_copy(to_cpumask(pd->cpus), span); - em_debug_create_pd(pd, cpu); - - return pd; + return 0; free_ps_table: kfree(table); -free_pd: - kfree(pd); + return -EINVAL; +} + +static int em_create_pd(struct device *dev, int nr_states, + struct em_data_callback *cb, cpumask_t *cpus) +{ + struct em_perf_domain *pd; + struct device *cpu_dev; + int cpu, ret; + + if (_is_cpu_device(dev)) { + pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + cpumask_copy(em_span_cpus(pd), cpus); + } else { + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + } + + ret = em_create_perf_table(dev, pd, nr_states, cb); + if (ret) { + kfree(pd); + return ret; + } + + if (_is_cpu_device(dev)) + for_each_cpu(cpu, cpus) { + cpu_dev = get_cpu_device(cpu); + cpu_dev->em_pd = pd; + } + + dev->em_pd = pd; + + return 0; +} + +/** + * em_pd_get() - Return the performance domain for a device + * @dev : Device to find the performance domain for + * + * Returns the performance domain to which @dev belongs, or NULL if it doesn't + * exist. + */ +struct em_perf_domain *em_pd_get(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev)) + return NULL; - return NULL; + return dev->em_pd; } +EXPORT_SYMBOL_GPL(em_pd_get); /** * em_cpu_get() - Return the performance domain for a CPU * @cpu : CPU to find the performance domain for * - * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't + * Returns the performance domain to which @cpu belongs, or NULL if it doesn't * exist. 
*/ struct em_perf_domain *em_cpu_get(int cpu) { - return READ_ONCE(per_cpu(em_data, cpu)); + struct device *cpu_dev; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return NULL; + + return em_pd_get(cpu_dev); } EXPORT_SYMBOL_GPL(em_cpu_get); @@ -188,7 +247,7 @@ EXPORT_SYMBOL_GPL(em_cpu_get); * @dev : Device for which the EM is to register * @nr_states : Number of performance states to register * @cb : Callback functions providing the data of the Energy Model - * @span : Pointer to cpumask_t, which in case of a CPU device is + * @cpus : Pointer to cpumask_t, which in case of a CPU device is * obligatory. It can be taken from i.e. 'policy->cpus'. For other * type of devices this should be set to NULL. * @@ -201,13 +260,12 @@ EXPORT_SYMBOL_GPL(em_cpu_get); * Return 0 on success */ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, - struct em_data_callback *cb, cpumask_t *span) + struct em_data_callback *cb, cpumask_t *cpus) { unsigned long cap, prev_cap = 0; - struct em_perf_domain *pd; - int cpu, ret = 0; + int cpu, ret; - if (!dev || !span || !nr_states || !cb) + if (!dev || !nr_states || !cb) return -EINVAL; /* @@ -216,47 +274,50 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, */ mutex_lock(&em_pd_mutex); - for_each_cpu(cpu, span) { - /* Make sure we don't register again an existing domain. */ - if (READ_ONCE(per_cpu(em_data, cpu))) { - ret = -EEXIST; - goto unlock; - } + if (dev->em_pd) { + ret = -EEXIST; + goto unlock; + } - /* - * All CPUs of a domain must have the same micro-architecture - * since they all share the same table. - */ - cap = arch_scale_cpu_capacity(cpu); - if (prev_cap && prev_cap != cap) { - pr_err("CPUs of %*pbl must have the same capacity\n", - cpumask_pr_args(span)); + if (_is_cpu_device(dev)) { + if (!cpus) { + dev_err(dev, "EM: invalid CPU mask\n"); ret = -EINVAL; goto unlock; } - prev_cap = cap; + + for_each_cpu(cpu, cpus) { + if (em_cpu_get(cpu)) { + dev_err(dev, "EM: exists for CPU%d\n", cpu); + ret = -EEXIST; + goto unlock; + } + /* + * All CPUs of a domain must have the same + * micro-architecture since they all share the same + * table. + */ + cap = arch_scale_cpu_capacity(cpu); + if (prev_cap && prev_cap != cap) { + dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n", + cpumask_pr_args(cpus)); + + ret = -EINVAL; + goto unlock; + } + prev_cap = cap; + } } - /* Create the performance domain and add it to the Energy Model. */ - pd = em_create_pd(dev, nr_states, cb, span); - if (!pd) { - ret = -EINVAL; + ret = em_create_pd(dev, nr_states, cb, cpus); + if (ret) goto unlock; - } - for_each_cpu(cpu, span) { - /* - * The per-cpu array can be read concurrently from em_cpu_get(). - * The barrier enforces the ordering needed to make sure readers - * can only access well formed em_perf_domain structs. 
- */ - smp_store_release(per_cpu_ptr(&em_data, cpu), pd); - } + em_debug_create_pd(dev); + dev_info(dev, "EM: created perf domain\n"); - pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span)); unlock: mutex_unlock(&em_pd_mutex); - return ret; } EXPORT_SYMBOL_GPL(em_dev_register_perf_domain); @@ -285,3 +346,32 @@ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, return em_dev_register_perf_domain(cpu_dev, nr_states, cb, span); } EXPORT_SYMBOL_GPL(em_register_perf_domain); + +/** + * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device + * @dev : Device for which the EM is registered + * + * Unregister the EM for the specified @dev (but not a CPU device). + */ +void em_dev_unregister_perf_domain(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev) || !dev->em_pd) + return; + + if (_is_cpu_device(dev)) + return; + + /* + * The mutex separates all register/unregister requests and protects + * from potential clean-up/setup issues in the debugfs directories. + * The debugfs directory name is the same as device's name. + */ + mutex_lock(&em_pd_mutex); + em_debug_remove_pd(dev); + + kfree(dev->em_pd->table); + kfree(dev->em_pd); + dev->em_pd = NULL; + mutex_unlock(&em_pd_mutex); +} +EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain); -- cgit v1.2.3-59-g8ed1b From 07891f15d91317b2220a0b610a2d7e324a88105d Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:51 +0100 Subject: PM / EM: remove em_register_perf_domain Remove old function em_register_perf_domain which is no longer needed. There is em_dev_register_perf_domain that covers old use cases and new as well. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 7 ------- kernel/power/energy_model.c | 25 ------------------------- 2 files changed, 32 deletions(-) (limited to 'include') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 2d4689964029..0f94e871a202 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -78,8 +78,6 @@ struct em_data_callback { struct em_perf_domain *em_cpu_get(int cpu); struct em_perf_domain *em_pd_get(struct device *dev); -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span); void em_dev_unregister_perf_domain(struct device *dev); @@ -181,11 +179,6 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) struct em_data_callback {}; #define EM_DATA_CB(_active_power_cb) { } -static inline int em_register_perf_domain(cpumask_t *span, - unsigned int nr_states, struct em_data_callback *cb) -{ - return -EINVAL; -} static inline int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span) diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 32d76e78f992..c1ff7fa030ab 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -322,31 +322,6 @@ unlock: } EXPORT_SYMBOL_GPL(em_dev_register_perf_domain); -/** - * em_register_perf_domain() - Register the Energy Model of a performance domain - * @span : Mask of CPUs in the performance domain - * @nr_states : Number of capacity states to register - * @cb : Callback functions providing the data of the Energy Model - * - * Create Energy Model tables for a performance domain using the callbacks - * 
defined in cb. - * - * If multiple clients register the same performance domain, all but the first - * registration will be ignored. - * - * Return 0 on success - */ -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb) -{ - struct device *cpu_dev; - - cpu_dev = get_cpu_device(cpumask_first(span)); - - return em_dev_register_perf_domain(cpu_dev, nr_states, cb, span); -} -EXPORT_SYMBOL_GPL(em_register_perf_domain); - /** * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device * @dev : Device for which the EM is registered -- cgit v1.2.3-59-g8ed1b From f0b5694791ce70dba16758c3b838d5ddc7731b02 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:52 +0100 Subject: PM / EM: change name of em_pd_energy to em_cpu_energy Energy Model framework now supports other devices than CPUs. Refactor some of the functions in order to prevent wrong usage. The old function em_pd_energy has to generic name. It must not be used without proper cpumask pointer, which is possible only for CPU devices. Thus, rename it and add proper description to warn of potential wrong usage for other devices. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 11 ++++++++--- kernel/sched/fair.c | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 0f94e871a202..b67a51c574b9 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -83,15 +83,20 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, void em_dev_unregister_perf_domain(struct device *dev); /** - * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain + * em_cpu_energy() - Estimates the energy consumed by the CPUs of a + performance domain * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain * + * This function must be used only for CPU devices. There is no validation, + * i.e. if the EM is a CPU type and has cpumask allocated. It is called from + * the scheduler code quite frequently and that is why there is not checks. + * * Return: the sum of the energy consumed by the CPUs of the domain assuming * a capacity state satisfying the max utilization of the domain. 
*/ -static inline unsigned long em_pd_energy(struct em_perf_domain *pd, +static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { unsigned long freq, scale_cpu; @@ -196,7 +201,7 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev) { return NULL; } -static inline unsigned long em_pd_energy(struct em_perf_domain *pd, +static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { return 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cbcb2f71599b..6da601c8d383 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6497,7 +6497,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) max_util = max(max_util, cpu_util); } - return em_pd_energy(pd->em_pd, max_util, sum_util); + return em_cpu_energy(pd->em_pd, max_util, sum_util); } /* -- cgit v1.2.3-59-g8ed1b From 0e0ffa855d1590e54ec0033404a49e2e57e294fe Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:54 +0100 Subject: OPP: refactor dev_pm_opp_of_register_em() and update related drivers The Energy Model framework supports not only CPU devices. Drop the CPU specific interface with cpumask and add struct device. Add also a return value, user might use it. This new interface provides easy way to create a simple Energy Model, which then might be used by e.g. thermal subsystem. Acked-by: Daniel Lezcano Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt.c | 2 +- drivers/cpufreq/imx6q-cpufreq.c | 2 +- drivers/cpufreq/mediatek-cpufreq.c | 2 +- drivers/cpufreq/omap-cpufreq.c | 2 +- drivers/cpufreq/qcom-cpufreq-hw.c | 2 +- drivers/cpufreq/scpi-cpufreq.c | 2 +- drivers/cpufreq/vexpress-spc-cpufreq.c | 2 +- drivers/opp/of.c | 71 +++++++++++++++++++++------------- include/linux/pm_opp.h | 15 ++++++- 9 files changed, 65 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 79742bbd221f..944d7b45afe9 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -279,7 +279,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = transition_latency; policy->dvfs_possible_from_any_cpu = true; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index fdb2ffffbd15..ef7b34c1fd2b 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -193,7 +193,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy) policy->clk = clks[ARM].clk; cpufreq_generic_init(policy, freq_table, transition_latency); policy->suspend_freq = max_freq; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; } diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 0c98dd08273d..7d1212c9b7c8 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -448,7 +448,7 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) policy->driver_data = info; policy->clk = info->cpu_clk; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus); return 0; } diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 8d14b42a8c6f..3694bb030df3 100644 --- 
a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -131,7 +131,7 @@ static int omap_cpu_init(struct cpufreq_policy *policy) /* FIXME: what's the actual transition time? */ cpufreq_generic_init(policy, freq_table, 300 * 1000); - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(mpu_dev, policy->cpus); return 0; } diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index fc92a8842e25..0a04b6f03b9a 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -238,7 +238,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) goto error; } - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); policy->fast_switch_possible = true; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 20d1f85d5f5a..b0f5388b8854 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -167,7 +167,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = false; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 83c85d3d67e3..4e8b1dee7c9a 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -450,7 +450,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) policy->freq_table = freq_table[cur_cluster]; policy->cpuinfo.transition_latency = 1000000; /* 1 ms */ - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = diff --git a/drivers/opp/of.c b/drivers/opp/of.c index e273f419a4bf..4aa42739599e 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1205,18 +1205,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); /* * Callback function provided to the Energy Model framework upon registration. - * This computes the power estimated by @CPU at @kHz if it is the frequency + * This computes the power estimated by @dev at @kHz if it is the frequency * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled * frequency and @mW to the associated power. The power is estimated as - * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively - * the voltage and frequency of the OPP. + * P = C * V^2 * f with C being the device's capacitance and V and f + * respectively the voltage and frequency of the OPP. * - * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power - * calculation failed because of missing parameters, 0 otherwise. + * Returns -EINVAL if the power calculation failed because of missing + * parameters, 0 otherwise. 
*/ -static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, - struct device *cpu_dev) +static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz, + struct device *dev) { struct dev_pm_opp *opp; struct device_node *np; @@ -1225,7 +1225,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, u64 tmp; int ret; - np = of_node_get(cpu_dev->of_node); + np = of_node_get(dev->of_node); if (!np) return -EINVAL; @@ -1235,7 +1235,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, return -EINVAL; Hz = *kHz * 1000; - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz); + opp = dev_pm_opp_find_freq_ceil(dev, &Hz); if (IS_ERR(opp)) return -EINVAL; @@ -1255,30 +1255,38 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, /** * dev_pm_opp_of_register_em() - Attempt to register an Energy Model - * @cpus : CPUs for which an Energy Model has to be registered + * @dev : Device for which an Energy Model has to be registered + * @cpus : CPUs for which an Energy Model has to be registered. For + * other type of devices it should be set to NULL. * * This checks whether the "dynamic-power-coefficient" devicetree property has * been specified, and tries to register an Energy Model with it if it has. + * Having this property means the voltages are known for OPPs and the EM + * might be calculated. */ -void dev_pm_opp_of_register_em(struct cpumask *cpus) +int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) { - struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power); - int ret, nr_opp, cpu = cpumask_first(cpus); - struct device *cpu_dev; + struct em_data_callback em_cb = EM_DATA_CB(_get_power); struct device_node *np; + int ret, nr_opp; u32 cap; - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) - return; + if (IS_ERR_OR_NULL(dev)) { + ret = -EINVAL; + goto failed; + } - nr_opp = dev_pm_opp_get_opp_count(cpu_dev); - if (nr_opp <= 0) - return; + nr_opp = dev_pm_opp_get_opp_count(dev); + if (nr_opp <= 0) { + ret = -EINVAL; + goto failed; + } - np = of_node_get(cpu_dev->of_node); - if (!np) - return; + np = of_node_get(dev->of_node); + if (!np) { + ret = -EINVAL; + goto failed; + } /* * Register an EM only if the 'dynamic-power-coefficient' property is @@ -1289,9 +1297,20 @@ void dev_pm_opp_of_register_em(struct cpumask *cpus) */ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); of_node_put(np); - if (ret || !cap) - return; + if (ret || !cap) { + dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n"); + ret = -EINVAL; + goto failed; + } - em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, cpus); + ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus); + if (ret) + goto failed; + + return 0; + +failed: + dev_dbg(dev, "Couldn't register Energy Model %d\n", ret); + return ret; } EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index d5c4a329321d..ee34c553f6bf 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -11,6 +11,7 @@ #ifndef __LINUX_OPP_H__ #define __LINUX_OPP_H__ +#include #include #include @@ -373,7 +374,11 @@ struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); -void dev_pm_opp_of_register_em(struct cpumask 
*cpus); +int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); +static inline void dev_pm_opp_of_unregister_em(struct device *dev) +{ + em_dev_unregister_perf_domain(dev); +} #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -413,7 +418,13 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) return NULL; } -static inline void dev_pm_opp_of_register_em(struct cpumask *cpus) +static inline int dev_pm_opp_of_register_em(struct device *dev, + struct cpumask *cpus) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_of_unregister_em(struct device *dev) { } -- cgit v1.2.3-59-g8ed1b From dfde1d7dee9bfd095a4f16c9e0579a10f4092e81 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:50 +0300 Subject: sock: Move sock_valbool_flag to header This is preparation for usage in bpf_setsockopt. Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200620153052.9439-1-zeil@yandex-team.ru --- include/net/sock.h | 9 +++++++++ net/core/sock.c | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index c53cc42b5ab9..8ba438b671d7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -879,6 +879,15 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) __clear_bit(flag, &sk->sk_flags); } +static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, + int valbool) +{ + if (valbool) + sock_set_flag(sk, bit); + else + sock_reset_flag(sk, bit); +} + static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); diff --git a/net/core/sock.c b/net/core/sock.c index 6c4acf1f0220..5ba4753bc04d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -695,15 +695,6 @@ out: return ret; } -static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, - int valbool) -{ - if (valbool) - sock_set_flag(sk, bit); - else - sock_reset_flag(sk, bit); -} - bool sk_mc_loop(struct sock *sk) { if (dev_recursion_level()) -- cgit v1.2.3-59-g8ed1b From aad4a0a9513af962137c4842463d11ed491eec37 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:51 +0300 Subject: tcp: Expose tcp_sock_set_keepidle_locked This is preparation for usage in bpf_setsockopt. 
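TCP_KEEPIDLE and the related keepalive knobs are the same ones userspace
has long set with setsockopt(); exposing tcp_sock_set_keepidle_locked()
lets the upcoming bpf_setsockopt() support share that implementation.
For reference, a minimal userspace equivalent (the values are arbitrary
examples):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int on = 1, idle = 60, intvl = 10, cnt = 5;

		if (fd < 0)
			return 1;

		/* Same options the BPF hook gains later in this series. */
		setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));

		printf("keepalive: idle=%ds intvl=%ds cnt=%d\n",
		       idle, intvl, cnt);
		close(fd);
		return 0;
	}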
v2: - remove redundant EXPORT_SYMBOL (Alexei Starovoitov) Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200620153052.9439-2-zeil@yandex-team.ru --- include/linux/tcp.h | 1 + net/ipv4/tcp.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9aac824c523c..3bdec31ce8f4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -499,6 +499,7 @@ int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, void tcp_sock_set_cork(struct sock *sk, bool on); int tcp_sock_set_keepcnt(struct sock *sk, int val); +int tcp_sock_set_keepidle_locked(struct sock *sk, int val); int tcp_sock_set_keepidle(struct sock *sk, int val); int tcp_sock_set_keepintvl(struct sock *sk, int val); void tcp_sock_set_nodelay(struct sock *sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 810cc164f795..de36c91d32ea 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2957,7 +2957,7 @@ void tcp_sock_set_user_timeout(struct sock *sk, u32 val) } EXPORT_SYMBOL(tcp_sock_set_user_timeout); -static int __tcp_sock_set_keepidle(struct sock *sk, int val) +int tcp_sock_set_keepidle_locked(struct sock *sk, int val) { struct tcp_sock *tp = tcp_sk(sk); @@ -2984,7 +2984,7 @@ int tcp_sock_set_keepidle(struct sock *sk, int val) int err; lock_sock(sk); - err = __tcp_sock_set_keepidle(sk, val); + err = tcp_sock_set_keepidle_locked(sk, val); release_sock(sk); return err; } @@ -3183,7 +3183,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; case TCP_KEEPIDLE: - err = __tcp_sock_set_keepidle(sk, val); + err = tcp_sock_set_keepidle_locked(sk, val); break; case TCP_KEEPINTVL: if (val < 1 || val > MAX_TCP_KEEPINTVL) -- cgit v1.2.3-59-g8ed1b From f9bcf96837f158db6ea982d15cd2c8161ca6bc23 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:52 +0300 Subject: bpf: Add SO_KEEPALIVE and related options to bpf_setsockopt This patch adds support of SO_KEEPALIVE flag and TCP related options to bpf_setsockopt() routine. This is helpful if we want to enable or tune TCP keepalive for applications which don't do it in the userspace code. v3: - update kernel-doc in uapi (Nikita Vetoshkin ) v4: - update kernel-doc in tools too (Alexei Starovoitov) - add test to selftests (Alexei Starovoitov) Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200620153052.9439-3-zeil@yandex-team.ru --- include/uapi/linux/bpf.h | 7 +++-- net/core/filter.c | 36 ++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 7 +++-- tools/testing/selftests/bpf/progs/connect4_prog.c | 27 +++++++++++++++++ 4 files changed, 72 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9d3923e6b860..d9737d51dd19 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1621,10 +1621,13 @@ union bpf_attr { * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. 
+ * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return diff --git a/net/core/filter.c b/net/core/filter.c index 73395384afe2..c713b6b8938f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4289,10 +4289,10 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen, u32 flags) { char devname[IFNAMSIZ]; + int val, valbool; struct net *net; int ifindex; int ret = 0; - int val; if (!sk_fullsock(sk)) return -EINVAL; @@ -4303,6 +4303,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) return -EINVAL; val = *((int *)optval); + valbool = val ? 1 : 0; /* Only some socketops are supported */ switch (optname) { @@ -4361,6 +4362,11 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, } ret = sock_bindtoindex(sk, ifindex, false); break; + case SO_KEEPALIVE: + if (sk->sk_prot->keepalive) + sk->sk_prot->keepalive(sk, valbool); + sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); + break; default: ret = -EINVAL; } @@ -4421,6 +4427,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, ret = tcp_set_congestion_control(sk, name, false, reinit, true); } else { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); if (optlen != sizeof(int)) @@ -4449,6 +4456,33 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, else tp->save_syn = val; break; + case TCP_KEEPIDLE: + ret = tcp_sock_set_keepidle_locked(sk, val); + break; + case TCP_KEEPINTVL: + if (val < 1 || val > MAX_TCP_KEEPINTVL) + ret = -EINVAL; + else + tp->keepalive_intvl = val * HZ; + break; + case TCP_KEEPCNT: + if (val < 1 || val > MAX_TCP_KEEPCNT) + ret = -EINVAL; + else + tp->keepalive_probes = val; + break; + case TCP_SYNCNT: + if (val < 1 || val > MAX_TCP_SYNCNT) + ret = -EINVAL; + else + icsk->icsk_syn_retries = val; + break; + case TCP_USER_TIMEOUT: + if (val < 0) + ret = -EINVAL; + else + icsk->icsk_user_timeout = val; + break; default: ret = -EINVAL; } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9d3923e6b860..d9737d51dd19 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1621,10 +1621,13 @@ union bpf_attr { * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 
* Return diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index 1ab2c5eba86c..b1b2773c0b9d 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -104,6 +104,30 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx) return 0; } +static __inline int set_keepalive(struct bpf_sock_addr *ctx) +{ + int zero = 0, one = 1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one))) + return 1; + if (ctx->type == SOCK_STREAM) { + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one))) + return 1; + } + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero))) + return 1; + + return 0; +} + SEC("cgroup/connect4") int connect_v4_prog(struct bpf_sock_addr *ctx) { @@ -121,6 +145,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx) if (bind_to_device(ctx)) return 0; + if (set_keepalive(ctx)) + return 0; + if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM) return 0; else if (ctx->type == SOCK_STREAM) -- cgit v1.2.3-59-g8ed1b From 14c2b89634a28577a242cb34f339d981da6d3ef6 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 24 Jun 2020 13:54:21 +0300 Subject: RDMA/core: Delete not-used create RWQ table function The RWQ table is used for RSS uverbs and not in used for the kernel consumers, delete ib_create_rwq_ind_table() routine that is not called at all. Link: https://lore.kernel.org/r/20200624105422.1452290-5-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 39 --------------------------------------- include/rdma/ib_verbs.h | 3 --- 2 files changed, 42 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a3761d432a79..7232e6ec2e91 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2409,45 +2409,6 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, } EXPORT_SYMBOL(ib_modify_wq); -/* - * ib_create_rwq_ind_table - Creates a RQ Indirection Table. - * @device: The device on which to create the rwq indirection table. - * @ib_rwq_ind_table_init_attr: A list of initial attributes required to - * create the Indirection Table. - * - * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less - * than the created ib_rwq_ind_table object and the caller is responsible - * for its memory allocation/free. 
- */ -struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, - struct ib_rwq_ind_table_init_attr *init_attr) -{ - struct ib_rwq_ind_table *rwq_ind_table; - int i; - u32 table_size; - - if (!device->ops.create_rwq_ind_table) - return ERR_PTR(-EOPNOTSUPP); - - table_size = (1 << init_attr->log_ind_tbl_size); - rwq_ind_table = device->ops.create_rwq_ind_table(device, - init_attr, NULL); - if (IS_ERR(rwq_ind_table)) - return rwq_ind_table; - - rwq_ind_table->ind_tbl = init_attr->ind_tbl; - rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; - rwq_ind_table->device = device; - rwq_ind_table->uobject = NULL; - atomic_set(&rwq_ind_table->usecnt, 0); - - for (i = 0; i < table_size; i++) - atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); - - return rwq_ind_table; -} -EXPORT_SYMBOL(ib_create_rwq_ind_table); - /* * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. * @wq_ind_table: The Indirection Table to destroy. diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 77106ff3cd26..1e902a8f1713 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -4422,9 +4422,6 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd, int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask); -struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, - struct ib_rwq_ind_table_init_attr* - wq_ind_table_init_attr); int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, -- cgit v1.2.3-59-g8ed1b From 0ef44e5cab8dbf0a0327871b48fe7c8425d0d885 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:07 +0200 Subject: net: phy: add support for a common probe between shared PHYs Shared PHYs (PHYs in the same hardware package) may have shared registers and their drivers would usually need to share information. There is currently a way to have a shared (part of the) init, by using phy_package_init_once(). This patch extends the logic to share parts of the probe to allow sharing the initialization of locks or resources retrieval. Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/phy.h b/include/linux/phy.h index 7860d56c6bf5..6fb8f302978d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -244,7 +244,8 @@ struct phy_package_shared { }; /* used as bit number in atomic bitops */ -#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_PROBE_DONE 1 /* * The Bus class for PHYs. 
Devices which provide access to @@ -1566,14 +1567,25 @@ static inline int __phy_package_write(struct phy_device *phydev, return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); } -static inline bool phy_package_init_once(struct phy_device *phydev) +static inline bool __phy_package_set_once(struct phy_device *phydev, + unsigned int b) { struct phy_package_shared *shared = phydev->shared; if (!shared) return false; - return !test_and_set_bit(PHY_SHARED_F_INIT_DONE, &shared->flags); + return !test_and_set_bit(b, &shared->flags); +} + +static inline bool phy_package_init_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE); +} + +static inline bool phy_package_probe_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE); } extern struct bus_type mdio_bus_type; -- cgit v1.2.3-59-g8ed1b From 899426b3bdd947541ba4af8c767575889c8b842a Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:16 +0300 Subject: net: neighbor: add fdb extended attribute Add an attribute to NDA which will contain all future fdb-specific attributes in order to avoid polluting the NDA namespace with e.g. bridge or vxlan specific attributes. The attribute is called NDA_FDB_EXT_ATTRS and the structure would look like: [NDA_FDB_EXT_ATTRS] = { [NFEA_xxx] } Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 12 ++++++++++++ net/core/neighbour.c | 1 + 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index eefcda8ca44e..540ff48402a1 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -30,6 +30,7 @@ enum { NDA_SRC_VNI, NDA_PROTOCOL, /* Originator of entry */ NDA_NH_ID, + NDA_FDB_EXT_ATTRS, __NDA_MAX }; @@ -172,4 +173,15 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) +/* embedded into NDA_FDB_EXT_ATTRS: + * [NDA_FDB_EXT_ATTRS] = { + * ... + * } + */ +enum { + NFEA_UNSPEC, + __NFEA_MAX +}; +#define NFEA_MAX (__NFEA_MAX - 1) + #endif diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ef6b5a8f629c..8e39e28b0a8d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1783,6 +1783,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = { [NDA_MASTER] = { .type = NLA_U32 }, [NDA_PROTOCOL] = { .type = NLA_U8 }, [NDA_NH_ID] = { .type = NLA_U32 }, + [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED }, }; static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, -- cgit v1.2.3-59-g8ed1b From 31cbc39b6344916c20452e43a9171009214c409c Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:17 +0300 Subject: net: bridge: add option to allow activity notifications for any fdb entries This patch adds the ability to notify about activity of any entries (static, permanent or ext_learn). EVPN multihoming peers need it to properly and efficiently handle mac sync (peer active/locally active). We add a new NFEA_ACTIVITY_NOTIFY attribute which is used to dump the current activity state and to control if static entries should be monitored at all. We use 2 bits - one to activate fdb entry tracking (disabled by default) and the second to denote that an entry is inactive. We need the second bit in order to avoid multiple notifications of inactivity. 
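For illustration, a hedged userspace sketch of how a control-plane daemon might request tracking when it installs a static entry; libmnl attribute helpers are assumed here and all values are illustrative rather than taken from this series:

    #include <sys/socket.h>
    #include <libmnl/libmnl.h>
    #include <linux/rtnetlink.h>
    #include <linux/neighbour.h>
    #include <linux/if_ether.h>

    /* build (not send) an RTM_NEWNEIGH request that installs a static fdb
     * entry and turns on activity notifications for it
     */
    static void build_fdb_notify_req(void *buf, const unsigned char *mac,
                                     int ifindex)
    {
            struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
            struct ndmsg *ndm;
            struct nlattr *nest;

            nlh->nlmsg_type = RTM_NEWNEIGH;
            nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE;

            ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
            ndm->ndm_family = AF_BRIDGE;
            ndm->ndm_state = NUD_NOARP;        /* static, non-dynamic entry */
            ndm->ndm_ifindex = ifindex;

            mnl_attr_put(nlh, NDA_LLADDR, ETH_ALEN, mac);

            nest = mnl_attr_nest_start(nlh, NDA_FDB_EXT_ATTRS);
            mnl_attr_put_u8(nlh, NFEA_ACTIVITY_NOTIFY, FDB_NOTIFY_BIT);
            mnl_attr_nest_end(nlh, nest);
    }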
Obviously this makes no difference for dynamic entries since at the time of inactivity they get deleted, while the tracked non-dynamic entries get the inactive bit set and get a notification. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 11 ++++ net/bridge/br_fdb.c | 117 ++++++++++++++++++++++++++++++++++++----- net/bridge/br_private.h | 4 ++ 3 files changed, 119 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 540ff48402a1..21e569297355 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -173,13 +173,24 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) + /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY: + * - FDB_NOTIFY_BIT - notify on activity/expire for any entry + * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications + */ +enum { + FDB_NOTIFY_BIT = (1 << 0), + FDB_NOTIFY_INACTIVE_BIT = (1 << 1) +}; + /* embedded into NDA_FDB_EXT_ATTRS: * [NDA_FDB_EXT_ATTRS] = { + * [NFEA_ACTIVITY_NOTIFY] * ... * } */ enum { NFEA_UNSPEC, + NFEA_ACTIVITY_NOTIFY, __NFEA_MAX }; #define NFEA_MAX (__NFEA_MAX - 1) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index ed80d9ab0fb9..642deb57c064 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -349,12 +349,21 @@ void br_fdb_cleanup(struct work_struct *work) */ rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { - unsigned long this_timer; + unsigned long this_timer = f->updated + delay; if (test_bit(BR_FDB_STATIC, &f->flags) || - test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) + test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) { + if (test_bit(BR_FDB_NOTIFY, &f->flags)) { + if (time_after(this_timer, now)) + work_delay = min(work_delay, + this_timer - now); + else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, + &f->flags)) + fdb_notify(br, f, RTM_NEWNEIGH, false); + } continue; - this_timer = f->updated + delay; + } + if (time_after(this_timer, now)) { work_delay = min(work_delay, this_timer - now); } else { @@ -556,11 +565,17 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, return ret; } +/* returns true if the fdb was modified */ +static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb) +{ + return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) && + test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)); +} + void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, unsigned long flags) { struct net_bridge_fdb_entry *fdb; - bool fdb_modified = false; /* some users want to always flood. 
*/ if (hold_time(br) == 0) @@ -575,6 +590,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, source->dev->name, addr, vid); } else { unsigned long now = jiffies; + bool fdb_modified = false; + + if (now != fdb->updated) { + fdb->updated = now; + fdb_modified = __fdb_mark_active(fdb); + } /* fastpath: update of existing entry */ if (unlikely(source != fdb->dst && @@ -587,8 +608,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, clear_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); } - if (now != fdb->updated) - fdb->updated = now; + if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); if (unlikely(fdb_modified)) { @@ -667,6 +687,23 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, &fdb->key.vlan_id)) goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) { + struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS); + u8 notify_bits = FDB_NOTIFY_BIT; + + if (!nest) + goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + notify_bits |= FDB_NOTIFY_INACTIVE_BIT; + + if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) { + nla_nest_cancel(skb, nest); + goto nla_put_failure; + } + + nla_nest_end(skb, nest); + } + nlmsg_end(skb, nlh); return 0; @@ -681,7 +718,9 @@ static inline size_t fdb_nlmsg_size(void) + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ + nla_total_size(sizeof(u32)) /* NDA_MASTER */ + nla_total_size(sizeof(u16)) /* NDA_VLAN */ - + nla_total_size(sizeof(struct nda_cacheinfo)); + + nla_total_size(sizeof(struct nda_cacheinfo)) + + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */ + + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */ } static void fdb_notify(struct net_bridge *br, @@ -791,14 +830,40 @@ errout: return err; } +/* returns true if the fdb is modified */ +static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify) +{ + bool modified = false; + + /* allow to mark an entry as inactive, usually done on creation */ + if ((notify & FDB_NOTIFY_INACTIVE_BIT) && + !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + modified = true; + + if ((notify & FDB_NOTIFY_BIT) && + !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) { + /* enabled activity tracking */ + modified = true; + } else if (!(notify & FDB_NOTIFY_BIT) && + test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) { + /* disabled activity tracking, clear notify state */ + clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags); + modified = true; + } + + return modified; +} + /* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, - const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid) + const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid, + struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; + u8 notify = 0; /* If the port cannot learn allow only local and static entries */ if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && @@ -815,6 +880,13 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, if (is_sticky && (state & NUD_PERMANENT)) return -EINVAL; + if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) { + notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]); + if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) || + (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT) + return -EINVAL; + } + fdb = br_fdb_find(br, addr, vid); if (fdb == 
NULL) { if (!(flags & NLM_F_CREATE)) @@ -858,6 +930,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, modified = true; } + if (fdb_handle_notify(fdb, notify)) + modified = true; + set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); fdb->used = jiffies; @@ -871,7 +946,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, - u16 nlh_flags, u16 vid) + u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[]) { int err = 0; @@ -893,19 +968,24 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, err = br_fdb_external_learn_add(br, p, addr, vid, true); } else { spin_lock_bh(&br->hash_lock); - err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid); + err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb); spin_unlock_bh(&br->hash_lock); } return err; } +static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { + [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, +}; + /* Add new permanent fdb entry with RTM_NEWNEIGH */ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, struct netlink_ext_ack *extack) { + struct nlattr *nfea_tb[NFEA_MAX + 1], *attr; struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; @@ -938,6 +1018,16 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], vg = nbp_vlan_group(p); } + if (tb[NDA_FDB_EXT_ATTRS]) { + attr = tb[NDA_FDB_EXT_ATTRS]; + err = nla_parse_nested(nfea_tb, NFEA_MAX, attr, + br_nda_fdb_pol, extack); + if (err) + return err; + } else { + memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1)); + } + if (vid) { v = br_vlan_find(vg, vid); if (!v || !br_vlan_should_use(v)) { @@ -946,9 +1036,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* VID was specified, so use it. */ - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb); } else { - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb); if (err || !vg || !vg->num_vlans) goto out; @@ -959,7 +1049,8 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, + nfea_tb); if (err) goto out; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 7501be4eeba0..c0ae639e1b36 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -48,6 +48,8 @@ enum { /* Path to usermode spanning tree program */ #define BR_STP_PROG "/sbin/bridge-stp" +#define BR_FDB_NOTIFY_SETTABLE_BITS (FDB_NOTIFY_BIT | FDB_NOTIFY_INACTIVE_BIT) + typedef struct bridge_id bridge_id; typedef struct mac_addr mac_addr; typedef __u16 port_id; @@ -184,6 +186,8 @@ enum { BR_FDB_ADDED_BY_USER, BR_FDB_ADDED_BY_EXT_LEARN, BR_FDB_OFFLOADED, + BR_FDB_NOTIFY, + BR_FDB_NOTIFY_INACTIVE }; struct net_bridge_fdb_key { -- cgit v1.2.3-59-g8ed1b From b5f1d9ec283bd28a452cf61d7e5c2f2b1a9cccda Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:18 +0300 Subject: net: bridge: add a flag to avoid refreshing fdb when changing/adding When we modify or create a new fdb entry sometimes we want to avoid refreshing its activity in order to track it properly. 
One example is when a mac is received from EVPN multi-homing peer by FRR, which doesn't want to change local activity accounting. It makes it static and sets a flag to track its activity. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 1 + net/bridge/br_fdb.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 21e569297355..dc8b72201f6c 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -191,6 +191,7 @@ enum { enum { NFEA_UNSPEC, NFEA_ACTIVITY_NOTIFY, + NFEA_DONT_REFRESH, __NFEA_MAX }; #define NFEA_MAX (__NFEA_MAX - 1) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 642deb57c064..9db504baa094 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -860,6 +860,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); + bool refresh = !nfea_tb[NFEA_DONT_REFRESH]; struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; @@ -937,7 +938,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, fdb->used = jiffies; if (modified) { - fdb->updated = jiffies; + if (refresh) + fdb->updated = jiffies; fdb_notify(br, fdb, RTM_NEWNEIGH, true); } @@ -977,6 +979,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, + [NFEA_DONT_REFRESH] = { .type = NLA_FLAG }, }; /* Add new permanent fdb entry with RTM_NEWNEIGH */ -- cgit v1.2.3-59-g8ed1b From b08d4d3b6c0460306e8a0608413b201705200d33 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:04 -0700 Subject: net: bpf: Add bpf_seq_afinfo in tcp_iter_state A new field bpf_seq_afinfo is added to tcp_iter_state to provide bpf tcp iterator afinfo. There are two reasons on why we did this. First, the current way to get afinfo from PDE_DATA does not work for bpf iterator as its seq_file inode does not conform to /proc/net/{tcp,tcp6} inode structures. More specifically, anonymous bpf iterator will use an anonymous inode which is shared in the system and we cannot change inode private data structure at all. Second, bpf iterator for tcp/tcp6 wants to traverse all tcp and tcp6 sockets in one pass and bpf program can control whether they want to skip one sk_family or not. Having a different afinfo with family AF_UNSPEC make it easier to understand in the code. This patch does not change /proc/net/{tcp,tcp6} behavior as the bpf_seq_afinfo will be NULL for these two proc files. 
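A hedged sketch of how a bpf tcp iterator's seq_file init could populate the new field so that the existing listing code walks both families in one pass; the function name is illustrative and the actual wiring is left to follow-up iterator patches:

    #include <linux/slab.h>
    #include <net/tcp.h>

    static int example_bpf_iter_init_tcp(void *priv_data)
    {
            struct tcp_iter_state *st = priv_data;
            struct tcp_seq_afinfo *afinfo;

            afinfo = kzalloc(sizeof(*afinfo), GFP_USER);
            if (!afinfo)
                    return -ENOMEM;

            /* AF_UNSPEC makes listening_get_next()/established_get_*()
             * stop filtering on sk_family, so both tcp and tcp6 sockets
             * are visited
             */
            afinfo->family = AF_UNSPEC;
            st->bpf_seq_afinfo = afinfo;

            return 0;
    }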
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230804.3987829-1-yhs@fb.com --- include/net/tcp.h | 1 + net/ipv4/tcp_ipv4.c | 30 ++++++++++++++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 4de9485f73d9..eab1c7d0facb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1935,6 +1935,7 @@ struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; + struct tcp_seq_afinfo *bpf_seq_afinfo; int bucket, offset, sbucket, num; loff_t last_pos; }; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad6435ba6d72..9cb65ee4ec63 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2211,13 +2211,18 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); */ static void *listening_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); struct inet_listen_hashbucket *ilb; struct hlist_nulls_node *node; struct sock *sk = cur; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (!sk) { get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; @@ -2235,7 +2240,8 @@ get_sk: sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) return sk; } spin_unlock(&ilb->lock); @@ -2272,11 +2278,16 @@ static inline bool empty_bucket(const struct tcp_iter_state *st) */ static void *established_get_first(struct seq_file *seq) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + st->offset = 0; for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; @@ -2289,7 +2300,8 @@ static void *established_get_first(struct seq_file *seq) spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { - if (sk->sk_family != afinfo->family || + if ((afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family) || !net_eq(sock_net(sk), net)) { continue; } @@ -2304,19 +2316,25 @@ out: static void *established_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct sock *sk = cur; struct hlist_nulls_node *node; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + ++st->num; ++st->offset; sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { - if (sk->sk_family == afinfo->family && + if ((afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) && net_eq(sock_net(sk), net)) return sk; } -- cgit v1.2.3-59-g8ed1b From af7ec13833619e17f03aa73a785a2f871da6d66b Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:09 -0700 Subject: bpf: Add bpf_skc_to_tcp6_sock() helper The helper is used in tracing programs to cast a socket pointer to a tcp6_sock pointer. 
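For instance, a tracing program could narrow a generic sock pointer as in this hedged sketch in libbpf selftest style; the fentry attach point and the empty body are illustrative and not part of this series:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("fentry/tcp_v6_connect")
    int BPF_PROG(trace_tcp6_connect, struct sock *sk)
    {
            struct tcp6_sock *tp6 = bpf_skc_to_tcp6_sock(sk);

            if (!tp6)       /* cast failed: not a full TCP/IPv6 socket */
                    return 0;

            /* tp6 is a BTF-typed pointer, so tcp6_sock members can be
             * dereferenced directly under verifier supervision
             */
            return 0;
    }

    char _license[] SEC("license") = "GPL";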
The return value could be NULL if the casting is illegal. A new helper return type RET_PTR_TO_BTF_ID_OR_NULL is added so the verifier is able to deduce proper return types for the helper. Different from the previous BTF_ID based helpers, the bpf_skc_to_tcp6_sock() argument can be several possible btf_ids. More specifically, all possible socket data structures with sock_common appearing in the first in the memory layout. This patch only added socket types related to tcp and udp. All possible argument btf_id and return value btf_id for helper bpf_skc_to_tcp6_sock() are pre-calculcated and cached. In the future, it is even possible to precompute these btf_id's at kernel build time. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230809.3988195-1-yhs@fb.com --- include/linux/bpf.h | 12 +++++++ include/uapi/linux/bpf.h | 9 ++++- kernel/bpf/btf.c | 1 + kernel/bpf/verifier.c | 43 ++++++++++++++++------ kernel/trace/bpf_trace.c | 2 ++ net/core/filter.c | 82 ++++++++++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 9 ++++- 8 files changed, 148 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1e1501ee53ce..c0e38ad07848 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -265,6 +265,7 @@ enum bpf_return_type { RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ + RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -287,6 +288,12 @@ struct bpf_func_proto { enum bpf_arg_type arg_type[5]; }; int *btf_id; /* BTF ids of arguments */ + bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is + * valid. Often used if more + * than one btf id is permitted + * for this argument. + */ + int *ret_btf_id; /* return value btf_id */ }; /* bpf_context is intentionally undefined structure. 
Pointer to bpf_context is @@ -1524,6 +1531,7 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); +void init_btf_sock_ids(struct btf *btf); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1549,6 +1557,9 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } +static inline void init_btf_sock_ids(struct btf *btf) +{ +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) @@ -1638,6 +1649,7 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d9737d51dd19..e90ad07b291a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3255,6 +3255,12 @@ union bpf_attr { * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3392,7 +3398,8 @@ union bpf_attr { FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ - FN(csum_level), + FN(csum_level), \ + FN(skc_to_tcp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e377d1981730..4c3007f428b1 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3674,6 +3674,7 @@ struct btf *btf_parse_vmlinux(void) goto errout; bpf_struct_ops_init(btf, log); + init_btf_sock_ids(btf); btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7460f967cb75..7de98906ddf4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3800,12 +3800,14 @@ static int int_ptr_type_to_size(enum bpf_arg_type type) return -EINVAL; } -static int check_func_arg(struct bpf_verifier_env *env, u32 regno, - enum bpf_arg_type arg_type, - struct bpf_call_arg_meta *meta) +static int check_func_arg(struct bpf_verifier_env *env, u32 arg, + struct bpf_call_arg_meta *meta, + const struct bpf_func_proto *fn) { + u32 regno = BPF_REG_1 + arg; struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; enum bpf_reg_type expected_type, type = reg->type; + enum bpf_arg_type arg_type = fn->arg_type[arg]; int err = 0; if (arg_type == ARG_DONTCARE) @@ -3885,9 +3887,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; - if (reg->btf_id != meta->btf_id) { - verbose(env, "Helper has type %s got %s in R%d\n", - kernel_type_name(meta->btf_id), + if (!fn->check_btf_id) { + if (reg->btf_id != meta->btf_id) { + verbose(env, "Helper has type %s got %s in R%d\n", + kernel_type_name(meta->btf_id), + 
kernel_type_name(reg->btf_id), regno); + + return -EACCES; + } + } else if (!fn->check_btf_id(reg->btf_id, arg)) { + verbose(env, "Helper does not support %s in R%d\n", kernel_type_name(reg->btf_id), regno); return -EACCES; @@ -4709,10 +4718,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn meta.func_id = func_id; /* check args */ for (i = 0; i < 5; i++) { - err = btf_resolve_helper_id(&env->log, fn, i); - if (err > 0) - meta.btf_id = err; - err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta); + if (!fn->check_btf_id) { + err = btf_resolve_helper_id(&env->log, fn, i); + if (err > 0) + meta.btf_id = err; + } + err = check_func_arg(env, i, &meta, fn); if (err) return err; } @@ -4815,6 +4826,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; regs[BPF_REG_0].mem_size = meta.mem_size; + } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { + int ret_btf_id; + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; + ret_btf_id = *fn->ret_btf_id; + if (ret_btf_id == 0) { + verbose(env, "invalid return type %d of func %s#%d\n", + fn->ret_type, func_id_name(func_id), func_id); + return -EINVAL; + } + regs[BPF_REG_0].btf_id = ret_btf_id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0159f12d2af5..2a97a268f533 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1515,6 +1515,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skb_output_proto; case BPF_FUNC_xdp_output: return &bpf_xdp_output_proto; + case BPF_FUNC_skc_to_tcp6_sock: + return &bpf_skc_to_tcp6_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index c713b6b8938f..176e27d75c51 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -9225,3 +9226,84 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) { bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } + +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, "inet_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, "inet_connection_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, "inet_request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, "inet_timewait_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, "request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, "sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, "sock_common") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, "tcp_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, "tcp_request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, "tcp_timewait_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, "tcp6_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, "udp_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, "udp6_sock") + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +static int btf_sock_ids[MAX_BTF_SOCK_TYPE]; + +#ifdef CONFIG_BPF_SYSCALL +static const char *bpf_sock_types[] = { +#define BTF_SOCK_TYPE(name, str) str, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +}; + +void init_btf_sock_ids(struct btf *btf) +{ + int i, btf_id; + + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) { + btf_id = btf_find_by_name_kind(btf, bpf_sock_types[i], + BTF_KIND_STRUCT); + if (btf_id > 0) + btf_sock_ids[i] = btf_id; + } +} +#endif + +static bool check_arg_btf_id(u32 btf_id, u32 arg) +{ + int i; + + /* only one argument, no need to check arg */ + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) + if (btf_sock_ids[i] == btf_id) + return true; + return false; +} + +BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) +{ + /* tcp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. + */ + BTF_TYPE_EMIT(struct tcp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && + sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { + .func = bpf_skc_to_tcp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 91fa668fa860..6c2f64118651 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -421,6 +421,7 @@ class PrinterHelpers(Printer): 'struct sockaddr', 'struct tcphdr', 'struct seq_file', + 'struct tcp6_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -458,6 +459,7 @@ class PrinterHelpers(Printer): 'struct sockaddr', 'struct tcphdr', 'struct seq_file', + 'struct tcp6_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d9737d51dd19..e90ad07b291a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3255,6 +3255,12 @@ union bpf_attr { * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3392,7 +3398,8 @@ union bpf_attr { FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ - FN(csum_level), + FN(csum_level), \ + FN(skc_to_tcp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3-59-g8ed1b From 478cfbdf5f13dfe09cfd0b1cbac821f5e27f6108 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:11 -0700 Subject: bpf: Add bpf_skc_to_{tcp, tcp_timewait, tcp_request}_sock() helpers Three more helpers are added to cast a sock_common pointer to an tcp_sock, tcp_timewait_sock or a tcp_request_sock for tracing programs. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230811.3988277-1-yhs@fb.com --- include/linux/bpf.h | 3 ++ include/uapi/linux/bpf.h | 23 +++++++++++++++- kernel/trace/bpf_trace.c | 6 ++++ net/core/filter.c | 62 ++++++++++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 6 ++++ tools/include/uapi/linux/bpf.h | 23 +++++++++++++++- 6 files changed, 121 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c0e38ad07848..c23998cf6699 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1650,6 +1650,9 @@ extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e90ad07b291a..b9412ab275f3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3261,6 +3261,24 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3399,7 +3417,10 @@ union bpf_attr { FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ - FN(skc_to_tcp6_sock), + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2a97a268f533..48d935b0d87c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1517,6 +1517,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_output_proto; case BPF_FUNC_skc_to_tcp6_sock: return &bpf_skc_to_tcp6_sock_proto; + case BPF_FUNC_skc_to_tcp_sock: + return &bpf_skc_to_tcp_sock_proto; + case BPF_FUNC_skc_to_tcp_timewait_sock: + return &bpf_skc_to_tcp_timewait_sock_proto; + case BPF_FUNC_skc_to_tcp_request_sock: + return &bpf_skc_to_tcp_request_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index 176e27d75c51..0b4e5aed7e20 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -74,6 +74,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet through a socket filter @@ -9307,3 +9308,64 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { .check_btf_id = check_arg_btf_id, .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], }; + +BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) +{ + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { + .func = bpf_skc_to_tcp_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], +}; + +BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { + .func = bpf_skc_to_tcp_timewait_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], +}; + +BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { + .func = bpf_skc_to_tcp_request_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6c2f64118651..d886657c6aaa 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -422,6 +422,9 @@ class PrinterHelpers(Printer): 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', + 'struct tcp_sock', + 
'struct tcp_timewait_sock', + 'struct tcp_request_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -460,6 +463,9 @@ class PrinterHelpers(Printer): 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e90ad07b291a..b9412ab275f3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3261,6 +3261,24 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3399,7 +3417,10 @@ union bpf_attr { FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ - FN(skc_to_tcp6_sock), + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3-59-g8ed1b From 9e8ca27afab6c92477b459f6a5d2af0cd3197c20 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:12 -0700 Subject: net: bpf: Add bpf_seq_afinfo in udp_iter_state Similar to tcp_iter_state, a new field bpf_seq_afinfo is added to udp_iter_state to provide bpf udp iterator afinfo. This does not change /proc/net/{udp, udp6} behavior. But it enables bpf iterator to avoid get afinfo from PDE_DATA and iterate through all udp and udp6 sockets in one pass. 
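The intended use mirrors the tcp case earlier: a bpf udp iterator's init would hang a udp_seq_afinfo with family AF_UNSPEC off the new field, as in this hedged sketch (function name illustrative; the real wiring is expected from follow-up patches):

    #include <linux/slab.h>
    #include <net/udp.h>

    static int example_bpf_iter_init_udp(void *priv_data)
    {
            struct udp_iter_state *st = priv_data;
            struct udp_seq_afinfo *afinfo;

            afinfo = kzalloc(sizeof(*afinfo), GFP_USER);
            if (!afinfo)
                    return -ENOMEM;

            afinfo->family = AF_UNSPEC;     /* visit udp and udp6 sockets */
            afinfo->udp_table = &udp_table; /* the global UDP hash table */
            st->bpf_seq_afinfo = afinfo;

            return 0;
    }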
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230812.3988347-1-yhs@fb.com --- include/net/udp.h | 1 + net/ipv4/udp.c | 28 +++++++++++++++++++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index a8fa6c0c6ded..67c8b7368845 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -440,6 +440,7 @@ struct udp_seq_afinfo { struct udp_iter_state { struct seq_net_private p; int bucket; + struct udp_seq_afinfo *bpf_seq_afinfo; }; void *udp_seq_start(struct seq_file *seq, loff_t *pos); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1b7ebbcae497..90355301b266 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2826,10 +2826,15 @@ EXPORT_SYMBOL(udp_prot); static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + for (state->bucket = start; state->bucket <= afinfo->udp_table->mask; ++state->bucket) { struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket]; @@ -2841,7 +2846,8 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) sk_for_each(sk, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) goto found; } spin_unlock_bh(&hslot->lock); @@ -2853,13 +2859,20 @@ found: static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + do { sk = sk_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family)); + } while (sk && (!net_eq(sock_net(sk), net) || + (afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family))); if (!sk) { if (state->bucket <= afinfo->udp_table->mask) @@ -2904,9 +2917,14 @@ EXPORT_SYMBOL(udp_seq_next); void udp_seq_stop(struct seq_file *seq, void *v) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (state->bucket <= afinfo->udp_table->mask) spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock); } -- cgit v1.2.3-59-g8ed1b From 0d4fad3e57df2bf61e8ffc8d12a34b1caf9b8835 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:15 -0700 Subject: bpf: Add bpf_skc_to_udp6_sock() helper The helper is used in tracing programs to cast a socket pointer to a udp6_sock pointer. The return value could be NULL if the casting is illegal. 
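As with the tcp6 variant, a tracing program would typically guard the cast with a NULL check. A hedged sketch follows; the udpv6_sendmsg attach point is only an example and not taken from this series:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("fentry/udpv6_sendmsg")
    int BPF_PROG(trace_udp6_send, struct sock *sk)
    {
            struct udp6_sock *up6 = bpf_skc_to_udp6_sock(sk);

            if (!up6)       /* not a full AF_INET6 SOCK_DGRAM socket */
                    return 0;

            /* udp6_sock members are now reachable through the BTF-typed
             * pointer
             */
            return 0;
    }

    char _license[] SEC("license") = "GPL";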
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Cc: Eric Dumazet Link: https://lore.kernel.org/bpf/20200623230815.3988481-1-yhs@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 9 ++++++++- kernel/trace/bpf_trace.c | 2 ++ net/core/filter.c | 22 ++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 9 ++++++++- 6 files changed, 43 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c23998cf6699..3d2ade703a35 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1653,6 +1653,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b9412ab275f3..0cb8ec948816 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3279,6 +3279,12 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3420,7 +3426,8 @@ union bpf_attr { FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 48d935b0d87c..5d59dda5f661 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1523,6 +1523,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skc_to_tcp_timewait_sock_proto; case BPF_FUNC_skc_to_tcp_request_sock: return &bpf_skc_to_tcp_request_sock_proto; + case BPF_FUNC_skc_to_udp6_sock: + return &bpf_skc_to_udp6_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index 0b4e5aed7e20..c796e141ea8e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9369,3 +9369,25 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { .check_btf_id = check_arg_btf_id, .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], }; + +BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) +{ + /* udp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. 
+ */ + BTF_TYPE_EMIT(struct udp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && + sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { + .func = bpf_skc_to_udp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index d886657c6aaa..6bab40ff442e 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -425,6 +425,7 @@ class PrinterHelpers(Printer): 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', + 'struct udp6_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -466,6 +467,7 @@ class PrinterHelpers(Printer): 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', + 'struct udp6_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index b9412ab275f3..0cb8ec948816 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3279,6 +3279,12 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3420,7 +3426,8 @@ union bpf_attr { FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3-59-g8ed1b From 58ffbba6a39979baa22d2f7e69faeffa2d9c0641 Mon Sep 17 00:00:00 2001 From: Akash Asthana Date: Tue, 23 Jun 2020 16:08:50 +0530 Subject: soc: qcom: geni: Support for ICC voting Add necessary macros and structure variables to support ICC BW voting from individual SE drivers. 
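As a usage illustration, the following hedged sketch shows how an individual SE driver could consume the new helpers from its probe and runtime-PM paths. The foo_geni_* function names and the DDR bandwidth value are assumptions for the example; the helpers, the geni_icc_path indices and GENI_DEFAULT_BW are the ones introduced by this patch.

#include <linux/qcom-geni-se.h>

static int foo_geni_probe_icc(struct geni_se *se)
{
	int ret;

	/* Pass NULL instead of "qup-memory" for SEs that never DMA to DDR. */
	ret = geni_icc_get(se, "qup-memory");
	if (ret)
		return ret;

	se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(1000000);

	/* Apply the votes once; suspend/resume later only toggles them. */
	return geni_icc_set_bw(se);
}

static int foo_geni_runtime_suspend(struct geni_se *se)
{
	return geni_icc_disable(se);
}

static int foo_geni_runtime_resume(struct geni_se *se)
{
	return geni_icc_enable(se);
}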
Signed-off-by: Akash Asthana Reviewed-by: Matthias Kaehlcke Link: https://lore.kernel.org/r/1592908737-7068-2-git-send-email-akashast@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom-geni-se.c | 82 +++++++++++++++++++++++++++++++++++++++++ include/linux/qcom-geni-se.h | 38 +++++++++++++++++++ 2 files changed, 120 insertions(+) (limited to 'include') diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index 7d622ea1274e..950e3470c498 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -92,6 +92,9 @@ struct geni_wrapper { struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; }; +static const char * const icc_path_names[] = {"qup-core", "qup-config", + "qup-memory"}; + #define QUP_HW_VER_REG 0x4 /* Common SE registers */ @@ -720,6 +723,85 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) } EXPORT_SYMBOL(geni_se_rx_dma_unprep); +int geni_icc_get(struct geni_se *se, const char *icc_ddr) +{ + int i, err; + const char *icc_names[] = {"qup-core", "qup-config", icc_ddr}; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + if (!icc_names[i]) + continue; + + se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]); + if (IS_ERR(se->icc_paths[i].path)) + goto err; + } + + return 0; + +err: + err = PTR_ERR(se->icc_paths[i].path); + if (err != -EPROBE_DEFER) + dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n", + icc_names[i], err); + return err; + +} +EXPORT_SYMBOL(geni_icc_get); + +int geni_icc_set_bw(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_set_bw(se->icc_paths[i].path, + se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw); + if (ret) { + dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_set_bw); + +/* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */ +int geni_icc_enable(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_enable(se->icc_paths[i].path); + if (ret) { + dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_enable); + +int geni_icc_disable(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_disable(se->icc_paths[i].path); + if (ret) { + dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_disable); + static int geni_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index dd464943f717..80dbc01904d6 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -6,6 +6,8 @@ #ifndef _LINUX_QCOM_GENI_SE #define _LINUX_QCOM_GENI_SE +#include + /* Transfer mode supported by GENI Serial Engines */ enum geni_se_xfer_mode { GENI_SE_INVALID, @@ -25,6 +27,17 @@ enum geni_se_protocol_type { struct geni_wrapper; struct clk; +enum geni_icc_path_index { + GENI_TO_CORE, + CPU_TO_GENI, + GENI_TO_DDR +}; + +struct geni_icc_path { + struct icc_path *path; + unsigned int avg_bw; +}; + /** * struct geni_se - GENI Serial Engine * @base: Base Address of the Serial Engine's register block @@ -33,6 +46,7 @@ struct clk; * @clk: Handle to the core serial engine clock * 
@num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock + * @icc_paths: Array of ICC paths for SE */ struct geni_se { void __iomem *base; @@ -41,6 +55,7 @@ struct geni_se { struct clk *clk; unsigned int num_clk_levels; unsigned long *clk_perf_tbl; + struct geni_icc_path icc_paths[3]; }; /* Common SE registers */ @@ -229,6 +244,21 @@ struct geni_se { #define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT) #define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK) +/* + * Define bandwidth thresholds that cause the underlying Core 2X interconnect + * clock to run at the named frequency. These baseline values are recommended + * by the hardware team, and are not dynamically scaled with GENI bandwidth + * beyond basic on/off. + */ +#define CORE_2X_19_2_MHZ 960 +#define CORE_2X_50_MHZ 2500 +#define CORE_2X_100_MHZ 5000 +#define CORE_2X_150_MHZ 7500 +#define CORE_2X_200_MHZ 10000 +#define CORE_2X_236_MHZ 16383 + +#define GENI_DEFAULT_BW Bps_to_icc(1000) + #if IS_ENABLED(CONFIG_QCOM_GENI_SE) u32 geni_se_get_qup_hw_version(struct geni_se *se); @@ -416,5 +446,13 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); + +int geni_icc_get(struct geni_se *se, const char *icc_ddr); + +int geni_icc_set_bw(struct geni_se *se); + +int geni_icc_enable(struct geni_se *se); + +int geni_icc_disable(struct geni_se *se); #endif #endif -- cgit v1.2.3-59-g8ed1b From 048eb908a1f276ca0346f20a3e6e7d707dcd81f3 Mon Sep 17 00:00:00 2001 From: Akash Asthana Date: Tue, 23 Jun 2020 16:08:51 +0530 Subject: soc: qcom-geni-se: Add interconnect support to fix earlycon crash QUP core clock is shared among all the SE drivers present on particular QUP wrapper, the system will reset(unclocked access) if earlycon used after QUP core clock is put to 0 from other SE drivers before real console comes up. As earlycon can't vote for it's QUP core need, to fix this add ICC support to common/QUP wrapper driver and put vote for QUP core from probe on behalf of earlycon and remove vote during earlycon exit call. 
Signed-off-by: Akash Asthana Reported-by: Matthias Kaehlcke Reviewed-by: Matthias Kaehlcke Link: https://lore.kernel.org/r/1592908737-7068-3-git-send-email-akashast@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom-geni-se.c | 68 +++++++++++++++++++++++++++++++++++ drivers/tty/serial/qcom_geni_serial.c | 7 ++++ include/linux/qcom-geni-se.h | 2 ++ 3 files changed, 77 insertions(+) (limited to 'include') diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index 950e3470c498..e2a0ba278b6b 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -90,11 +91,14 @@ struct geni_wrapper { struct device *dev; void __iomem *base; struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; + struct geni_icc_path to_core; }; static const char * const icc_path_names[] = {"qup-core", "qup-config", "qup-memory"}; +static struct geni_wrapper *earlycon_wrapper; + #define QUP_HW_VER_REG 0x4 /* Common SE registers */ @@ -802,11 +806,38 @@ int geni_icc_disable(struct geni_se *se) } EXPORT_SYMBOL(geni_icc_disable); +void geni_remove_earlycon_icc_vote(void) +{ + struct geni_wrapper *wrapper; + struct device_node *parent; + struct device_node *child; + + if (!earlycon_wrapper) + return; + + wrapper = earlycon_wrapper; + parent = of_get_next_parent(wrapper->dev->of_node); + for_each_child_of_node(parent, child) { + if (!of_device_is_compatible(child, "qcom,geni-se-qup")) + continue; + wrapper = platform_get_drvdata(of_find_device_by_node(child)); + icc_put(wrapper->to_core.path); + wrapper->to_core.path = NULL; + + } + of_node_put(parent); + + earlycon_wrapper = NULL; +} +EXPORT_SYMBOL(geni_remove_earlycon_icc_vote); + static int geni_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct geni_wrapper *wrapper; + struct console __maybe_unused *bcon; + bool __maybe_unused has_earlycon = false; int ret; wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL); @@ -829,6 +860,43 @@ static int geni_se_probe(struct platform_device *pdev) } } +#ifdef CONFIG_SERIAL_EARLYCON + for_each_console(bcon) { + if (!strcmp(bcon->name, "qcom_geni")) { + has_earlycon = true; + break; + } + } + if (!has_earlycon) + goto exit; + + wrapper->to_core.path = devm_of_icc_get(dev, "qup-core"); + if (IS_ERR(wrapper->to_core.path)) + return PTR_ERR(wrapper->to_core.path); + /* + * Put minmal BW request on core clocks on behalf of early console. + * The vote will be removed earlycon exit function. + * + * Note: We are putting vote on each QUP wrapper instead only to which + * earlycon is connected because QUP core clock of different wrapper + * share same voltage domain. If core1 is put to 0, then core2 will + * also run at 0, if not voted. Default ICC vote will be removed ASA + * we touch any of the core clock. 
+ * core1 = core2 = max(core1, core2) + */ + ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW, + GENI_DEFAULT_BW); + if (ret) { + dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n", + __func__, ret); + return ret; + } + + if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart")) + earlycon_wrapper = wrapper; + of_node_put(pdev->dev.of_node); +#endif +exit: dev_set_drvdata(dev, wrapper); dev_dbg(dev, "GENI SE Driver probed\n"); return devm_of_platform_populate(dev); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 457c0bf8cbf8..a4468db3b734 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1121,6 +1121,12 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se, struct console *con) { } #endif +static int qcom_geni_serial_earlycon_exit(struct console *con) +{ + geni_remove_earlycon_icc_vote(); + return 0; +} + static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, const char *opt) { @@ -1166,6 +1172,7 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); dev->con->write = qcom_geni_serial_earlycon_write; + dev->con->exit = qcom_geni_serial_earlycon_exit; dev->con->setup = NULL; qcom_geni_serial_enable_early_read(&se, dev->con); diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 80dbc01904d6..743dd975d1cd 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -454,5 +454,7 @@ int geni_icc_set_bw(struct geni_se *se); int geni_icc_enable(struct geni_se *se); int geni_icc_disable(struct geni_se *se); + +void geni_remove_earlycon_icc_vote(void); #endif #endif -- cgit v1.2.3-59-g8ed1b From a5819b548af0cc0fd0b84fa3e35723c4c36f157c Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Mon, 15 Jun 2020 17:32:39 +0530 Subject: tty: serial: qcom_geni_serial: Use OPP API to set clk/perf state geni serial needs to express a perforamnce state requirement on CX powerdomain depending on the frequency of the clock rates. Use OPP table from DT to register with OPP framework and use dev_pm_opp_set_rate() to set the clk/perf state. 
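Condensed, the pattern is the one sketched below; error unwinding and the surrounding driver glue are omitted, and the foo_* helper names are illustrative only, while the "se" clock name and the geni_se fields are the ones used by this change.

#include <linux/pm_opp.h>

static int foo_register_opp(struct device *dev, struct geni_se *se)
{
	int ret;

	/* Bind the OPP table to the serial engine clock named "se". */
	se->opp_table = dev_pm_opp_set_clkname(dev, "se");
	if (IS_ERR(se->opp_table))
		return PTR_ERR(se->opp_table);

	/* An OPP table in DT is optional; -ENODEV just means there is none. */
	ret = dev_pm_opp_of_add_table(dev);
	if (!ret)
		se->has_opp_table = true;
	else if (ret != -ENODEV)
		return ret;

	return 0;
}

static void foo_set_clk_rate(struct device *dev, unsigned long clk_rate)
{
	/*
	 * Unlike clk_set_rate(), this also requests the CX performance
	 * state that the OPP table associates with clk_rate.
	 */
	dev_pm_opp_set_rate(dev, clk_rate);
}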
Signed-off-by: Rajendra Nayak Reviewed-by: Matthias Kaehlcke Acked-by: Greg Kroah-Hartman Cc: Akash Asthana Cc: linux-serial@vger.kernel.org Link: https://lore.kernel.org/r/1592222564-13556-2-git-send-email-rnayak@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/tty/serial/qcom_geni_serial.c | 29 +++++++++++++++++++++++++---- include/linux/qcom-geni-se.h | 4 ++++ 2 files changed, 29 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index f701c7e9b89d..0300867eab7a 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -963,7 +964,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, goto out_restart_rx; uport->uartclk = clk_rate; - clk_set_rate(port->se.clk, clk_rate); + dev_pm_opp_set_rate(uport->dev, clk_rate); ser_clk_cfg = SER_CLK_EN; ser_clk_cfg |= clk_div << CLK_DIV_SHFT; @@ -1383,13 +1384,25 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap")) port->cts_rts_swap = true; + port->se.opp_table = dev_pm_opp_set_clkname(&pdev->dev, "se"); + if (IS_ERR(port->se.opp_table)) + return PTR_ERR(port->se.opp_table); + /* OPP table is optional */ + ret = dev_pm_opp_of_add_table(&pdev->dev); + if (!ret) { + port->se.has_opp_table = true; + } else if (ret != -ENODEV) { + dev_err(&pdev->dev, "invalid OPP table in device tree\n"); + return ret; + } + uport->private_data = drv; platform_set_drvdata(pdev, port); port->handle_rx = console ? handle_rx_console : handle_rx_uart; ret = uart_add_one_port(drv, uport); if (ret) - return ret; + goto err; irq_set_status_flags(uport->irq, IRQ_NOAUTOEN); ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr, @@ -1397,7 +1410,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (ret) { dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret); uart_remove_one_port(drv, uport); - return ret; + goto err; } /* @@ -1414,11 +1427,16 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (ret) { device_init_wakeup(&pdev->dev, false); uart_remove_one_port(drv, uport); - return ret; + goto err; } } return 0; +err: + if (port->se.has_opp_table) + dev_pm_opp_of_remove_table(&pdev->dev); + dev_pm_opp_put_clkname(port->se.opp_table); + return ret; } static int qcom_geni_serial_remove(struct platform_device *pdev) @@ -1426,6 +1444,9 @@ static int qcom_geni_serial_remove(struct platform_device *pdev) struct qcom_geni_serial_port *port = platform_get_drvdata(pdev); struct uart_driver *drv = port->uport.private_data; + if (port->se.has_opp_table) + dev_pm_opp_of_remove_table(&pdev->dev); + dev_pm_opp_put_clkname(port->se.opp_table); dev_pm_clear_wake_irq(&pdev->dev); device_init_wakeup(&pdev->dev, false); uart_remove_one_port(drv, &port->uport); diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 743dd975d1cd..afa511ef1457 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -47,6 +47,8 @@ struct geni_icc_path { * @num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock * @icc_paths: Array of ICC paths for SE + * @opp_table: Pointer to the OPP table + * @has_opp_table: Specifies if the SE has an OPP table */ struct geni_se { void __iomem *base; @@ -56,6 +58,8 @@ struct geni_se { unsigned int 
num_clk_levels; unsigned long *clk_perf_tbl; struct geni_icc_path icc_paths[3]; + struct opp_table *opp_table; + bool has_opp_table; }; /* Common SE registers */ -- cgit v1.2.3-59-g8ed1b From 19e528dc9af29169fa7cdfa61071805fdef504c6 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:28 +0800 Subject: net: qos: add tc police offloading action with max frame size limit Current police offloading support the 'burst'' and 'rate_bytes_ps'. Some hardware own the capability to limit the frame size. If the frame size larger than the setting, the frame would be dropped. For the police action itself already accept the 'mtu' parameter in tc command. But not extend to tc flower offloading. So extend 'mtu' to tc flower offloading. Signed-off-by: Po Liu Signed-off-by: David S. Miller --- include/net/flow_offload.h | 1 + include/net/tc_act/tc_police.h | 10 ++++++++++ net/sched/cls_api.c | 1 + 3 files changed, 12 insertions(+) (limited to 'include') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 00c15f14c434..c2ef19c6b27d 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -234,6 +234,7 @@ struct flow_action_entry { struct { /* FLOW_ACTION_POLICE */ s64 burst; u64 rate_bytes_ps; + u32 mtu; } police; struct { /* FLOW_ACTION_CT */ int action; diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index f098ad4424be..cd973b10ae8c 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -69,4 +69,14 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act) return params->tcfp_burst; } +static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->tcfp_mtu; +} + #endif /* __NET_TC_POLICE_H */ diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a00a203b2ef5..6aba7d5ba1ec 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3658,6 +3658,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->police.burst = tcf_police_tcfp_burst(act); entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); + entry->police.mtu = tcf_police_tcfp_mtu(act); } else if (is_tcf_ct(act)) { entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); -- cgit v1.2.3-59-g8ed1b From 627e39b1399e72e53895eec6bbec30199ed43de2 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:30 +0800 Subject: net: qos: police action add index for tc flower offloading Hardware device may include more than one police entry. Specifying the action's index make it possible for several tc filters to share the same police action when installing the filters. Propagate this index to device drivers through the flow offload intermediate representation, so that drivers could share a single hardware policer between multiple filters. v1->v2 changes: - Update the commit message suggest by Ido Schimmel Signed-off-by: Po Liu Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 1 + net/sched/cls_api.c | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index c2ef19c6b27d..eed98075b1ae 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -232,6 +232,7 @@ struct flow_action_entry { bool truncate; } sample; struct { /* FLOW_ACTION_POLICE */ + u32 index; s64 burst; u64 rate_bytes_ps; u32 mtu; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 6aba7d5ba1ec..fdc4c89ca1fa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3659,6 +3659,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); entry->police.mtu = tcf_police_tcfp_mtu(act); + entry->police.index = act->tcfa_index; } else if (is_tcf_ct(act)) { entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); -- cgit v1.2.3-59-g8ed1b From 5e48a03bb9bff1728164040d71aa03cdb3cdfca2 Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Wed, 24 Jun 2020 01:09:23 -0700 Subject: platform/chrome: cros_ec: Add TBT pd_ctrl fields To support Thunderbolt compatibility mode, synchronize ec_response_usb_pd_control_v2 with the Chrome EC version, so that we get the Thunderbolt related control fields and macros. Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra --- include/linux/platform_data/cros_ec_commands.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index a7b0fc440c35..b808570bdd04 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 { #define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ #define USBC_PD_CC_DFP_ATTACHED 5 /* DPF attached to usbc */ +/* Active/Passive Cable */ +#define USB_PD_CTRL_ACTIVE_CABLE BIT(0) +/* Optical/Non-optical cable */ +#define USB_PD_CTRL_OPTICAL_CABLE BIT(1) +/* 3rd Gen TBT device (or AMA)/2nd gen tbt Adapter */ +#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2) +/* Active Link Uni-Direction */ +#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3) + struct ec_response_usb_pd_control_v2 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; - uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ - uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ - /* CL:1500994 Current cable type */ - uint8_t reserved_cable_type; + uint8_t cc_state; /* enum pd_cc_states representing cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + uint8_t reserved; /* Reserved for future use */ + uint8_t control_flags; /* USB_PD_CTRL_*flags */ + uint8_t cable_speed; /* TBT_SS_* cable speed */ + uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */ } __ec_align1; #define EC_CMD_USB_PD_PORTS 0x0102 -- cgit v1.2.3-59-g8ed1b From 590d69796346353878b275c5512c664e3f875f24 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 19 Dec 2019 16:44:52 -0500 Subject: sched: Force the address order of each sched class descriptor In order to make a micro optimization in pick_next_task(), the order of the sched class descriptor address must be in the same order as their priority to each other. 
That is: &idle_sched_class < &fair_sched_class < &rt_sched_class < &dl_sched_class < &stop_sched_class In order to guarantee this order of the sched class descriptors, add each one into their own data section and force the order in the linker script. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/157675913272.349305.8936736338884044103.stgit@localhost.localdomain --- include/asm-generic/vmlinux.lds.h | 13 +++++++++++++ kernel/sched/deadline.c | 3 ++- kernel/sched/fair.c | 3 ++- kernel/sched/idle.c | 3 ++- kernel/sched/rt.c | 3 ++- kernel/sched/stop_task.c | 3 ++- 6 files changed, 23 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..2186d7b01af6 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -108,6 +108,18 @@ #define SBSS_MAIN .sbss #endif +/* + * The order of the sched class addresses are important, as they are + * used to determine the order of the priority of each sched class in + * relation to each other. + */ +#define SCHED_DATA \ + *(__idle_sched_class) \ + *(__fair_sched_class) \ + *(__rt_sched_class) \ + *(__dl_sched_class) \ + *(__stop_sched_class) + /* * Align to a 32 byte boundary equal to the * alignment gcc 4.5 uses for a struct @@ -388,6 +400,7 @@ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ *(.rodata) *(.rodata.*) \ + SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . = ALIGN(8); \ __start___tracepoints_ptrs = .; \ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index d4708e29008f..d9e79462993b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2479,7 +2479,8 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p, } } -const struct sched_class dl_sched_class = { +const struct sched_class dl_sched_class + __attribute__((section("__dl_sched_class"))) = { .next = &rt_sched_class, .enqueue_task = enqueue_task_dl, .dequeue_task = dequeue_task_dl, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0424a0af5f87..3365f6b07c36 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11122,7 +11122,8 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task /* * All the scheduling class methods: */ -const struct sched_class fair_sched_class = { +const struct sched_class fair_sched_class + __attribute__((section("__fair_sched_class"))) = { .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 8d75ca201484..f5806295356b 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -453,7 +453,8 @@ static void update_curr_idle(struct rq *rq) /* * Simple, special scheduling class for the per-CPU idle tasks: */ -const struct sched_class idle_sched_class = { +const struct sched_class idle_sched_class + __attribute__((section("__idle_sched_class"))) = { /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index f395ddb75f38..6543d4430331 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2429,7 +2429,8 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) return 0; } -const struct sched_class rt_sched_class = { +const struct sched_class rt_sched_class + __attribute__((section("__rt_sched_class"))) = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, 
.dequeue_task = dequeue_task_rt, diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 3e50a6a8f1e5..f4bbd54caae0 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -109,7 +109,8 @@ static void update_curr_stop(struct rq *rq) /* * Simple, special scheduling class for the per-CPU stop tasks: */ -const struct sched_class stop_sched_class = { +const struct sched_class stop_sched_class + __attribute__((section("__stop_sched_class"))) = { .next = &dl_sched_class, .enqueue_task = enqueue_task_stop, -- cgit v1.2.3-59-g8ed1b From c3a340f7e7eadac7662ab104ceb16432e5a4c6b2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 19 Dec 2019 16:44:53 -0500 Subject: sched: Have sched_class_highest define by vmlinux.lds.h Now that the sched_class descriptors are defined by the linker script, and this needs to be aware of the existance of stop_sched_class when SMP is enabled or not, as it is used as the "highest" priority when defined. Move the declaration of sched_class_highest to the same location in the linker script that inserts stop_sched_class, and this will also make it easier to see what should be defined as the highest class, as this linker script location defines the priorities as well. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20191219214558.682913590@goodmis.org --- include/asm-generic/vmlinux.lds.h | 5 ++++- kernel/sched/core.c | 8 ++++++++ kernel/sched/sched.h | 17 +++++++++-------- 3 files changed, 21 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 2186d7b01af6..66fb84c3dc7e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -114,11 +114,14 @@ * relation to each other. 
*/ #define SCHED_DATA \ + STRUCT_ALIGN(); \ + __begin_sched_classes = .; \ *(__idle_sched_class) \ *(__fair_sched_class) \ *(__rt_sched_class) \ *(__dl_sched_class) \ - *(__stop_sched_class) + *(__stop_sched_class) \ + __end_sched_classes = .; /* * Align to a 32 byte boundary equal to the diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0208b71bef80..81640fe0eae8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6646,6 +6646,14 @@ void __init sched_init(void) unsigned long ptr = 0; int i; + /* Make sure the linker didn't screw up */ + BUG_ON(&idle_sched_class + 1 != &fair_sched_class || + &fair_sched_class + 1 != &rt_sched_class || + &rt_sched_class + 1 != &dl_sched_class); +#ifdef CONFIG_SMP + BUG_ON(&dl_sched_class + 1 != &stop_sched_class); +#endif + wait_bit_init(); #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 336887607b3d..4165c06d1d7b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1811,7 +1811,7 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_change_group)(struct task_struct *p, int type); #endif -}; +} __aligned(32); /* STRUCT_ALIGN(), vmlinux.lds.h */ static inline void put_prev_task(struct rq *rq, struct task_struct *prev) { @@ -1825,17 +1825,18 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } -#ifdef CONFIG_SMP -#define sched_class_highest (&stop_sched_class) -#else -#define sched_class_highest (&dl_sched_class) -#endif +/* Defined in include/asm-generic/vmlinux.lds.h */ +extern struct sched_class __begin_sched_classes[]; +extern struct sched_class __end_sched_classes[]; + +#define sched_class_highest (__end_sched_classes - 1) +#define sched_class_lowest (__begin_sched_classes - 1) #define for_class_range(class, _from, _to) \ - for (class = (_from); class != (_to); class = class->next) + for (class = (_from); class != (_to); class--) #define for_each_class(class) \ - for_class_range(class, sched_class_highest, NULL) + for_class_range(class, sched_class_highest, sched_class_lowest) extern const struct sched_class stop_sched_class; extern const struct sched_class dl_sched_class; -- cgit v1.2.3-59-g8ed1b From 01e377c539ca52a6c753d0fdbe93b3b8fcd66a1c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 18 Jun 2020 21:08:10 +0200 Subject: sched/core: Remove mmdrop() definition Commit bf2c59fce4074 ("sched/core: Fix illegal RCU from offline CPUs") introduced a definition for mmdrop() but a a few lines above there is already mmdrop() defined as static inline. Remove the newly introduced mmdrop() definition. 
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200618190810.790211-1-bigeasy@linutronix.de --- include/linux/sched/mm.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 480a4d1b7dd8..a98604ea76f1 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,8 +49,6 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } -void mmdrop(struct mm_struct *mm); - /* * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_lock for writing before modifying the -- cgit v1.2.3-59-g8ed1b From 10e834099d38dd2c02bf2bd5feaa3997cfcf139f Mon Sep 17 00:00:00 2001 From: Tzung-Bi Shih Date: Thu, 25 Jun 2020 23:35:41 +0800 Subject: ASoC: core: move definition of enum snd_soc_bias_level To fix compilation error: - error: field 'XXX' has incomplete type Moves definition of enum snd_soc_bias_level from soc.h to soc-dapm.h. Signed-off-by: Tzung-Bi Shih Link: https://lore.kernel.org/r/20200625153543.85039-2-tzungbi@google.com Signed-off-by: Mark Brown --- include/sound/soc-dapm.h | 18 ++++++++++++++++++ include/sound/soc.h | 18 ------------------ 2 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index cc3dcb815282..75467f2ed405 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -376,6 +376,24 @@ struct snd_soc_dapm_widget_list; struct snd_soc_dapm_update; enum snd_soc_dapm_direction; +/* + * Bias levels + * + * @ON: Bias is fully on for audio playback and capture operations. + * @PREPARE: Prepare for audio operations. Called before DAPM switching for + * stream start and stop operations. + * @STANDBY: Low power standby state when no playback/capture operations are + * in progress. NOTE: The transition time between STANDBY and ON + * should be as fast as possible and no longer than 10ms. + * @OFF: Power Off. No restrictions on transition times. + */ +enum snd_soc_bias_level { + SND_SOC_BIAS_OFF = 0, + SND_SOC_BIAS_STANDBY = 1, + SND_SOC_BIAS_PREPARE = 2, + SND_SOC_BIAS_ON = 3, +}; + int dapm_regulator_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event); int dapm_clock_event(struct snd_soc_dapm_widget *w, diff --git a/include/sound/soc.h b/include/sound/soc.h index 33aceadebd03..6791b7570a67 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -368,24 +368,6 @@ #define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \ const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts) -/* - * Bias levels - * - * @ON: Bias is fully on for audio playback and capture operations. - * @PREPARE: Prepare for audio operations. Called before DAPM switching for - * stream start and stop operations. - * @STANDBY: Low power standby state when no playback/capture operations are - * in progress. NOTE: The transition time between STANDBY and ON - * should be as fast as possible and no longer than 10ms. - * @OFF: Power Off. No restrictions on transition times. 
- */ -enum snd_soc_bias_level { - SND_SOC_BIAS_OFF = 0, - SND_SOC_BIAS_STANDBY = 1, - SND_SOC_BIAS_PREPARE = 2, - SND_SOC_BIAS_ON = 3, -}; - struct device_node; struct snd_jack; struct snd_soc_card; -- cgit v1.2.3-59-g8ed1b From 3d62ef4280a377bb2ccaee4e8f6c5093f5b8f9d4 Mon Sep 17 00:00:00 2001 From: Tzung-Bi Shih Date: Thu, 25 Jun 2020 23:35:42 +0800 Subject: ASoC: dapm: declare missing structure prototypes To fix compilation warnings: - struct 'snd_soc_pcm_runtime' declared inside parameter list will not be visible outside of this definition or declaration - struct 'soc_enum' declared inside parameter list will not be visible outside of this definition or declaration Declares the missing structure prototypes. Signed-off-by: Tzung-Bi Shih Link: https://lore.kernel.org/r/20200625153543.85039-3-tzungbi@google.com Signed-off-by: Mark Brown --- include/sound/soc-dapm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 75467f2ed405..c3039e97929a 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -16,6 +16,8 @@ #include struct device; +struct snd_soc_pcm_runtime; +struct soc_enum; /* widget has no PM register bit */ #define SND_SOC_NOPM -1 -- cgit v1.2.3-59-g8ed1b From da6690767cbd344998f36081815c85f3d467e78c Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 25 Jun 2020 17:36:05 +0100 Subject: regulator: consumer: Supply missing prototypes for 3 core functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit regulator_suspend_enable(), regulator_suspend_disable() and regulator_set_suspend_voltage() are all exported members of the API, but are all missing prototypes. Fixes the following W=1 warning(s): drivers/regulator/core.c:3805:5: warning: no previous prototype for ‘regulator_suspend_enable’ [-Wmissing-prototypes] 3805 | int regulator_suspend_enable(struct regulator_dev *rdev, | ^~~~~~~~~~~~~~~~~~~~~~~~ drivers/regulator/core.c:3812:5: warning: no previous prototype for ‘regulator_suspend_disable’ [-Wmissing-prototypes] 3812 | int regulator_suspend_disable(struct regulator_dev *rdev, | ^~~~~~~~~~~~~~~~~~~~~~~~~ drivers/regulator/core.c:3851:5: warning: no previous prototype for ‘regulator_set_suspend_voltage’ [-Wmissing-prototypes] 3851 | int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20200625163614.4001403-2-lee.jones@linaro.org Signed-off-by: Mark Brown --- include/linux/regulator/consumer.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 6a92fd3105a3..2024944fd2f7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -32,10 +32,12 @@ #define __LINUX_REGULATOR_CONSUMER_H_ #include +#include struct device; struct notifier_block; struct regmap; +struct regulator_dev; /* * Regulator operating modes. 
@@ -277,6 +279,14 @@ int regulator_unregister_notifier(struct regulator *regulator, void devm_regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb); +/* regulator suspend */ +int regulator_suspend_enable(struct regulator_dev *rdev, + suspend_state_t state); +int regulator_suspend_disable(struct regulator_dev *rdev, + suspend_state_t state); +int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, + int max_uV, suspend_state_t state); + /* driver data - core doesn't touch */ void *regulator_get_drvdata(struct regulator *regulator); void regulator_set_drvdata(struct regulator *regulator, void *data); -- cgit v1.2.3-59-g8ed1b From c6d5d843d9b6e8dca3768250970f0d0a1e3d4fb0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 24 Jun 2020 11:06:54 +0100 Subject: net: phylink: add phylink_speed_(up|down) interface Add an interface for the phy_speed_(up|down) functions when a driver makes use of phylink. These pass the call through to phylib when we have a normal PHY attached (i.o.w., not a PHY on a SFP module.) Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/phylink.h | 2 ++ 2 files changed, 50 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7ce787c227b3..7cda1646bbf7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1826,6 +1826,54 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL_GPL(phylink_mii_ioctl); +/** + * phylink_speed_down() - set the non-SFP PHY to lowest speed supported by both + * link partners + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @sync: perform action synchronously + * + * If we have a PHY that is not part of a SFP module, then set the speed + * as described in the phy_speed_down() function. Please see this function + * for a description of the @sync parameter. + * + * Returns zero if there is no PHY, otherwise as per phy_speed_down(). + */ +int phylink_speed_down(struct phylink *pl, bool sync) +{ + int ret = 0; + + ASSERT_RTNL(); + + if (!pl->sfp_bus && pl->phydev) + ret = phy_speed_down(pl->phydev, sync); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_speed_down); + +/** + * phylink_speed_up() - restore the advertised speeds prior to the call to + * phylink_speed_down() + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * If we have a PHY that is not part of a SFP module, then restore the + * PHY speeds as per phy_speed_up(). + * + * Returns zero if there is no PHY, otherwise as per phy_speed_up(). 
+ */ +int phylink_speed_up(struct phylink *pl) +{ + int ret = 0; + + ASSERT_RTNL(); + + if (!pl->sfp_bus && pl->phydev) + ret = phy_speed_up(pl->phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_speed_up); + static void phylink_sfp_attach(void *upstream, struct sfp_bus *bus) { struct phylink *pl = upstream; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cc5b452a184e..b32b8b45421b 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -392,6 +392,8 @@ int phylink_init_eee(struct phylink *, bool); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); +int phylink_speed_down(struct phylink *pl, bool sync); +int phylink_speed_up(struct phylink *pl); #define phylink_zero(bm) \ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS) -- cgit v1.2.3-59-g8ed1b From 92252eec913b2dd5e7b5de11ea3efa2e64d65cf4 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:02 -0500 Subject: net: phy: Add a helper to return the index for of the internal delay Add a helper function that will return the index in the array for the passed in internal delay value. The helper requires the array, size and delay value. The helper will then return the index for the exact match or return the index for the index to the closest smaller value. Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 99 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 4 ++ 2 files changed, 103 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 29ef4456ac25..6d47485e68f9 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -31,6 +31,7 @@ #include #include #include +#include MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); @@ -2708,6 +2709,104 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause) } EXPORT_SYMBOL(phy_get_pause); +#if IS_ENABLED(CONFIG_OF_MDIO) +static int phy_get_int_delay_property(struct device *dev, const char *name) +{ + s32 int_delay; + int ret; + + ret = device_property_read_u32(dev, name, &int_delay); + if (ret) + return ret; + + return int_delay; +} +#else +static int phy_get_int_delay_property(struct device *dev, const char *name) +{ + return -EINVAL; +} +#endif + +/** + * phy_get_delay_index - returns the index of the internal delay + * @phydev: phy_device struct + * @dev: pointer to the devices device struct + * @delay_values: array of delays the PHY supports + * @size: the size of the delay array + * @is_rx: boolean to indicate to get the rx internal delay + * + * Returns the index within the array of internal delay passed in. + * If the device property is not present then the interface type is checked + * if the interface defines use of internal delay then a 1 is returned otherwise + * a 0 is returned. + * The array must be in ascending order. If PHY does not have an ascending order + * array then size = 0 and the value of the delay property is returned. + * Return -EINVAL if the delay is invalid or cannot be found. 
+ */ +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx) +{ + s32 delay; + int i; + + if (is_rx) { + delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps"); + if (delay < 0 && size == 0) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + return 1; + else + return 0; + } + + } else { + delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps"); + if (delay < 0 && size == 0) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + return 1; + else + return 0; + } + } + + if (delay < 0) + return delay; + + if (delay && size == 0) + return delay; + + if (delay < delay_values[0] || delay > delay_values[size - 1]) { + phydev_err(phydev, "Delay %d is out of range\n", delay); + return -EINVAL; + } + + if (delay == delay_values[0]) + return 0; + + for (i = 1; i < size; i++) { + if (delay == delay_values[i]) + return i; + + /* Find an approximate index by looking up the table */ + if (delay > delay_values[i - 1] && + delay < delay_values[i]) { + if (delay - delay_values[i - 1] < + delay_values[i] - delay) + return i - 1; + else + return i; + } + } + + phydev_err(phydev, "error finding internal delay index for %d\n", + delay); + + return -EINVAL; +} +EXPORT_SYMBOL(phy_get_internal_delay); + static bool phy_drv_supports_irq(struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->ack_interrupt; diff --git a/include/linux/phy.h b/include/linux/phy.h index 6fb8f302978d..2c00374dc996 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1443,6 +1443,10 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); + +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx); + void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, bool *tx_pause, bool *rx_pause); -- cgit v1.2.3-59-g8ed1b From f3bdc62fd82ed93dbe4d049eacba310de7eb2a6a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 17 Jun 2020 15:58:23 +0200 Subject: blktrace: Provide event for request merging Currently blk-mq does not report any event when two requests get merged in the elevator. This then results in difficult to understand sequence of events like: ... 8,0 34 1579 0.608765271 2718 I WS 215023504 + 40 [dbench] 8,0 34 1584 0.609184613 2719 A WS 215023544 + 56 <- (8,4) 2160568 8,0 34 1585 0.609184850 2719 Q WS 215023544 + 56 [dbench] 8,0 34 1586 0.609188524 2719 G WS 215023544 + 56 [dbench] 8,0 3 602 0.609684162 773 D WS 215023504 + 96 [kworker/3:1H] 8,0 34 1591 0.609843593 0 C WS 215023504 + 96 [0] and you can only guess (after quite some headscratching since the above excerpt is intermixed with a lot of other IO) that request 215023544+56 got merged to request 215023504+40. Provide proper event for request merging like we used to do in the legacy block layer. 
Signed-off-by: Jan Kara Signed-off-by: Jens Axboe --- block/blk-merge.c | 2 ++ include/trace/events/block.h | 15 +++++++++++++++ kernel/trace/blktrace.c | 10 ++++++++++ 3 files changed, 27 insertions(+) (limited to 'include') diff --git a/block/blk-merge.c b/block/blk-merge.c index f0b0bae075a0..9c9fb21584b6 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -793,6 +793,8 @@ static struct request *attempt_merge(struct request_queue *q, */ blk_account_io_merge_request(next); + trace_block_rq_merge(q, next); + /* * ownership of bio passed from next to req, return 'next' for * the caller to free diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 93b114226af8..34d64ca306b1 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -211,6 +211,21 @@ DEFINE_EVENT(block_rq, block_rq_issue, TP_ARGS(q, rq) ); +/** + * block_rq_merge - merge request with another one in the elevator + * @q: queue holding operation + * @rq: block IO operation operation request + * + * Called when block operation request @rq from queue @q is merged to another + * request queued in the elevator. + */ +DEFINE_EVENT(block_rq, block_rq_merge, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + /** * block_bio_bounce - used bounce buffer when processing block operation * @q: queue holding the block operation diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index c086c38f4954..7ba62d68885a 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -860,6 +860,13 @@ static void blk_add_trace_rq_issue(void *ignore, blk_trace_request_get_cgid(q, rq)); } +static void blk_add_trace_rq_merge(void *ignore, + struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE, + blk_trace_request_get_cgid(q, rq)); +} + static void blk_add_trace_rq_requeue(void *ignore, struct request_queue *q, struct request *rq) @@ -1144,6 +1151,8 @@ static void blk_register_tracepoints(void) WARN_ON(ret); ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); WARN_ON(ret); + ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); + WARN_ON(ret); ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); WARN_ON(ret); ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); @@ -1190,6 +1199,7 @@ static void blk_unregister_tracepoints(void) unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); + unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); -- cgit v1.2.3-59-g8ed1b From d037cb4ae20407df89491f9c2d521ac0723aa15d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 18 Jun 2020 17:00:22 +1000 Subject: crypto: api - Prune inclusions in crypto.h We haven't used string.h since the memcpy calls were removed so this patch removes its inclusion. The file uaccess.h isn't needed at all. However, removing it reveals that we do need to add an inclusion for refcount.h. 
Signed-off-by: Herbert Xu --- include/linux/crypto.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 763863dbc079..bc5d2d4bfc3d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -16,9 +16,8 @@ #include #include #include +#include #include -#include -#include #include /* -- cgit v1.2.3-59-g8ed1b From fe21b6c3a65ca74cc4d0909635649bea48b115b3 Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Mon, 4 May 2020 09:43:48 -0700 Subject: i40e: Move client header location Move i40e_client.h to include/linux/net/intel/* since its shared between i40iw and i40e. Signed-off-by: Shiraz Saleem Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/infiniband/hw/i40iw/Makefile | 1 - drivers/infiniband/hw/i40iw/i40iw.h | 2 +- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_client.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_client.h | 203 -------------------------- include/linux/net/intel/i40e_client.h | 203 ++++++++++++++++++++++++++ 6 files changed, 206 insertions(+), 207 deletions(-) delete mode 100644 drivers/net/ethernet/intel/i40e/i40e_client.h create mode 100644 include/linux/net/intel/i40e_client.h (limited to 'include') diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile index 8942f8229945..34da9eba8a7c 100644 --- a/drivers/infiniband/hw/i40iw/Makefile +++ b/drivers/infiniband/hw/i40iw/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I $(srctree)/drivers/net/ethernet/intel/i40e obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 49d92638e0db..25747b85a79c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +58,6 @@ #include "i40iw_d.h" #include "i40iw_hmc.h" -#include #include "i40iw_type.h" #include "i40iw_p.h" #include diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e95b8da45e07..5ff0828a6f50 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -38,7 +38,7 @@ #include #include "i40e_type.h" #include "i40e_prototype.h" -#include "i40e_client.h" +#include #include #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index e81530ca08d0..befd3018183f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -3,10 +3,10 @@ #include #include +#include #include "i40e.h" #include "i40e_prototype.h" -#include "i40e_client.h" static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; static struct i40e_client *registered_client; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h deleted file mode 100644 index 72994baf4941..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ /dev/null @@ -1,203 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_CLIENT_H_ -#define _I40E_CLIENT_H_ - -#define I40E_CLIENT_STR_LENGTH 10 - -/* Client interface version should be updated anytime there is a change in the - * existing APIs or data structures. 
- */ -#define I40E_CLIENT_VERSION_MAJOR 0 -#define I40E_CLIENT_VERSION_MINOR 01 -#define I40E_CLIENT_VERSION_BUILD 00 -#define I40E_CLIENT_VERSION_STR \ - __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ - __stringify(I40E_CLIENT_VERSION_MINOR) "." \ - __stringify(I40E_CLIENT_VERSION_BUILD) - -struct i40e_client_version { - u8 major; - u8 minor; - u8 build; - u8 rsvd; -}; - -enum i40e_client_state { - __I40E_CLIENT_NULL, - __I40E_CLIENT_REGISTERED -}; - -enum i40e_client_instance_state { - __I40E_CLIENT_INSTANCE_NONE, - __I40E_CLIENT_INSTANCE_OPENED, -}; - -struct i40e_ops; -struct i40e_client; - -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. - */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 -#define I40E_QUEUE_INVALID_IDX 0xFFFF - -struct i40e_qv_info { - u32 v_idx; /* msix_vector */ - u16 ceq_idx; - u16 aeq_idx; - u8 itr_idx; -}; - -struct i40e_qvlist_info { - u32 num_vectors; - struct i40e_qv_info qv_info[1]; -}; - -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF - -/* set of LAN parameters useful for clients managed by LAN */ - -/* Struct to hold per priority info */ -struct i40e_prio_qos_params { - u16 qs_handle; /* qs handle for prio */ - u8 tc; /* TC mapped to prio */ - u8 reserved; -}; - -#define I40E_CLIENT_MAX_USER_PRIORITY 8 -/* Struct to hold Client QoS */ -struct i40e_qos_params { - struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; -}; - -struct i40e_params { - struct i40e_qos_params qos; - u16 mtu; -}; - -/* Structure to hold Lan device info for a client device */ -struct i40e_info { - struct i40e_client_version version; - u8 lanmac[6]; - struct net_device *netdev; - struct pci_dev *pcidev; - u8 __iomem *hw_addr; - u8 fid; /* function id, PF id or VF id */ -#define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 - u8 ftype; /* function type, PF or VF */ - void *pf; - - /* All L2 params that could change during the life span of the PF - * and needs to be communicated to the client when they change - */ - struct i40e_qvlist_info *qvlist_info; - struct i40e_params params; - struct i40e_ops *ops; - - u16 msix_count; /* number of msix vectors*/ - /* Array down below will be dynamically allocated based on msix_count */ - struct msix_entry *msix_entries; - u16 itr_index; /* Which ITR index the PE driver is suppose to use */ - u16 fw_maj_ver; /* firmware major version */ - u16 fw_min_ver; /* firmware minor version */ - u32 fw_build; /* firmware build number */ -}; - -#define I40E_CLIENT_RESET_LEVEL_PF 1 -#define I40E_CLIENT_RESET_LEVEL_CORE 2 -#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) - -struct i40e_ops { - /* setup_q_vector_list enables queues with a particular vector */ - int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, - struct i40e_qvlist_info *qv_info); - - int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, - u32 vf_id, u8 *msg, u16 len); - - /* If the PE Engine is unresponsive, RDMA driver can request a reset. - * The level helps determine the level of reset being requested. - */ - void (*request_reset)(struct i40e_info *ldev, - struct i40e_client *client, u32 level); - - /* API for the RDMA driver to set certain VSI flags that control - * PE Engine. 
- */ - int (*update_vsi_ctxt)(struct i40e_info *ldev, - struct i40e_client *client, - bool is_vf, u32 vf_id, - u32 flag, u32 valid_flag); -}; - -struct i40e_client_ops { - /* Should be called from register_client() or whenever PF is ready - * to create a specific client instance. - */ - int (*open)(struct i40e_info *ldev, struct i40e_client *client); - - /* Should be called when netdev is unavailable or when unregister - * call comes in. If the close is happenening due to a reset being - * triggered set the reset bit to true. - */ - void (*close)(struct i40e_info *ldev, struct i40e_client *client, - bool reset); - - /* called when some l2 managed parameters changes - mtu */ - void (*l2_param_change)(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_params *params); - - int (*virtchnl_receive)(struct i40e_info *ldev, - struct i40e_client *client, u32 vf_id, - u8 *msg, u16 len); - - /* called when a VF is reset by the PF */ - void (*vf_reset)(struct i40e_info *ldev, - struct i40e_client *client, u32 vf_id); - - /* called when the number of VFs changes */ - void (*vf_enable)(struct i40e_info *ldev, - struct i40e_client *client, u32 num_vfs); - - /* returns true if VF is capable of specified offload */ - int (*vf_capable)(struct i40e_info *ldev, - struct i40e_client *client, u32 vf_id); -}; - -/* Client device */ -struct i40e_client_instance { - struct list_head list; - struct i40e_info lan_info; - struct i40e_client *client; - unsigned long state; -}; - -struct i40e_client { - struct list_head list; /* list of registered clients */ - char name[I40E_CLIENT_STR_LENGTH]; - struct i40e_client_version version; - unsigned long state; /* client state */ - atomic_t ref_cnt; /* Count of all the client devices of this kind */ - u32 flags; -#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) - u8 type; -#define I40E_CLIENT_IWARP 0 - const struct i40e_client_ops *ops; /* client ops provided by the client */ -}; - -static inline bool i40e_client_is_registered(struct i40e_client *client) -{ - return test_bit(__I40E_CLIENT_REGISTERED, &client->state); -} - -/* used by clients */ -int i40e_register_client(struct i40e_client *client); -int i40e_unregister_client(struct i40e_client *client); - -#endif /* _I40E_CLIENT_H_ */ diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h new file mode 100644 index 000000000000..72994baf4941 --- /dev/null +++ b/include/linux/net/intel/i40e_client.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_CLIENT_H_ +#define _I40E_CLIENT_H_ + +#define I40E_CLIENT_STR_LENGTH 10 + +/* Client interface version should be updated anytime there is a change in the + * existing APIs or data structures. + */ +#define I40E_CLIENT_VERSION_MAJOR 0 +#define I40E_CLIENT_VERSION_MINOR 01 +#define I40E_CLIENT_VERSION_BUILD 00 +#define I40E_CLIENT_VERSION_STR \ + __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ + __stringify(I40E_CLIENT_VERSION_MINOR) "." \ + __stringify(I40E_CLIENT_VERSION_BUILD) + +struct i40e_client_version { + u8 major; + u8 minor; + u8 build; + u8 rsvd; +}; + +enum i40e_client_state { + __I40E_CLIENT_NULL, + __I40E_CLIENT_REGISTERED +}; + +enum i40e_client_instance_state { + __I40E_CLIENT_INSTANCE_NONE, + __I40E_CLIENT_INSTANCE_OPENED, +}; + +struct i40e_ops; +struct i40e_client; + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. 
+ * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. + */ +#define I40E_QUEUE_TYPE_PE_AEQ 0x80 +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_qvlist_info { + u32 num_vectors; + struct i40e_qv_info qv_info[1]; +}; + +#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF + +/* set of LAN parameters useful for clients managed by LAN */ + +/* Struct to hold per priority info */ +struct i40e_prio_qos_params { + u16 qs_handle; /* qs handle for prio */ + u8 tc; /* TC mapped to prio */ + u8 reserved; +}; + +#define I40E_CLIENT_MAX_USER_PRIORITY 8 +/* Struct to hold Client QoS */ +struct i40e_qos_params { + struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; +}; + +struct i40e_params { + struct i40e_qos_params qos; + u16 mtu; +}; + +/* Structure to hold Lan device info for a client device */ +struct i40e_info { + struct i40e_client_version version; + u8 lanmac[6]; + struct net_device *netdev; + struct pci_dev *pcidev; + u8 __iomem *hw_addr; + u8 fid; /* function id, PF id or VF id */ +#define I40E_CLIENT_FTYPE_PF 0 +#define I40E_CLIENT_FTYPE_VF 1 + u8 ftype; /* function type, PF or VF */ + void *pf; + + /* All L2 params that could change during the life span of the PF + * and needs to be communicated to the client when they change + */ + struct i40e_qvlist_info *qvlist_info; + struct i40e_params params; + struct i40e_ops *ops; + + u16 msix_count; /* number of msix vectors*/ + /* Array down below will be dynamically allocated based on msix_count */ + struct msix_entry *msix_entries; + u16 itr_index; /* Which ITR index the PE driver is suppose to use */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ +}; + +#define I40E_CLIENT_RESET_LEVEL_PF 1 +#define I40E_CLIENT_RESET_LEVEL_CORE 2 +#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) + +struct i40e_ops { + /* setup_q_vector_list enables queues with a particular vector */ + int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, + struct i40e_qvlist_info *qv_info); + + int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, + u32 vf_id, u8 *msg, u16 len); + + /* If the PE Engine is unresponsive, RDMA driver can request a reset. + * The level helps determine the level of reset being requested. + */ + void (*request_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 level); + + /* API for the RDMA driver to set certain VSI flags that control + * PE Engine. + */ + int (*update_vsi_ctxt)(struct i40e_info *ldev, + struct i40e_client *client, + bool is_vf, u32 vf_id, + u32 flag, u32 valid_flag); +}; + +struct i40e_client_ops { + /* Should be called from register_client() or whenever PF is ready + * to create a specific client instance. + */ + int (*open)(struct i40e_info *ldev, struct i40e_client *client); + + /* Should be called when netdev is unavailable or when unregister + * call comes in. If the close is happenening due to a reset being + * triggered set the reset bit to true. 
+ */ + void (*close)(struct i40e_info *ldev, struct i40e_client *client, + bool reset); + + /* called when some l2 managed parameters changes - mtu */ + void (*l2_param_change)(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_params *params); + + int (*virtchnl_receive)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id, + u8 *msg, u16 len); + + /* called when a VF is reset by the PF */ + void (*vf_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); + + /* called when the number of VFs changes */ + void (*vf_enable)(struct i40e_info *ldev, + struct i40e_client *client, u32 num_vfs); + + /* returns true if VF is capable of specified offload */ + int (*vf_capable)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); +}; + +/* Client device */ +struct i40e_client_instance { + struct list_head list; + struct i40e_info lan_info; + struct i40e_client *client; + unsigned long state; +}; + +struct i40e_client { + struct list_head list; /* list of registered clients */ + char name[I40E_CLIENT_STR_LENGTH]; + struct i40e_client_version version; + unsigned long state; /* client state */ + atomic_t ref_cnt; /* Count of all the client devices of this kind */ + u32 flags; +#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) +#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) + u8 type; +#define I40E_CLIENT_IWARP 0 + const struct i40e_client_ops *ops; /* client ops provided by the client */ +}; + +static inline bool i40e_client_is_registered(struct i40e_client *client) +{ + return test_bit(__I40E_CLIENT_REGISTERED, &client->state); +} + +/* used by clients */ +int i40e_register_client(struct i40e_client *client); +int i40e_unregister_client(struct i40e_client *client); + +#endif /* _I40E_CLIENT_H_ */ -- cgit v1.2.3-59-g8ed1b From 3c98f9ee6bc280499cbcb6f8e42c001c3bd7caa1 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 6 Jan 2020 16:09:33 -0800 Subject: i40e: remove unused defines Remove all the unused defines as they are just dead weight. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 24 - drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 497 +-- drivers/net/ethernet/intel/i40e/i40e_common.c | 4 - drivers/net/ethernet/intel/i40e/i40e_dcb.h | 5 - drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 1 - drivers/net/ethernet/intel/i40e/i40e_devids.h | 3 - drivers/net/ethernet/intel/i40e/i40e_hmc.h | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 10 +- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 1 - drivers/net/ethernet/intel/i40e/i40e_register.h | 4656 -------------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 25 - drivers/net/ethernet/intel/i40e/i40e_type.h | 82 - drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1 - include/linux/net/intel/i40e_client.h | 9 - 14 files changed, 2 insertions(+), 5317 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5ff0828a6f50..8151671e5e0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -60,17 +60,14 @@ (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_MAX_VF_QUEUES 16 -#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 
128 : 64) -#define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) -#define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) @@ -92,10 +89,6 @@ #define I40E_OEM_SNAP_SHIFT 16 #define I40E_OEM_RELEASE_MASK 0x0000ffff -/* The values in here are decimal coded as hex as is the case in the NVM map*/ -#define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x40 - #define I40E_RX_DESC(R, i) \ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) #define I40E_TX_DESC(R, i) \ @@ -105,9 +98,6 @@ #define I40E_TX_FDIRDESC(R, i) \ (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) -/* default to trying for four seconds */ -#define I40E_TRY_LINK_TIMEOUT (4 * HZ) - /* BW rate limiting */ #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ #define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */ @@ -295,9 +285,6 @@ struct i40e_cloud_filter { u8 tunnel_type; }; -#define I40E_DCB_PRIO_TYPE_STRICT 0 -#define I40E_DCB_PRIO_TYPE_ETS 1 -#define I40E_DCB_STRICT_PRIO_CREDITS 127 /* DCB per TC information data structure */ struct i40e_tc_info { u16 qoffset; /* Queue offset from base queue */ @@ -357,15 +344,6 @@ struct i40e_ddp_old_profile_list { I40E_FLEX_SET_FSIZE(fsize) | \ I40E_FLEX_SET_SRC_WORD(src)) -#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \ - I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) -#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \ - I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) -#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \ - I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) #define I40E_MAX_FLEX_SRC_OFFSET 0x1F @@ -390,7 +368,6 @@ struct i40e_ddp_old_profile_list { #define I40E_L4_GLQF_ORT_IDX 35 /* Flex PIT register index */ -#define I40E_FLEX_PIT_IDX_START_L2 0 #define I40E_FLEX_PIT_IDX_START_L3 3 #define I40E_FLEX_PIT_IDX_START_L4 6 @@ -531,7 +508,6 @@ struct i40e_pf { #define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9) #define I40E_HW_PTP_L4_CAPABLE BIT(10) #define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11) -#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12) #define I40E_HW_HAVE_CRT_RETIMER BIT(13) #define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14) #define I40E_HW_PHY_CONTROLS_LEDS BIT(15) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index aa5f1c0aa721..c52910f03cfb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -55,29 +55,17 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 #define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 #define I40E_AQ_FLAG_LB_SHIFT 9 #define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 #define I40E_AQ_FLAG_BUF_SHIFT 12 #define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ #define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ #define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ #define 
I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ #define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ #define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -362,13 +350,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - struct i40e_aqc_request_resource { __le16 resource_id; __le16 access_type; @@ -384,7 +365,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -411,8 +391,6 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -441,11 +419,6 @@ struct i40e_aqc_list_capabilities_element_resp { /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 __le16 ttlx; __le32 dmacr; __le16 dmcth; @@ -459,15 +432,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 -#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -482,19 +448,6 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -507,7 +460,6 @@ I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 u8 reserved[2]; __le32 sal; __le16 sah; @@ -520,11 +472,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); struct 
i40e_aqc_mac_address_read { __le16 command_flags; #define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -548,9 +496,7 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 #define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 __le16 mac_sah; __le32 mac_sal; @@ -573,22 +519,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); struct i40e_aqc_set_wol_filter { __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) - -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) + __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 u8 reserved[2]; __le32 address_high; __le32 address_low; @@ -608,12 +541,6 @@ I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); struct i40e_aqc_get_wake_reason_completion { u8 reserved_1[2]; __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) u8 reserved_2[12]; }; @@ -646,25 +573,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); struct i40e_aqc_switch_config_element_resp { u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 __le16 seid; __le16 uplink_seid; __le16 downlink_seid; u8 reserved[3]; u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 __le16 scheduler_id; __le16 element_info; }; @@ -697,12 +611,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; @@ -722,25 +631,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 u8 reserved1; __le16 guaranteed; __le16 total; @@ -756,7 +646,6 @@ struct i40e_aqc_set_switch_config { __le16 flags; /* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; /* The ethertype in switch_tag is dropped on ingress and used * internally by the switch. Set this to zero for the default @@ -789,17 +678,10 @@ struct i40e_aqc_set_switch_config { */ #define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 -#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 -#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 #define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 -#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 -#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 -#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 -#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 #define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 -#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 u8 mode; u8 rsvd5[5]; }; @@ -834,19 +716,13 @@ struct i40e_aqc_add_get_update_vsi { __le16 uplink_seid; u8 connection_type; #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 u8 reserved1; u8 vf_id; u8 reserved2; __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) #define I40E_AQ_VSI_TYPE_VF 0x0 #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 #define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 __le32 addr_high; __le32 addr_low; }; @@ -870,24 +746,18 @@ struct i40e_aqc_vsi_properties_data { #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ __le16 switch_id; /* 12bit id combined with flags below */ #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << 
I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 u8 sw_reserved[2]; /* security section */ u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 u8 sec_reserved; @@ -899,78 +769,33 @@ struct i40e_aqc_vsi_properties_data { #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ I40E_AQ_VSI_PVLAN_MODE_SHIFT) #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ I40E_AQ_VSI_PVLAN_EMOD_SHIFT) #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 u8 pvlan_reserved[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ __le16 cas_pv_tag; u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 u8 cas_pv_reserved; /* queue mapping section */ __le16 mapping_flags; #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) __le16 tc_mapping[8]; #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 -#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section 
*/ @@ -995,10 +820,6 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); */ struct i40e_aqc_add_update_pv { __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 __le16 uplink_seid; __le16 connected_seid; u8 reserved[10]; @@ -1009,10 +830,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 u8 reserved[14]; }; @@ -1026,9 +843,6 @@ struct i40e_aqc_get_pv_params_completion { __le16 seid; __le16 default_stag; __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 u8 reserved[8]; __le16 default_port_seid; }; @@ -1041,12 +855,8 @@ struct i40e_aqc_add_veb { __le16 downlink_seid; __le16 veb_flags; #define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ #define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; @@ -1059,10 +869,6 @@ struct i40e_aqc_add_veb_completion { __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; @@ -1095,9 +901,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); struct i40e_aqc_macvlan { __le16 num_addresses; __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 __le32 addr_high; __le32 addr_low; @@ -1111,18 +914,11 @@ struct i40e_aqc_add_macvlan_element_data { __le16 vlan_tag; __le16 flags; #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 #define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 #define I40E_AQC_MM_ERR_NO_RES 0xFF u8 reserved1[3]; }; @@ -1148,14 +944,10 @@ struct i40e_aqc_remove_macvlan_element_data { __le16 vlan_tag; u8 flags; #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 u8 reserved[3]; /* reply section */ u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF u8 reply_reserved[3]; }; @@ -1166,30 +958,8 @@ struct i40e_aqc_remove_macvlan_element_data { struct 
i40e_aqc_add_remove_vlan_element_data { __le16 vlan_tag; u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 u8 reserved; u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF u8 reserved1[3]; }; @@ -1213,9 +983,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 #define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -1227,11 +995,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); */ struct i40e_aqc_add_tag { __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; __le16 queue_number; u8 reserved[8]; @@ -1252,9 +1016,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); */ struct i40e_aqc_remove_tag { __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; u8 reserved[12]; }; @@ -1290,9 +1051,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) __le16 old_tag; __le16 new_tag; u8 reserved[10]; @@ -1319,13 +1077,8 @@ struct i40e_aqc_add_remove_control_packet_filter { __le16 flags; #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) __le16 queue; u8 reserved[2]; }; @@ -1351,9 +1104,6 @@ struct i40e_aqc_add_remove_cloud_filters { u8 num_filters; u8 reserved; __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) u8 big_buffer_flag; #define I40E_AQC_ADD_CLOUD_CMD_BB 1 u8 reserved2[3]; @@ -1380,9 +1130,6 @@ struct i40e_aqc_cloud_filters_element_data { } raw_v6; } ipaddr; __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - 
I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ /* 0x0001 reserved */ /* 0x0002 reserved */ @@ -1404,36 +1151,20 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) u8 reserved2[14]; /* response section */ u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF u8 response_reserved[7]; }; @@ -1445,37 +1176,7 @@ I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); struct i40e_aqc_cloud_filters_element_bb { struct i40e_aqc_cloud_filters_element_data element; u16 general_fields[32]; -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 }; I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); @@ -1504,11 +1205,6 @@ I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); struct i40e_aqc_replace_cloud_filters_cmd { u8 valid_flags; -#define I40E_AQC_REPLACE_L1_FILTER 0x0 -#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 -#define I40E_AQC_GET_CLOUD_FILTERS 0x2 -#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 -#define 
I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 u8 old_filter_type; u8 new_filter_type; u8 tr_bit; @@ -1521,25 +1217,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); struct i40e_aqc_replace_cloud_filters_cmd_buf { u8 data[32]; -/* Filter type INPUT codes*/ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) - -/* Field Vector offsets */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 - -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 struct i40e_filter_data filters[8]; }; @@ -1556,8 +1233,6 @@ struct i40e_aqc_add_delete_mirror_rule { #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 @@ -1600,8 +1275,6 @@ struct i40e_aqc_write_ddp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_DDP_GET_CONF 0x1 -#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; @@ -1618,8 +1291,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); struct i40e_aqc_pfc_ignore { u8 tc_bitmap; u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 u8 reserved[14]; }; @@ -1736,7 +1407,6 @@ struct i40e_aqc_configure_switching_comp_ets_data { u8 reserved[4]; u8 tc_valid_bits; u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 u8 tc_strict_priority_flags; u8 reserved1[17]; u8 tc_bw_share_credits[8]; @@ -1977,40 +1647,18 @@ struct i40e_aq_get_phy_abilities_resp { u8 abilities; #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 -#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 -#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 __le32 eeer_val; u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 u8 phy_type_ext; #define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 -#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 -#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 -#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40 -#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80 u8 fec_cfg_curr_mod_ext_info; -#define I40E_AQ_ENABLE_FEC_KR 0x01 -#define I40E_AQ_ENABLE_FEC_RS 0x02 #define I40E_AQ_REQUEST_FEC_KR 0x04 #define I40E_AQ_REQUEST_FEC_RS 0x08 #define I40E_AQ_ENABLE_FEC_AUTO 0x10 -#define 
I40E_AQ_FEC -#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 -#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 u8 ext_comp_code; u8 phy_id[4]; @@ -2028,7 +1676,6 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */ u8 link_speed; u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; @@ -2056,21 +1703,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 -#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; @@ -2092,8 +1724,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 #define I40E_AQ_LSE_DISABLE 0x2 #define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ @@ -2102,44 +1732,16 @@ struct i40e_aqc_get_link_status { u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; #define I40E_AQ_LINK_UP 0x01 /* obsolete */ -#define I40E_AQ_LINK_UP_FUNCTION 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; #define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 #define I40E_AQ_LINK_PAUSE_TX 0x20 #define I40E_AQ_LINK_PAUSE_RX 0x40 #define I40E_AQ_QUALIFIED_MODULE 0x80 u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 -/* 25G Error Codes */ -#define I40E_AQ_25G_NO_ERR 0X00 -#define I40E_AQ_25G_NOT_PRESENT 0X01 -#define I40E_AQ_25G_NVM_CRC_ERR 0X02 -#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 -#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 -#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ /* Since firmware API 1.7 loopback field keeps power class info as well */ #define I40E_AQ_LOOPBACK_MASK 0x07 -#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 -#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 @@ -2149,11 +1751,6 @@ struct i40e_aqc_get_link_status 
{ union { struct { u8 power_desc; -#define I40E_AQ_LINK_POWER_CLASS_1 0x00 -#define I40E_AQ_LINK_POWER_CLASS_2 0x01 -#define I40E_AQ_LINK_POWER_CLASS_3 0x02 -#define I40E_AQ_LINK_POWER_CLASS_4 0x03 -#define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; struct { @@ -2171,13 +1768,7 @@ struct i40e_aqc_set_phy_int_mask { __le16 event_mask; #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 #define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 u8 reserved1[6]; }; @@ -2209,13 +1800,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) */ struct i40e_aqc_set_phy_debug { u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ - I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 /* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 /* Disable link manageability on all ports */ @@ -2247,7 +1831,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); /* Get PHY Register command (0x0629) */ struct i40e_aqc_phy_register_access { u8 phy_interface; -#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 u8 dev_address; @@ -2274,9 +1857,7 @@ struct i40e_aqc_nvm_update { #define I40E_AQ_NVM_LAST_CMD 0x01 #define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 #define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; @@ -2291,9 +1872,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 -#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; __le16 element_id; /* Feature/field ID */ __le16 element_id_msw; /* MSWord of field ID */ @@ -2315,16 +1893,8 @@ struct i40e_aqc_nvm_config_write { I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; -#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 -#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 -#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 __le16 feature_options; __le16 feature_selection; }; @@ -2344,7 +1914,6 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); * no command data struct used */ struct i40e_aqc_nvm_oem_post_update { -#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 u8 sel_data; u8 reserved[7]; }; @@ -2366,9 +1935,6 @@ I40E_CHECK_STRUCT_LEN(0x28, 
i40e_aqc_nvm_oem_post_update_buffer); */ struct i40e_aqc_thermal_sensor { u8 sensor_action; -#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 -#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 -#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 u8 reserved[7]; __le32 addr_high; __le32 addr_low; @@ -2421,10 +1987,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); */ struct i40e_aqc_alternate_write_done { __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 u8 reserved[14]; }; @@ -2433,8 +1995,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 u8 reserved[12]; }; @@ -2460,13 +2020,9 @@ struct i40e_aqc_lldp_get_mib { #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 #define I40E_AQ_LLDP_MIB_LOCAL 0x0 #define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ __le16 local_len; __le16 remote_len; @@ -2482,7 +2038,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); */ struct i40e_aqc_lldp_update_mib { u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 u8 reserved[7]; __le32 addr_high; @@ -2521,7 +2076,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 #define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2 u8 reserved[15]; @@ -2627,13 +2181,6 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); * Used to replace the local MIB of a given LLDP agent. e.g. DCBx */ struct i40e_aqc_lldp_set_local_mib { -#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \ - BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; @@ -2648,9 +2195,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); * Used for stopping/starting specific LLDP agent. e.g. 
DCBx */ struct i40e_aqc_lldp_stop_start_specific_agent { -#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 -#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ - BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2660,7 +2204,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); /* Restore LLDP Agent factory settings (direct 0x0A0A) */ struct i40e_aqc_lldp_restore { u8 command; -#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0 #define I40E_AQ_LLDP_AGENT_RESTORE 0x1 u8 reserved[15]; }; @@ -2674,8 +2217,6 @@ struct i40e_aqc_add_udp_tunnel { u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 -#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; @@ -2685,8 +2226,6 @@ struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; @@ -2759,16 +2298,7 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 u8 reserved[10]; }; @@ -2777,9 +2307,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 __le32 param_value1; __le16 param_value2; u8 reserved[6]; @@ -2789,8 +2316,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 u8 reserved[12]; }; @@ -2826,14 +2351,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); struct i40e_acq_set_test_mode { u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 u8 reserved[3]; u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 u8 reserved2[3]; __le32 address_high; __le32 address_low; @@ -2874,20 +2393,6 @@ struct i40e_aqc_debug_modify_reg { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); /* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - struct i40e_aqc_debug_dump_internals { u8 cluster_id; u8 table_id; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 45b90eb11adb..4ab081953e19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1455,10 +1455,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) return gpio_val; } -#define 
I40E_COMBINED_ACTIVITY 0xA -#define I40E_FILTER_ACTIVITY 0xE -#define I40E_LINK_ACTIVITY 0xC -#define I40E_MAC_ACTIVITY 0xD #define I40E_FW_LED BIT(4) #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index ba86ad833bee..2b1a2e81ac73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -6,10 +6,8 @@ #include "i40e_type.h" -#define I40E_DCBX_STATUS_NOT_STARTED 0 #define I40E_DCBX_STATUS_IN_PROGRESS 1 #define I40E_DCBX_STATUS_DONE 2 -#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 #define I40E_DCBX_STATUS_DISABLED 7 #define I40E_TLV_TYPE_END 0 @@ -24,7 +22,6 @@ #define I40E_CEE_DCBX_OUI 0x001b21 #define I40E_CEE_DCBX_TYPE 2 -#define I40E_CEE_SUBTYPE_CTRL 1 #define I40E_CEE_SUBTYPE_PG_CFG 2 #define I40E_CEE_SUBTYPE_PFC_CFG 3 #define I40E_CEE_SUBTYPE_APP_PRI 4 @@ -105,9 +102,7 @@ struct i40e_cee_ctrl_tlv { struct i40e_cee_feat_tlv { struct i40e_cee_tlv_hdr hdr; u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ -#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 #define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 -#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 u8 subtype; u8 tlvinfo[1]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 99ea543dd245..9cb9b781451c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -688,7 +688,6 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) i40e_dbg_dump_vf(pf, i); } -#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum * @filp: the opened file diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index bf15a868292f..33df3bf2f73b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -32,8 +32,5 @@ #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) #endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 1c78de838857..3113792afaff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -14,7 +14,6 @@ struct i40e_hw; #define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ #define I40E_HMC_PAGED_BP_SIZE 4096 #define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 struct i40e_hmc_obj_info { u64 base; /* base addr in FPM */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5d807c8004f8..5f7f5147f9a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6492,8 +6492,7 @@ out: return err; } #endif /* CONFIG_I40E_DCB */ -#define SPEED_SIZE 14 -#define FC_SIZE 8 + /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message @@ -8950,13 +8949,6 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } -/* We can see up to 256 filter programming desc in transit if the filters are - * being applied really fast; before we see the first - * filter miss error on Rx queue 0. 
Accumulating enough error messages before - * reacting will make sure we don't cause flush too often. - */ -#define I40E_MAX_FD_PROGRAM_ERROR 256 - /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index c302ef2524f8..2f6815b2f8df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -26,7 +26,6 @@ do { \ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) -#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index d35d690ca10f..7cd3a08a1891 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -4,53 +4,14 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ -#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) -#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) -#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ -#define I40E_GL_ARQH_ARQH_SHIFT 0 -#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) -#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ -#define I40E_GL_ARQT_ARQT_SHIFT 0 -#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) -#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ -#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) -#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ -#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) -#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ -#define I40E_GL_ATQH_ATQH_SHIFT 0 -#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) -#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ -#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) -#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) -#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) -#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) -#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ -#define I40E_GL_ATQT_ATQT_SHIFT 0 -#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ -#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) #define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ -#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) #define I40E_PF_ARQH 
0x00080380 /* Reset: EMPR */ #define I40E_PF_ARQH_ARQH_SHIFT 0 #define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) #define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ -#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 #define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 @@ -60,20 +21,10 @@ #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ -#define I40E_PF_ARQT_ARQT_SHIFT 0 -#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) #define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ -#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) #define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ -#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) #define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ -#define I40E_PF_ATQH_ATQH_SHIFT 0 -#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) #define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ -#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 #define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 @@ -83,284 +34,13 @@ #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ -#define I40E_PF_ATQT_ATQT_SHIFT 0 -#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) -#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAH_MAX_INDEX 127 -#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAL_MAX_INDEX 127 -#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) -#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQH_MAX_INDEX 127 -#define I40E_VF_ARQH_ARQH_SHIFT 0 -#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) -#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQLEN_MAX_INDEX 127 -#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) -#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQT_MAX_INDEX 127 -#define 
I40E_VF_ARQT_ARQT_SHIFT 0 -#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) -#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAH_MAX_INDEX 127 -#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAL_MAX_INDEX 127 -#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) -#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQH_MAX_INDEX 127 -#define I40E_VF_ATQH_ATQH_SHIFT 0 -#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) -#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQLEN_MAX_INDEX 127 -#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) -#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQT_MAX_INDEX 127 -#define I40E_VF_ATQT_ATQT_SHIFT 0 -#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) -#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ -#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 -#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, 
I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) -#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 -#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) -#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 -#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) -#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 -#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 -#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) -#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) -#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) -#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ -#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 -#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) -#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) -#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 -#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) -#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) -#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 -#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) #define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ -#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 -#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) -#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 -#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 -#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 -#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 #define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) #define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 #define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) -#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ -#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 -#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) -#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 -#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 -#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) -#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 -#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 -#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) -#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ -#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 -#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) -#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 -#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) -#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 -#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 -#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) -#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) -#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ -#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 -#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 -#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) -#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ -#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 -#define 
I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) -#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ -#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 -#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 -#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 -#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 -#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 -#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 -#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 -#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 -#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) -#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 -#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 -#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) -#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ -#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ -#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ -#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 -#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) -#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) -#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) -#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) -#define 
I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) -#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ -#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 -#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 -#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 -#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 -#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 -#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 -#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 -#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 -#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 -#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) -#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) -#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ -#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 -#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) -#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 -#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) -#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 -#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) -#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 -#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) #define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ -#define I40E_GL_FWSTS_FWS0B_SHIFT 0 -#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) -#define I40E_GL_FWSTS_FWRI_SHIFT 9 -#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) @@ -369,500 +49,119 @@ #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) -#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ -#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 -#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) -#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 -#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 -#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 -#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, 
I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 -#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 -#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) #define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 -#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) -#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 -#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) -#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 -#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) -#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 -#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 -#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 #define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) -#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 -#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) -#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 -#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 -#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 -#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) -#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ -#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 -#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) -#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 -#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) -#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) -#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) -#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 -#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 -#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) -#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 
16 -#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) -#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 -#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) -#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 -#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) -#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 -#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) -#define I40E_GLGEN_I2CCMD_R_SHIFT 29 -#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) -#define I40E_GLGEN_I2CCMD_E_SHIFT 31 -#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) -#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 -#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 -#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 -#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 -#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) -#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) -#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 -#define 
I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSCA_MAX_INDEX 3 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 -#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 -#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 -#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 -#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 -#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 -#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) -#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 #define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) -#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 -#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) -#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 -#define 
I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 -#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 -#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) #define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 #define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) -#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 -#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) #define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ -#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 -#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) -#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 -#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) -#define I40E_GLGEN_STAT_VTEN_SHIFT 3 -#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) -#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 -#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) -#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 -#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) -#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 -#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) #define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 -#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 -#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) #define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ -#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 -#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) #define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 #define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) -#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ -#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 -#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) #define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 #define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) -#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ -#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 -#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) -#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 -#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) -#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 -#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) -#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 -#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) #define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 #define 
I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) #define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ -#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 -#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) #define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 -#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) #define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 #define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) #define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 #define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) -#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 -#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 -#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) -#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 -#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 -#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) #define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) #define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) #define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) #define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) #define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 -#define 
I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 -#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) #define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) #define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) #define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) -#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 -#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) -#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) -#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) -#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) -#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) -#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) -#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) #define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ -#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 -#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) #define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) #define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXCNT_MAX_INDEX 
15 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) #define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) #define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) -#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 -#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) #define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) #define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) -#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) -#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_SDPART_MAX_INDEX 15 -#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) #define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) #define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) #define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 -#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) #define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ -#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, 
I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 -#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) #define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ -#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 -#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) #define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 -#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 -#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 -#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) -#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 -#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) -#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ -#define I40E_GL_GP_FUSE_MAX_INDEX 28 -#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 -#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) -#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ -#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 -#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) -#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 -#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) -#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 -#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) -#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 -#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) -#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ -#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define 
I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ -#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 -#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) #define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) 
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) #define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_PFINT_CEQCTL_MAX_INDEX 511 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) #define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ -#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 -#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) @@ -872,8 +171,6 @@ #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 @@ -881,7 +178,6 @@ #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 @@ -891,93 +187,13 @@ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, 
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK 
I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 #define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 -#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 -#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 -#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 -#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 #define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 @@ -986,14 +202,8 @@ #define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 #define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) -#define I40E_PFINT_ICR0_GPIO_SHIFT 22 -#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 #define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) -#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 #define 
I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 @@ -1017,10 +227,6 @@ #define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) -#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 #define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 @@ -1029,43 +235,17 @@ #define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 #define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) #define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ -#define I40E_PFINT_ITR0_MAX_INDEX 2 -#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) #define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_ITRN_MAX_INDEX 2 -#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) #define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 -#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ -#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) -#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) #define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_RATEN_MAX_INDEX 511 -#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) -#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + 
((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_RQCTL_MAX_INDEX 1535 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 @@ -1075,13 +255,11 @@ #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) #define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_TQCTL_MAX_INDEX 1535 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 @@ -1091,160 +269,45 @@ #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) #define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 -#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 -#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) -#define 
I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_MAX_INDEX 127 -#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_SWINT_SHIFT 31 -#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) -#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) -#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_ITR0_MAX_INDEX 2 -#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN_MAX_INDEX 2 -#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPINT_AEQCTL_MAX_INDEX 127 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define 
I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_VPINT_CEQCTL_MAX_INDEX 511 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLST0_MAX_INDEX 127 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_RATE0_MAX_INDEX 127 -#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) -#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) -#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_RATEN_MAX_INDEX 511 -#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) -#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK 
I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) -#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 -#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) #define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 #define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) #define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) #define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) #define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) #define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ -#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 #define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 @@ -1257,19 +320,12 @@ #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 #define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) -#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) #define I40E_QRX_ENA_QENA_STAT_SHIFT 2 #define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) #define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QRX_TAIL_MAX_INDEX 1535 -#define I40E_QRX_TAIL_TAIL_SHIFT 0 -#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) #define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_CTL_MAX_INDEX 1535 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0 #define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) #define I40E_QTX_CTL_PF_INDX_SHIFT 2 @@ -1277,43 +333,22 @@ #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 #define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) #define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_ENA_MAX_INDEX 1535 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0 #define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) -#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) #define I40E_QTX_ENA_QENA_STAT_SHIFT 2 #define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) #define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_HEAD_MAX_INDEX 1535 -#define I40E_QTX_HEAD_HEAD_SHIFT 0 -#define I40E_QTX_HEAD_HEAD_MASK 
I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) -#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 -#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) #define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_TAIL_MAX_INDEX 1535 -#define I40E_QTX_TAIL_TAIL_SHIFT 0 -#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) #define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_MAPENA_MAX_INDEX 127 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 #define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) #define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QTABLE_MAX_INDEX 15 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 #define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) #define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QBASE_MAX_INDEX 383 -#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 -#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QTABLE_MAX_INDEX 7 -#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 -#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) -#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 -#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) #define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 #define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) @@ -1322,789 +357,47 @@ #define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 #define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ -#define 
I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) -#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ -#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 -#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) -#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, 
I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) -#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 -#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) -#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) -#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) -#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 -#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 -#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) -#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 -#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 -#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 -#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 -#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) -#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 -#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 -#define 
I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) -#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 -#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, 
I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 -#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) -#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_METF_MAX_INDEX 3 -#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 -#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) -#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 -#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) -#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) -#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 -#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) -#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 -#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) -#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 -#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) -#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 -#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) -#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 -#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) -#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 -#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) -#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) -#define 
I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) -#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ -#define I40E_MSIX_PBA_MAX_INDEX 5 -#define I40E_MSIX_PBA_PENBIT_SHIFT 0 -#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) -#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TADD_MAX_INDEX 128 -#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) -#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TMSG_MAX_INDEX 128 -#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TUADD_MAX_INDEX 128 -#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TVCTRL_MAX_INDEX 128 -#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ -#define I40E_VFMSIX_PBA1_MAX_INDEX 19 -#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, 
I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 -#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ -#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 -#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) -#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 -#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) -#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 -#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) -#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 -#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) -#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 -#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) -#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 -#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) -#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 -#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) -#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 -#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) -#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 -#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) -#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ -#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 -#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) -#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 -#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) #define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ -#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 -#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 #define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) -#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 -#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) -#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 -#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) -#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 -#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) -#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ -#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) #define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ -#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 -#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 -#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) -#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 -#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, 
I40E_GLNVM_SRCTL_WRITE_SHIFT) #define I40E_GLNVM_SRCTL_START_SHIFT 30 -#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 #define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ -#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 -#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 #define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ -#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 -#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 -#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 -#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 #define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 -#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 -#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 -#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 -#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 -#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) -#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ -#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 -#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) -#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ -#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 -#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) -#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ -#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 -#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) #define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ -#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 -#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) -#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 -#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 -#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 #define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 -#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 -#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) 
[diff hunk flattened during extraction; recoverable content:
 removed lines: the unused I40E_GLPCI_*, I40E_GLTPH_*, I40E_PF_FUNC_RID_*, I40E_PF_PCI_CIAA_*/CIAD_*, I40E_PFPCI_*, I40E_PRTPM_*, I40E_GL_PRS_FVBM*, I40E_GLRPB_*, I40E_PRTRPB_*, I40E_GLQF_*, I40E_PFQF_*, I40E_PRTQF_*, I40E_VFQF_*, I40E_VPQF_*, I40E_VSIQF_*, I40E_GL_FCOE*, I40E_GL_RXERR*, I40E_GLPRT_*, I40E_GLSW_*, I40E_GLV_*, I40E_GLVEBTC_* and I40E_GLVEBVL_* register, _MAX_INDEX, _SHIFT and _MASK definitions
 hunk headers: @@ -2112,36 +405,7 @@  @@ -2159,54 +423,19 @@  @@ -2215,14 +444,7 @@  @@ -2230,775 +452,148 @@
 kept context lines: the definitions left in place by the change, e.g. I40E_GLPCI_CNF2, I40E_GLPCI_LBARCTRL_FL_SIZE, I40E_PF_FUNC_RID, I40E_PF_PCI_CIAA/CIAD, I40E_PRTPM_EEE_STAT RX/TX LPI status, I40E_PRTPM_RLPIC/TLPIC, I40E_GLQF_FDCNT_0, I40E_GLQF_HKEY, I40E_GLQF_PCNT, I40E_PFQF_CTL_0/1, I40E_PFQF_FDSTAT, I40E_PFQF_HENA/HKEY/HLUT, I40E_PRTQF_FD_INSET, I40E_PRTQF_FLX_PIT, I40E_VFQF_HENA1/HKEY1/HLUT1 and the per-port/per-switch/per-VSI statistics counters (I40E_GLPRT_*, I40E_GLSW_*, I40E_GLV_*, I40E_GLVEBTC_*)]
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 -#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) -#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 -#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) -#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 -#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) -#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 -#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) -#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) -#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) -#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) -#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ -#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 -#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) -#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 -#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) -#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 -#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 -#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 -#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, 
I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) -#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 -#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 -#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) -#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) #define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 #define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) -#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 -#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 #define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) #define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 -#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 #define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 #define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) -#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) -#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) #define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ -#define 
I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 -#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) #define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 -#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) #define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) #define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) #define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ -#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 -#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 -#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 #define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) #define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ -#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 -#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 -#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) -#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) -#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) #define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) #define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) #define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 -#define 
I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3033,2304 +628,53 @@ #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 #define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 #define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) #define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_TX_MAX_INDEX 127 #define I40E_VP_MDET_TX_VALID_SHIFT 0 #define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) -#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ -#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 -#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) -#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 -#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) -#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 -#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) -#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 -#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) -#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 -#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) #define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ #define I40E_PFPM_APM_APME_SHIFT 0 #define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ -#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 -#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ -#define I40E_PFPM_WUFC_LNKC_SHIFT 0 -#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) #define I40E_PFPM_WUFC_MAG_SHIFT 1 #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_PFPM_WUFC_MNG_SHIFT 3 -#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) -#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 -#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 -#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 -#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 -#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 -#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 -#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 -#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 -#define 
I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX0_SHIFT 16 -#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) -#define I40E_PFPM_WUFC_FLX1_SHIFT 17 -#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) -#define I40E_PFPM_WUFC_FLX2_SHIFT 18 -#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) -#define I40E_PFPM_WUFC_FLX3_SHIFT 19 -#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) -#define I40E_PFPM_WUFC_FLX4_SHIFT 20 -#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) -#define I40E_PFPM_WUFC_FLX5_SHIFT 21 -#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) -#define I40E_PFPM_WUFC_FLX6_SHIFT 22 -#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) -#define I40E_PFPM_WUFC_FLX7_SHIFT 23 -#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) -#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) -#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ -#define I40E_PFPM_WUS_LNKC_SHIFT 0 -#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) -#define I40E_PFPM_WUS_MAG_SHIFT 1 -#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) -#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 -#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) -#define I40E_PFPM_WUS_MNG_SHIFT 3 -#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) -#define I40E_PFPM_WUS_FLX0_SHIFT 16 -#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) -#define I40E_PFPM_WUS_FLX1_SHIFT 17 -#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) -#define I40E_PFPM_WUS_FLX2_SHIFT 18 -#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) -#define I40E_PFPM_WUS_FLX3_SHIFT 19 -#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) -#define I40E_PFPM_WUS_FLX4_SHIFT 20 -#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) -#define I40E_PFPM_WUS_FLX5_SHIFT 21 -#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) -#define I40E_PFPM_WUS_FLX6_SHIFT 22 -#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) -#define I40E_PFPM_WUS_FLX7_SHIFT 23 -#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) -#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) -#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ -#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 -#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) -#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 -#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) -#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAH_MAX_INDEX 3 -#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 -#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) -#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 -#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) -#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 -#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) -#define I40E_PRTPM_SAH_AV_SHIFT 31 -#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, 
I40E_PRTPM_SAH_AV_SHIFT) -#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAL_MAX_INDEX 3 -#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 -#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) #define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) #define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) #define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) #define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) #define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) #define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) #define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, 
I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define 
I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define 
I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define 
I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ -#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 -#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) -#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ -#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 -#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) -#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 -#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) -#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 -#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) -#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 -#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) -#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ -#define I40E_MNGSB_FDS_START_BC_SHIFT 0 -#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) -#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 -#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) -#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 -#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 -#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) -#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 -#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 -#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ -#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 -#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 -#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) -#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ -#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 -#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, 
I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) -#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 -#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) -#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ -#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ -#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) -#define I40E_GL_FWSTS_FWROWD_SHIFT 8 -#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) -#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ -#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 -#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 -#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) -#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_CEQPART_MAX_INDEX 15 -#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 -#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) -#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 -#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) -#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ -#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 -#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) -#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 -#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 -#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK 
I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) -#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 -#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) -#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ -#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 -#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) -#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 -#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 -#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) -#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 -#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) -#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 -#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) -#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 -#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) -#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ -#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 -#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) -#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ -#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) -#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 -#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 -#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) -#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 -#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 -#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) -#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ -#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 -#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) -#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 -#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) -#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 -#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) -#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ -#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) -#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ -#define 
I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 -#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) -#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 -#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) -#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 -#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) -#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ -#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 -#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) -#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ -#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 -#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) -#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 -#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) -#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 -#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) -#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ -#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 -#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) -#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ -#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 -#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) -#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 -#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) -#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 -#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) -#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 -#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) -#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) -#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 -#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) -#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ -#define 
I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 -#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) -#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 -#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) -#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 -#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) -#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ -#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) -#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 -#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 -#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) -#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 -#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 -#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) -#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ -#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 -#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) -#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ -#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 -#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) -#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 -#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 -#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) -#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 -#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 -#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) -#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ -#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 -#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) -#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ -#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 -#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) -#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 -#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) -#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 -#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK 
I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) -#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 -#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) -#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) -#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ -#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 -#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) -#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ -#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) -#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 -#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) -#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 -#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) -#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 -#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 -#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) -#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 -#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) -#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 -#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 -#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) -#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 -#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) -#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 -#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 -#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) -#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 -#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) -#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define 
I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 -#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 -#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) -#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 -#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) -#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 -#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) -#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 -#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) -#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 -#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) -#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 -#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) -#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 -#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) -#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 -#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) -#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 -#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) -#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 -#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) -#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 -#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) -#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) -#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ 
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 -#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) -#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 -#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) -#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 -#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) -#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) -#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 -#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) -#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 -#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) -#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 -#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) -#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 -#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) -#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 -#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) -#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 -#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) -#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 -#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) -#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 -#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, 
I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) -#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) -#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 -#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) -#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ -#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ -#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ -#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) -#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 -#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) -#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 -#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) -#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ -#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 -#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) -#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ -#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 -#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) -#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ -#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 -#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ -#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 -#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, 
I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 -#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 -#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 -#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 -#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 -#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) -#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ -#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 -#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) -#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ -#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 -#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) -#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 -#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) -#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 -#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) -#define 
I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) -#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QBASE_MAX_INDEX 127 -#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 -#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) -#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 -#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) -#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 -#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) -#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ -#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 -#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) -#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ -#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 -#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) -#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 -#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) -#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 -#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) -#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 -#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) -#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ -#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 -#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) -#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 -#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ -#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 -#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) -#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 -#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) -#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 -#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) -#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 -#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) -#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 -#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) -#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 -#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) -#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 -#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) -#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 -#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) -#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ -#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 -#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK 
I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 -#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 -#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) -#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 -#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 -#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 -#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 -#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 -#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) -#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 -#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 -#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 -#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) -#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ -#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 -#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 -#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 -#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 -#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 -#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 -#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 -#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 -#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 -#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 -#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 -#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11 -#define 
I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12 -#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13 -#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14 -#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15 -#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16 -#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT) -#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */ -#define I40E_MNGSB_DADD_ADDR_SHIFT 0 -#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT) -#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */ -#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0 -#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT) -#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */ -#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0 -#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT) -#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8 -#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT) -#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26 -#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT) -#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28 -#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT) -#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30 -#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT) -#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31 -#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT) -#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */ -#define I40E_MNGSB_RDATA_DATA_SHIFT 0 -#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT) -#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */ -#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0 -#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT) -#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8 -#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT) -#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16 -#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT) -#define I40E_MNGSB_RHDR0_TAG_SHIFT 24 -#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT) -#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27 -#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT) -#define I40E_MNGSB_RHDR0_EH_SHIFT 31 -#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT) -#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */ -#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0 -#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26 -#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, 
I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30 -#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31 -#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT) -#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */ -#define I40E_MNGSB_WDATA_DATA_SHIFT 0 -#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT) -#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */ -#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0 -#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT) -#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12 -#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT) -#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16 -#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT) -#define I40E_MNGSB_WHDR0_TAG_SHIFT 24 -#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT) -#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */ -#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0 -#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT) -#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */ -#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0 -#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT) -#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21 -#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT) -#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT) -#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT) -#define 
I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */ -#define 
I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT) -#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 -#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) -#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 -#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) -#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */ -#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0 -#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT) -#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1 -#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT) -#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2 -#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT) -#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6 -#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT) -#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16 -#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT) -#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, 
I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT) -#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 -#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) -#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 -#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) -#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 -#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) -#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 -#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) -#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 -#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) -#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 -#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) -#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ -#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 -#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) -#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) -#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) -#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) -#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC 
/* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) -#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) -#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 -#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 -#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 -#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 -#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 -#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 -#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 -#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) -#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) -#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) -#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 -#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) -#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) -#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) -#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) -#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 -#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 -#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) -#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 -#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) -#define 
I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 -#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 -#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 -#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 -#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 -#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 -#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 -#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 -#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) -#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 -#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 -#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) -#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 -#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) -#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ -#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 -#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) -#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ -#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 -#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) -#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ -#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 -#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) -#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ -#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 -#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) -#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 -#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) -#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 -#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) -#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 -#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) -#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ -#define I40E_PFPE_CQACK_PECQID_SHIFT 0 -#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT) -#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ -#define I40E_PFPE_CQARM_PECQID_SHIFT 0 -#define 
I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT) -#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ -#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 -#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT) -#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ -#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 -#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) -#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) -#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ -#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 -#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) -#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 -#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) -#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ -#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ -#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ -#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 -#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) -#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 -#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ -#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) -#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ -#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ -#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 -#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) -#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */ -#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 -#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 -#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 -#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 -#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 -#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) -#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */ -#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 -#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) -#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 
31 -#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) -#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ -#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 -#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT) -#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 -#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) -#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */ -#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */ -#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */ -#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */ -#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0 -#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8 -#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16 -#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24 -#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT) -#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT) -#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13 -#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT) -#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30 -#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT) -#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT) -#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16 -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31 -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, 
I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT) -#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0 -#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT) -#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8 -#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT) -#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16 -#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT) -#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 -#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 -#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 -#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, 
I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 -#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQACK_MAX_INDEX 127 -#define I40E_VFPE_CQACK_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT) -#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQARM_MAX_INDEX 127 -#define I40E_VFPE_CQARM_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT) -#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPDB_MAX_INDEX 127 -#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 -#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 -#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 -#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 -#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 -#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) -#define 
I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 -#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 -#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) -#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 -#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) -#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 -#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) -#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 -#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) -#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 -#define 
I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 -#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) -#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 -#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) -#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: 
PE_CORER */ -#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 -#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) -#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) -#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 -#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) -#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 -#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) -#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 -#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) -#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 -#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) -#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 -#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 -#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) -#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 -#define 
I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 -#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) -#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 -#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) -#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 -#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) -#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 -#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) -#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 -#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) -#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 -#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 -#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) -#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 -#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 -#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) -#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) -#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) -#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) -#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 -#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, 
I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) -#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 -#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) -#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 -#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) -#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 -#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) -#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 -#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) -#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 -#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) -#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) -#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) -#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 -#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) -#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 -#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) -#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) -#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK 
I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) -#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 -#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) -#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 -#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) -#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) -#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) -#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) -#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 -#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) -#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) -#define 
I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 -#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) -#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 -#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) -#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 -#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) -#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 -#define 
I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 -#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) -#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 -#define 
I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 -#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) -#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) -#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 -#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) -#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 -#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) -#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 -#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) -#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 -#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) -#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 -#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 -#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) -#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 -#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) -#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 -#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) -#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 -#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) -#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 -#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) -#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 -#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) -#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 -#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 -#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) -#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 -#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 -#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) -#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 -#define 
I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) -#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) -#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) -#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ -#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 -#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) -#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ -#define I40E_GLQF_APBVT_MAX_INDEX 2047 -#define I40E_GLQF_APBVT_APBVT_SHIFT 0 -#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) -#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ -#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 -#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 -#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) -#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_FD_MSK_MAX_INDEX 1 -#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 -#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) -#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 -#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) #define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 -#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 -#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) -#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 -#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 -#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) -#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 -#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_ORT_MAX_INDEX 63 #define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 #define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) #define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 #define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) -#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ -#define I40E_GLQF_PIT_MAX_INDEX 23 
-#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 -#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) -#define I40E_GLQF_PIT_FSIZE_SHIFT 5 -#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) -#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 -#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 -#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 -#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) -#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ -#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 -#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) -#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 -#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) -#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ -#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 -#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) -#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 -#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) /* Redefined for X722 family */ -#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 -#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 -#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 -#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 -#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 -#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) -#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PFQF_HREGION_MAX_INDEX 7 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_PFQF_HREGION_REGION_4_MASK 
I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) -#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 -#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) -#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ -#define I40E_VSIQF_HKEY_MAX_INDEX 12 -#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) -#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) -#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) -#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) -#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ -#define I40E_VSIQF_HLUT_MAX_INDEX 15 -#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 -#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) -#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 -#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) -#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 -#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) -#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 -#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ -#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 -#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) -#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ -#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 -#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define 
I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) #endif /* _I40E_REGISTER_H_ */ diff 
--git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 5c255977fd58..8d3c9d37e42e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -18,10 +18,7 @@ #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ #define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 10 /* all values below must be even */ -#define I40E_ITR_50K 20 #define I40E_ITR_20K 50 -#define I40E_ITR_18K 60 #define I40E_ITR_8K 122 #define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ #define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) @@ -52,9 +49,6 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl) else return 0; } -#define I40E_INTRL_8K 125 /* 8000 ints/sec */ -#define I40E_INTRL_62K 16 /* 62500 ints/sec */ -#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -73,7 +67,6 @@ enum i40e_dyn_idx_t { /* these are indexes into ITRN registers */ #define I40E_RX_ITR I40E_IDX_ITR0 #define I40E_TX_ITR I40E_IDX_ITR1 -#define I40E_PE_ITR I40E_IDX_ITR2 /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ @@ -193,13 +186,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ -#define I40E_RX_INCREMENT(r, i) \ - do { \ - (i)++; \ - if ((i) == (r)->count) \ - i = 0; \ - r->next_to_clean = i; \ - } while (0) #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ @@ -209,11 +195,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, (n) = I40E_RX_DESC((r), (i)); \ } while (0) -#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ - do { \ - I40E_RX_NEXT_DESC((r), (i), (n)); \ - prefetch((n)); \ - } while (0) #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 @@ -262,15 +243,12 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) /* Tx Descriptors needed, worst case */ #define DESC_NEEDED (MAX_SKB_FRAGS + 6) -#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) #define I40E_TX_FLAGS_IPV4 BIT(4) #define I40E_TX_FLAGS_IPV6 BIT(5) -#define I40E_TX_FLAGS_FCCRC BIT(6) -#define I40E_TX_FLAGS_FSO BIT(7) #define I40E_TX_FLAGS_TSYN BIT(8) #define I40E_TX_FLAGS_FD_SB BIT(9) #define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) @@ -332,9 +310,7 @@ enum i40e_ring_state_t { /* some useful defines for virtchannel interface, which * is the only remaining user of header split */ -#define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -444,7 +420,6 @@ static inline void set_ring_xdp(struct i40e_ring *ring) #define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e #define I40E_ITR_ADAPTIVE_LATENCY 0x8000 #define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) struct i40e_ring_container { struct i40e_ring *ring; /* pointer to linked list of ring(s) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 63e098f7cb63..52410d609ba1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -84,8 +84,6 @@ enum i40e_debug_mask { I40E_GLGEN_MSCA_OPCODE_SHIFT) #define 
I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) @@ -178,21 +176,9 @@ struct i40e_link_status { u8 module_type[3]; /* 1st byte: module identifier */ #define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 /* 3rd byte: ethernet compliance codes for 1G */ #define I40E_MODULE_TYPE_1000BASE_SX 0x01 #define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 }; struct i40e_phy_info { @@ -262,9 +248,6 @@ struct i40e_phy_info { /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 /* Cloud filter modes: * Mode1: Filter on L4 port only @@ -273,14 +256,10 @@ struct i40e_hw_capabilities { */ #define I40E_CLOUD_FILTER_MODE1 0x6 #define I40E_CLOUD_FILTER_MODE2 0x7 -#define I40E_CLOUD_FILTER_MODE3 0x8 #define I40E_SWITCH_MODE_MASK 0xF u32 management_mode; u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 u32 npar_enable; u32 os2bmc; u32 valid_functions; @@ -294,13 +273,8 @@ struct i40e_hw_capabilities { bool flex10_enable; bool flex10_capable; u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 bool sec_rev_disabled; bool update_disabled; @@ -421,11 +395,8 @@ enum i40e_nvmupd_state { #define I40E_NVM_AQE 0xe #define I40E_NVM_EXEC 0xf -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ struct i40e_nvm_access { u32 command; @@ -438,7 +409,6 @@ struct i40e_nvm_access { /* (Q)SFP module access definitions */ #define I40E_I2C_EEPROM_DEV_ADDR 0xA0 #define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 #define I40E_MODULE_REVISION_ADDR 0x01 #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C @@ -547,7 +517,6 @@ struct i40e_dcbx_config { #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u8 app_mode; -#define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; @@ -895,9 +864,6 @@ enum i40e_rx_ptype_payload_layer { #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) @@ -926,7 +892,6 @@ enum i40e_rx_desc_pe_status_bits { I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 }; -#define 
I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 @@ -963,8 +928,6 @@ struct i40e_tx_desc { __le64 cmd_type_offset_bsz; }; -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) enum i40e_tx_desc_dtype_value { I40E_TX_DESC_DTYPE_DATA = 0x0, @@ -980,7 +943,6 @@ enum i40e_tx_desc_dtype_value { }; #define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) enum i40e_tx_desc_cmd_bits { I40E_TX_DESC_CMD_EOP = 0x0001, @@ -1004,8 +966,6 @@ enum i40e_tx_desc_cmd_bits { }; #define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ @@ -1015,11 +975,8 @@ enum i40e_tx_desc_length_fields { }; #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) #define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) /* Context descriptors */ struct i40e_tx_context_desc { @@ -1029,11 +986,8 @@ struct i40e_tx_context_desc { __le64 type_cmd_tso_mss; }; -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) #define I40E_TXD_CTX_QW1_CMD_SHIFT 4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) enum i40e_tx_ctx_desc_cmd_bits { I40E_TX_CTX_DESC_TSO = 0x01, @@ -1048,19 +1002,10 @@ enum i40e_tx_ctx_desc_cmd_bits { }; #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) #define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) enum i40e_tx_ctx_desc_eipt_offload { I40E_TX_CTX_EXT_IP_NONE = 0x0, @@ -1070,28 +1015,16 @@ enum i40e_tx_ctx_desc_eipt_offload { }; #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 #define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) @@ -1161,11 +1094,8 @@ enum i40e_filter_program_desc_fd_status { I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define 
I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) enum i40e_filter_program_desc_pcmd { I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, @@ -1316,7 +1246,6 @@ struct i40e_hw_port_stats { #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F @@ -1329,7 +1258,6 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) #define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B @@ -1463,14 +1391,11 @@ struct i40e_lldp_variables { /* Offsets into Alternate Ram */ #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ -#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ -#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ #define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ /* Alternate Ram Bandwidth Masks */ #define I40E_ALT_BW_VALUE_MASK 0xFF -#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 #define I40E_ALT_BW_VALID_MASK 0x80000000 /* RSS Hash Table Size */ @@ -1529,9 +1454,7 @@ struct i40e_package_header { /* Generic segment header */ struct i40e_generic_seg_header { #define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 #define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 u32 type; struct i40e_ddp_version version; u32 size; @@ -1541,7 +1464,6 @@ struct i40e_generic_seg_header { struct i40e_metadata_segment { struct i40e_generic_seg_header header; struct i40e_ddp_version version; -#define I40E_DDP_TRACKID_RDONLY 0 #define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF u32 track_id; char name[I40E_DDP_NAME_SIZE]; @@ -1575,10 +1497,6 @@ struct i40e_profile_section_header { #define SECTION_TYPE_AQ 0x00000801 #define SECTION_TYPE_RB_AQ 0x00001801 #define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 -#define SECTION_TYPE_PROTO 0x80000002 -#define SECTION_TYPE_PCTYPE 0x80000003 -#define SECTION_TYPE_PTYPE 0x80000004 u32 type; u32 offset; u32 size; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 631248c0981a..5491215d81de 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,7 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 #define I40E_VLAN_PRIORITY_SHIFT 13 diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index 72994baf4941..f41387a8969f 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -37,11 +37,6 @@ enum i40e_client_instance_state { struct i40e_ops; struct i40e_client; -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. 
- */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 #define I40E_QUEUE_INVALID_IDX 0xFFFF struct i40e_qv_info { @@ -56,7 +51,6 @@ struct i40e_qvlist_info { struct i40e_qv_info qv_info[1]; }; -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF /* set of LAN parameters useful for clients managed by LAN */ @@ -87,7 +81,6 @@ struct i40e_info { u8 __iomem *hw_addr; u8 fid; /* function id, PF id or VF id */ #define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 u8 ftype; /* function type, PF or VF */ void *pf; @@ -184,8 +177,6 @@ struct i40e_client { unsigned long state; /* client state */ atomic_t ref_cnt; /* Count of all the client devices of this kind */ u32 flags; -#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) u8 type; #define I40E_CLIENT_IWARP 0 const struct i40e_client_ops *ops; /* client ops provided by the client */ -- cgit v1.2.3-59-g8ed1b From 33d226f504ed72cba3a2b42bbe2a993b3d6d9548 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 02:25:06 +0200 Subject: mtd: nand: Move nand_device forward declaration to the top This structure might be used earlier in this file, let's move the forward declaration at the top. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529002517.3546-10-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 0c7483843a32..a1f38c778d0e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -12,6 +12,8 @@ #include +struct nand_device; + /** * struct nand_memory_organization - Memory organization structure * @bits_per_cell: number of bits per NAND cell @@ -133,8 +135,6 @@ struct nand_bbt { unsigned long *cache; }; -struct nand_device; - /** * struct nand_ops - NAND operations * @erase: erase a specific block. No need to check if the block is bad before -- cgit v1.2.3-59-g8ed1b From 85f54c5588885cc3b5be4a07498dd0755de9f5cf Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 02:25:10 +0200 Subject: mtd: nand: Rename a core structure Prepare the migration to a generic ECC engine by renaming the nand_ecc_req structure into nand_ecc_props. This structure will be the base of a wider 'nand_ecc' structure. In nand_device, these properties are still named "eccreq" even if "eccprops" might be more descriptive. This is just a transition step, this field is being replaced very soon by a much wider structure. The impact of renaming this field would be huge compared to its interest. 
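As a rough illustration of what the renamed structure means for callers (hypothetical code, not part of this series; the function name and the 8-bit/512-byte values are made up for the example), a driver describing its chip's ECC requirements still fills the same two fields, only through the new type name:

#include <linux/mtd/nand.h>

/*
 * Hypothetical sketch: fill the ECC requirements of a NAND device after
 * the nand_ecc_req -> nand_ecc_props rename. The "eccreq" field keeps its
 * old name at this point of the series; only its type changes.
 */
static void example_set_ecc_requirements(struct nand_device *nand)
{
	nand->eccreq.strength = 8;	/* correct up to 8 bitflips... */
	nand->eccreq.step_size = 512;	/* ...per 512-byte ECC step */
}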
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529002517.3546-14-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 8 ++++---- include/linux/mtd/spinand.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index a1f38c778d0e..af99041ceaa9 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -116,11 +116,11 @@ struct nand_page_io_req { }; /** - * struct nand_ecc_req - NAND ECC requirements + * struct nand_ecc_props - NAND ECC properties * @strength: ECC strength - * @step_size: ECC step/block size + * @step_size: Number of bytes per step */ -struct nand_ecc_req { +struct nand_ecc_props { unsigned int strength; unsigned int step_size; }; @@ -179,7 +179,7 @@ struct nand_ops { struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 1077c45721ff..7b78c4ba9b3e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -309,7 +309,7 @@ struct spinand_info { struct spinand_devid devid; u32 flags; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct spinand_ecc_info eccinfo; struct { const struct spinand_op_variants *read_cache; -- cgit v1.2.3-59-g8ed1b From c4cabc08d09e4b107b685e08bdec8a38a91089d8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:55 +0200 Subject: mtd: rawnand: Use unsigned types for nand_chip unsigned values page_shift, phys_erase_shift, bbt_erase_shift, chip_shift, pagemask, subpagesize and badblockbits are all positive values, so declare them as unsigned. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-2-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 65b1c1c18b41..830f2d08937f 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1110,11 +1110,11 @@ struct nand_chip { unsigned int options; unsigned int bbt_options; - int page_shift; - int phys_erase_shift; - int bbt_erase_shift; - int chip_shift; - int pagemask; + unsigned int page_shift; + unsigned int phys_erase_shift; + unsigned int bbt_erase_shift; + unsigned int chip_shift; + unsigned int pagemask; u8 *data_buf; struct { @@ -1122,10 +1122,10 @@ struct nand_chip { int page; } pagecache; - int subpagesize; + unsigned int subpagesize; int onfi_timing_mode_default; unsigned int badblockpos; - int badblockbits; + unsigned int badblockbits; struct nand_id id; struct nand_parameters parameters; -- cgit v1.2.3-59-g8ed1b From d1f3837a507d73746f9e2118fad20ee5e57e86cc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:56 +0200 Subject: mtd: rawnand: Only use u8 instead of uint8_t in nand_chip structure Mechanical change to avoid using old types. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-3-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 830f2d08937f..cea137778224 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1141,13 +1141,13 @@ struct nand_chip { int (*suspend)(struct nand_chip *chip); void (*resume)(struct nand_chip *chip); - uint8_t *oob_poi; + u8 *oob_poi; struct nand_controller *controller; struct nand_ecc_ctrl ecc; unsigned long buf_align; - uint8_t *bbt; + u8 *bbt; struct nand_bbt_descr *bbt_td; struct nand_bbt_descr *bbt_md; -- cgit v1.2.3-59-g8ed1b From 8e8b2706e15d16443b1ea61a6f994c08ec5b9486 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:57 +0200 Subject: mtd: rawnand: Create a nand_chip operations structure And move nand_chip hooks there. While moving entries from one structure to the other, adapt the documentation style. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 20 ++++++++++---------- drivers/mtd/nand/raw/nand_hynix.c | 2 +- drivers/mtd/nand/raw/nand_macronix.c | 10 +++++----- drivers/mtd/nand/raw/nand_micron.c | 2 +- include/linux/mtd/rawnand.h | 32 ++++++++++++++++++-------------- 5 files changed, 35 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 45124dbb1835..d9cb71e7c0ed 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3215,10 +3215,10 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) if (retry_mode >= chip->read_retries) return -EINVAL; - if (!chip->setup_read_retry) + if (!chip->ops.setup_read_retry) return -EOPNOTSUPP; - return chip->setup_read_retry(chip, retry_mode); + return chip->ops.setup_read_retry(chip, retry_mode); } static void nand_wait_readrdy(struct nand_chip *chip) @@ -4462,8 +4462,8 @@ static int nand_suspend(struct mtd_info *mtd) int ret = 0; mutex_lock(&chip->lock); - if (chip->suspend) - ret = chip->suspend(chip); + if (chip->ops.suspend) + ret = chip->ops.suspend(chip); if (!ret) chip->suspended = 1; mutex_unlock(&chip->lock); @@ -4481,8 +4481,8 @@ static void nand_resume(struct mtd_info *mtd) mutex_lock(&chip->lock); if (chip->suspended) { - if (chip->resume) - chip->resume(chip); + if (chip->ops.resume) + chip->ops.resume(chip); chip->suspended = 0; } else { pr_err("%s called for a chip which is not in suspended state\n", @@ -4511,10 +4511,10 @@ static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->lock_area) + if (!chip->ops.lock_area) return -ENOTSUPP; - return chip->lock_area(chip, ofs, len); + return chip->ops.lock_area(chip, ofs, len); } /** @@ -4527,10 +4527,10 @@ static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->unlock_area) + if (!chip->ops.unlock_area) return -ENOTSUPP; - return chip->unlock_area(chip, ofs, len); + return chip->ops.unlock_area(chip, ofs, len); } /* Set default functions */ diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index 7caedaa5b9e5..7d1be53f27f3 100644 --- 
a/drivers/mtd/nand/raw/nand_hynix.c +++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -337,7 +337,7 @@ static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip, rr->nregs = nregs; rr->regs = hynix_1xnm_mlc_read_retry_regs; hynix->read_retry = rr; - chip->setup_read_retry = hynix_nand_setup_read_retry; + chip->ops.setup_read_retry = hynix_nand_setup_read_retry; chip->read_retries = nmodes; out: diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 09c254c97b5c..1472f925f386 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -130,7 +130,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip) return; chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES; - chip->setup_read_retry = macronix_nand_setup_read_retry; + chip->ops.setup_read_retry = macronix_nand_setup_read_retry; if (p->supports_set_get_features) { bitmap_set(p->set_feature_list, @@ -242,8 +242,8 @@ static void macronix_nand_block_protection_support(struct nand_chip *chip) bitmap_set(chip->parameters.set_feature_list, ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1); - chip->lock_area = mxic_nand_lock; - chip->unlock_area = mxic_nand_unlock; + chip->ops.lock_area = mxic_nand_lock; + chip->ops.unlock_area = mxic_nand_unlock; } static int nand_power_down_op(struct nand_chip *chip) @@ -312,8 +312,8 @@ static void macronix_nand_deep_power_down_support(struct nand_chip *chip) if (i < 0) return; - chip->suspend = mxic_nand_suspend; - chip->resume = mxic_nand_resume; + chip->ops.suspend = mxic_nand_suspend; + chip->ops.resume = mxic_nand_resume; } static int macronix_nand_init(struct nand_chip *chip) diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 3589b4fce0d4..4385092a9325 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -84,7 +84,7 @@ static int micron_nand_onfi_init(struct nand_chip *chip) struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor; chip->read_retries = micron->read_retry_options; - chip->setup_read_retry = micron_nand_setup_read_retry; + chip->ops.setup_read_retry = micron_nand_setup_read_retry; } if (p->supports_set_get_features) { diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index cea137778224..7f9be95ca8dc 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1027,16 +1027,31 @@ struct nand_legacy { struct nand_controller dummy_controller; }; +/** + * struct nand_chip_ops - NAND chip operations + * @suspend: Suspend operation + * @resume: Resume operation + * @lock_area: Lock operation + * @unlock_area: Unlock operation + * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + */ +struct nand_chip_ops { + int (*suspend)(struct nand_chip *chip); + void (*resume)(struct nand_chip *chip); + int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device + * @ops: NAND chip operations * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those * fields/hooks, you should consider reworking the driver * avoid using them. 
- * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for - * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform * @oob_poi: "poison value buffer," used for laying out OOB data @@ -1081,8 +1096,6 @@ struct nand_legacy { * @lock: lock protecting the suspended field. Also used to * serialize accesses to the NAND device. * @suspended: set to 1 when the device is suspended, 0 when it's not. - * @suspend: [REPLACEABLE] specific NAND device suspend operation - * @resume: [REPLACEABLE] specific NAND device resume operation * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1096,17 +1109,13 @@ struct nand_legacy { * @manufacturer: [INTERN] Contains manufacturer information * @manufacturer.desc: [INTERN] Contains manufacturer's description * @manufacturer.priv: [INTERN] Contains manufacturer private information - * @lock_area: [REPLACEABLE] specific NAND chip lock operation - * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation */ struct nand_chip { struct nand_device base; - + struct nand_chip_ops ops; struct nand_legacy legacy; - int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); - unsigned int options; unsigned int bbt_options; @@ -1138,8 +1147,6 @@ struct nand_chip { struct mutex lock; unsigned int suspended : 1; - int (*suspend)(struct nand_chip *chip); - void (*resume)(struct nand_chip *chip); u8 *oob_poi; struct nand_controller *controller; @@ -1159,9 +1166,6 @@ struct nand_chip { const struct nand_manufacturer *desc; void *priv; } manufacturer; - - int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); - int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3-59-g8ed1b From 271de009b7c0c1c15f63491a352ab08835462977 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:58 +0200 Subject: mtd: rawnand: Rename the manufacturer structure It is currently called nand_manufacturer but could actually be called nand_manufacturer_desc, like its instances, so that the former name is left unused for now. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 6 +++--- drivers/mtd/nand/raw/nand_base.c | 14 +++++++------- drivers/mtd/nand/raw/nand_ids.c | 16 ++++++++-------- include/linux/mtd/rawnand.h | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 03866b0aadea..a518acfd9b3f 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -53,12 +53,12 @@ struct nand_manufacturer_ops { }; /** - * struct nand_manufacturer - NAND Flash Manufacturer structure + * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor * @name: Manufacturer name * @id: manufacturer ID code of device. 
* @ops: manufacturer operations */ -struct nand_manufacturer { +struct nand_manufacturer_desc { int id; char *name; const struct nand_manufacturer_ops *ops; @@ -79,7 +79,7 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct mtd_pairing_scheme dist3_pairing_scheme; /* Core functions */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id); +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id); int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d9cb71e7c0ed..534ee75d0f2b 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -4810,9 +4810,9 @@ static void nand_manufacturer_cleanup(struct nand_chip *chip) } static const char * -nand_manufacturer_name(const struct nand_manufacturer *manufacturer) +nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) { - return manufacturer ? manufacturer->name : "Unknown"; + return manufacturer_desc ? manufacturer_desc->name : "Unknown"; } /* @@ -4820,7 +4820,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) */ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) { - const struct nand_manufacturer *manufacturer; + const struct nand_manufacturer_desc *manufacturer_desc; struct mtd_info *mtd = nand_to_mtd(chip); struct nand_memory_organization *memorg; int busw, ret; @@ -4877,8 +4877,8 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); /* Try to identify manufacturer */ - manufacturer = nand_get_manufacturer(maf_id); - chip->manufacturer.desc = manufacturer; + manufacturer_desc = nand_get_manufacturer_desc(maf_id); + chip->manufacturer.desc = manufacturer_desc; if (!type) type = nand_flash_ids; @@ -4957,7 +4957,7 @@ ident_done: */ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), mtd->name); pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); @@ -4992,7 +4992,7 @@ ident_done: pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), chip->parameters.model); pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", (int)(targetsize >> 20), nand_is_slc(chip) ? 
"SLC" : "MLC", diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index ba27902fc54b..e0dbc2e316c7 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -166,7 +166,7 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -static const struct nand_manufacturer nand_manufacturers[] = { +static const struct nand_manufacturer_desc nand_manufacturer_descs[] = { {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, {NAND_MFR_ATO, "ATO"}, {NAND_MFR_EON, "Eon"}, @@ -186,20 +186,20 @@ static const struct nand_manufacturer nand_manufacturers[] = { }; /** - * nand_get_manufacturer - Get manufacturer information from the manufacturer - * ID + * nand_get_manufacturer_desc - Get manufacturer information from the + * manufacturer ID * @id: manufacturer ID * - * Returns a pointer a nand_manufacturer object if the manufacturer is defined + * Returns a nand_manufacturer_desc object if the manufacturer is defined * in the NAND manufacturers database, NULL otherwise. */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id) +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id) { int i; - for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++) - if (nand_manufacturers[i].id == id) - return &nand_manufacturers[i]; + for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++) + if (nand_manufacturer_descs[i].id == id) + return &nand_manufacturer_descs[i]; return NULL; } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 7f9be95ca8dc..860d3c1020ef 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1163,7 +1163,7 @@ struct nand_chip { void *priv; struct { - const struct nand_manufacturer *desc; + const struct nand_manufacturer_desc *desc; void *priv; } manufacturer; }; -- cgit v1.2.3-59-g8ed1b From 36017af430e6b2fad0b2ee5476103706160f1379 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:59 +0200 Subject: mtd: rawnand: Declare the nand_manufacturer structure out of nand_chip Now that struct nand_manufacturer type is free, use it to store the nand_manufacturer_desc and the manufacturer's private data. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-6-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 860d3c1020ef..a3dfa36a9fd5 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1043,10 +1043,21 @@ struct nand_chip_ops { int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); }; +/** + * struct nand_manufacturer - NAND manufacturer structure + * @desc: The manufacturer description + * @priv: Private information for the manufacturer driver + */ +struct nand_manufacturer { + const struct nand_manufacturer_desc *desc; + void *priv; +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device * @ops: NAND chip operations + * @manufacturer: Manufacturer information * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those @@ -1106,13 +1117,11 @@ struct nand_chip_ops { * structure which is shared among multiple independent * devices. 
* @priv: [OPTIONAL] pointer to private chip data - * @manufacturer: [INTERN] Contains manufacturer information - * @manufacturer.desc: [INTERN] Contains manufacturer's description - * @manufacturer.priv: [INTERN] Contains manufacturer private information */ struct nand_chip { struct nand_device base; + struct nand_manufacturer manufacturer; struct nand_chip_ops ops; struct nand_legacy legacy; @@ -1161,11 +1170,6 @@ struct nand_chip { struct nand_bbt_descr *badblock_pattern; void *priv; - - struct { - const struct nand_manufacturer_desc *desc; - void *priv; - } manufacturer; }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3-59-g8ed1b From a63674c7cfe62221e05ba73107d9e15d73ff8bbd Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:00 +0200 Subject: mtd: rawnand: Reorganize the nand_chip structure Reorder fields in this structure and pack entries by theme: * The main descriptive structures * The data interface details * Bad block information * The device layout * Extra buffers matching the device layout * Internal values * External objects like the ECC controller, the ECC engine and a private data pointer. While at it, adapt the documentation style. I changed on purpose the description of @oob_poi which was weird. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-7-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 166 ++++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 90 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a3dfa36a9fd5..544ec8736793 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1055,120 +1055,106 @@ struct nand_manufacturer { /** * struct nand_chip - NAND Private Flash Chip Data - * @base: Inherit from the generic NAND device - * @ops: NAND chip operations - * @manufacturer: Manufacturer information - * @legacy: All legacy fields/hooks. If you develop a new driver, - * don't even try to use any of these fields/hooks, and if - * you're modifying an existing driver that is using those - * fields/hooks, you should consider reworking the driver - * avoid using them. - * @ecc: [BOARDSPECIFIC] ECC control structure - * @buf_align: minimum buffer alignment required by a platform - * @oob_poi: "poison value buffer," used for laying out OOB data - * before writing - * @page_shift: [INTERN] number of address bits in a page (column - * address bits). - * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock - * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry - * @chip_shift: [INTERN] number of address bits in one chip - * @options: [BOARDSPECIFIC] various chip options. They can partly - * be set to inform nand_scan about special functionality. - * See the defines for further explanation. - * @bbt_options: [INTERN] bad block specific options. All options used - * here must come from bbm.h. By default, these options - * will be copied to the appropriate nand_bbt_descr's. - * @badblockpos: [INTERN] position of the bad block marker in the oob - * area. - * @badblockbits: [INTERN] minimum number of set bits in a good block's - * bad block marker position; i.e., BBM == 11110111b is - * not bad when badblockbits == 7 - * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. 
This field is - * set to the actually used ONFI mode if the chip is - * ONFI compliant or deduced from the datasheet if - * the NAND chip is not ONFI compliant. - * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 - * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). - * @pagecache: Structure containing page cache related fields - * @pagecache.bitflips: Number of bitflips of the cached page - * @pagecache.page: Page number currently in the cache. -1 means no page is - * currently cached - * @subpagesize: [INTERN] holds the subpagesize - * @id: [INTERN] holds NAND ID - * @parameters: [INTERN] holds generic parameters under an easily - * readable form. - * @data_interface: [INTERN] NAND interface timing information - * @cur_cs: currently selected target. -1 means no target selected, - * otherwise we should always have cur_cs >= 0 && - * cur_cs < nanddev_ntargets(). NAND Controller drivers - * should not modify this value, but they're allowed to - * read it. - * @read_retries: [INTERN] the number of read retry modes supported - * @lock: lock protecting the suspended field. Also used to - * serialize accesses to the NAND device. - * @suspended: set to 1 when the device is suspended, 0 when it's not. - * @bbt: [INTERN] bad block table pointer - * @bbt_td: [REPLACEABLE] bad block table descriptor for flash - * lookup. - * @bbt_md: [REPLACEABLE] bad block table mirror descriptor - * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial - * bad block scan. - * @controller: [REPLACEABLE] a pointer to a hardware controller - * structure which is shared among multiple independent - * devices. - * @priv: [OPTIONAL] pointer to private chip data + * @base: Inherit from the generic NAND device + * @id: Holds NAND ID + * @parameters: Holds generic parameters under an easily readable form + * @manufacturer: Manufacturer information + * @ops: NAND chip operations + * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try + * to use any of these fields/hooks, and if you're modifying an + * existing driver that is using those fields/hooks, you should + * consider reworking the driver and avoid using them. + * @options: Various chip options. They can partly be set to inform nand_scan + * about special functionality. See the defines for further + * explanation. + * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the + * actually used ONFI mode if the chip is ONFI + * compliant or deduced from the datasheet otherwise + * @data_interface: NAND interface timing information + * @bbt_erase_shift: Number of address bits in a bbt entry + * @bbt_options: Bad block table specific options. All options used here must + * come from bbm.h. By default, these options will be copied to + * the appropriate nand_bbt_descr's. 
+ * @badblockpos: Bad block marker position in the oob area + * @badblockbits: Minimum number of set bits in a good block's bad block marker + * position; i.e., BBM = 11110111b is good when badblockbits = 7 + * @bbt_td: Bad block table descriptor for flash lookup + * @bbt_md: Bad block table mirror descriptor + * @badblock_pattern: Bad block scan pattern used for initial bad block scan + * @bbt: Bad block table pointer + * @page_shift: Number of address bits in a page (column address bits) + * @phys_erase_shift: Number of address bits in a physical eraseblock + * @chip_shift: Number of address bits in one chip + * @pagemask: Page number mask = number of (pages / chip) - 1 + * @subpagesize: Holds the subpagesize + * @data_buf: Buffer for data, size is (page size + oobsize) + * @oob_poi: pointer on the OOB area covered by data_buf + * @pagecache: Structure containing page cache related fields + * @pagecache.bitflips: Number of bitflips of the cached page + * @pagecache.page: Page number currently in the cache. -1 means no page is + * currently cached + * @buf_align: Minimum buffer alignment required by a platform + * @lock: Lock protecting the suspended field. Also used to serialize accesses + * to the NAND device + * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @cur_cs: Currently selected target. -1 means no target selected, otherwise we + * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). + * NAND Controller drivers should not modify this value, but they're + * allowed to read it. + * @read_retries: The number of read retry modes supported + * @controller: The hardware controller structure which is shared among multiple + * independent devices + * @ecc: The ECC controller structure + * @priv: Chip private data */ - struct nand_chip { struct nand_device base; + struct nand_id id; + struct nand_parameters parameters; struct nand_manufacturer manufacturer; struct nand_chip_ops ops; struct nand_legacy legacy; - unsigned int options; + + /* Data interface */ + int onfi_timing_mode_default; + struct nand_data_interface data_interface; + + /* Bad block information */ + unsigned int bbt_erase_shift; unsigned int bbt_options; + unsigned int badblockpos; + unsigned int badblockbits; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + struct nand_bbt_descr *badblock_pattern; + u8 *bbt; + /* Device internal layout */ unsigned int page_shift; unsigned int phys_erase_shift; - unsigned int bbt_erase_shift; unsigned int chip_shift; unsigned int pagemask; - u8 *data_buf; + unsigned int subpagesize; + /* Buffers */ + u8 *data_buf; + u8 *oob_poi; struct { unsigned int bitflips; int page; } pagecache; + unsigned long buf_align; - unsigned int subpagesize; - int onfi_timing_mode_default; - unsigned int badblockpos; - unsigned int badblockbits; - - struct nand_id id; - struct nand_parameters parameters; - - struct nand_data_interface data_interface; - - int cur_cs; - - int read_retries; - + /* Internals */ struct mutex lock; unsigned int suspended : 1; + int cur_cs; + int read_retries; - u8 *oob_poi; + /* Externals */ struct nand_controller *controller; - struct nand_ecc_ctrl ecc; - unsigned long buf_align; - - u8 *bbt; - struct nand_bbt_descr *bbt_td; - struct nand_bbt_descr *bbt_md; - - struct nand_bbt_descr *badblock_pattern; - void *priv; }; -- cgit v1.2.3-59-g8ed1b From e0160cd41fb81fde9ee4612a7ea2dfd631de2638 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:12 +0200 Subject: mtd: rawnand: Hide the chip->data_interface 
indirection As a preparation for allocating the data interface structure dynamically (and rename it), let's avoid accessing chip->data_interface directly. Instead, we introduce a helper, nand_get_interface_config(), and use it to retrieve the current data interface configuration out of a nand_chip object. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-19-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 9 ++++++--- drivers/mtd/nand/raw/meson_nand.c | 8 ++++---- drivers/mtd/nand/raw/nand_base.c | 34 +++++++++++++++++----------------- drivers/mtd/nand/raw/nand_legacy.c | 5 ++++- drivers/mtd/nand/raw/nand_toshiba.c | 2 +- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2 +- drivers/mtd/nand/raw/tango_nand.c | 2 +- include/linux/mtd/rawnand.h | 11 +++++++++++ 8 files changed, 45 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 260a0430313e..df859889e4eb 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1096,6 +1096,8 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, const u8 *oob_buf, bool raw, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; @@ -1141,7 +1143,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, return ret; ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + PSEC_TO_MSEC(sdr->tPROG_max)); return ret; } @@ -1562,6 +1564,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct mtd_info *mtd = nand_to_mtd(chip); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; const u8 *data = buf; @@ -1598,8 +1602,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, marvell_nfc_wait_ndrun(chip); } - ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max)); marvell_nfc_disable_hw_ecc(chip); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 3f376471f3f7..580b7be0719f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -573,10 +573,10 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, int page, bool in) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(nand)); struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); u32 *addrs = nfc->cmdfifo.rw.addrs; u32 cs = nfc->param.chip_select; u32 cmd0, cmd_num, row_start; @@ -626,9 +626,9 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, static int meson_nfc_write_page_sub(struct nand_chip *nand, int page, int raw) { - struct mtd_info *mtd = nand_to_mtd(nand); const struct nand_sdr_timings *sdr = - 
nand_get_sdr_timings(&nand->data_interface); + nand_get_sdr_timings(nand_get_interface_config(nand)); + struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); int data_len, info_len; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index b4de85794e07..7d393e1d0252 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -773,7 +773,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) return -ENOTSUPP; /* Wait tWB before polling the STATUS reg. */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); ret = nand_status_op(chip, NULL); @@ -1119,9 +1119,9 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[4]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1163,7 +1163,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int len) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[5]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1260,7 +1260,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PARAM, 0), NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1315,7 +1315,7 @@ int nand_change_read_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2] = {}; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDOUT, 0), @@ -1389,9 +1389,9 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len, bool prog) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[5] = {}; struct nand_op_instr instrs[] = { /* @@ -1514,7 +1514,7 @@ int nand_prog_page_end_op(struct nand_chip *chip) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1621,7 +1621,7 @@ int nand_change_write_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDIN, 0), @@ -1676,7 +1676,7 @@ int nand_readid_op(struct nand_chip 
*chip, u8 addr, void *buf, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READID, 0), NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1715,7 +1715,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_STATUS, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1784,7 +1784,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[3] = { page, page >> 8, page >> 16 }; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_ERASE1, 0), @@ -1843,7 +1843,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1890,7 +1890,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1947,7 +1947,7 @@ int nand_reset_op(struct nand_chip *chip) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)), NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0), @@ -3226,7 +3226,7 @@ static void nand_wait_readrdy(struct nand_chip *chip) if (!(chip->options & NAND_NEED_READRDY)) return; - sdr = nand_get_sdr_timings(&chip->data_interface); + sdr = nand_get_sdr_timings(nand_get_interface_config(chip)); WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0)); } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 848403dcae03..fe769762e1d8 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -354,6 +354,9 @@ static void nand_command(struct nand_chip *chip, unsigned int command, static void nand_ccs_delay(struct nand_chip *chip) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); + /* * The controller already takes care of waiting for tCCS when the RNDIN * or RNDOUT command is sent, return directly. @@ -366,7 +369,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * (which should be safe for all NANDs). 
*/ if (nand_controller_can_setup_data_iface(chip)) - ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000); + ndelay(sdr->tCCS_min / 1000); else ndelay(500); } diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index ae069905d7e4..333037bdca41 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -33,7 +33,7 @@ static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ, PSEC_TO_NSEC(sdr->tADL_min)), diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 65c9d17b25a3..7320c0fc19ec 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1308,7 +1308,7 @@ static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip, dev_warn(nfc->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); /* R/B# signal is low, clear high level flag */ diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index b3a0d08f1733..648dc7e77f6a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -336,7 +336,7 @@ static int tango_write_page(struct nand_chip *chip, const u8 *buf, if (err) return err; - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max)); if (err) return err; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 544ec8736793..0852df941130 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1203,6 +1203,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) return mtd_get_of_node(nand_to_mtd(chip)); } +/** + * nand_get_interface_config - Retrieve the current interface configuration + * of a NAND chip + * @chip: The NAND chip + */ +static inline const struct nand_data_interface * +nand_get_interface_config(struct nand_chip *chip) +{ + return &chip->data_interface; +} + /* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page -- cgit v1.2.3-59-g8ed1b From 4c46667b3d67253604ee42840917844548c86657 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:13 +0200 Subject: mtd: rawnand: s/data_interface/interface_config/ The name/suffix data_interface is a bit misleading in that the field or functions actually represent a configuration that can be applied by the controller/chip. Let's rename all fields/functions/hooks that are worth renaming. 
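As a purely illustrative sketch (not part of this patch; the foo_nfc_ prefix
is made up), a controller driver's ->setup_interface() hook typically ends up
looking like this after the rename:

  static int foo_nfc_setup_interface(struct nand_chip *chip, int csline,
                                     const struct nand_interface_config *conf)
  {
          const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

          if (IS_ERR(sdr))
                  return PTR_ERR(sdr);

          /* Only check that the timings are supported, do not apply them. */
          if (csline == NAND_DATA_IFACE_CHECK_ONLY)
                  return 0;

          /* Program the controller timing registers from *sdr here. */
          return 0;
  }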
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/ams-delta.c | 6 +-- drivers/mtd/nand/raw/arasan-nand-controller.c | 6 +-- drivers/mtd/nand/raw/atmel/nand-controller.c | 34 ++++++------- drivers/mtd/nand/raw/cadence-nand-controller.c | 6 +-- drivers/mtd/nand/raw/denali.c | 8 +-- drivers/mtd/nand/raw/fsmc_nand.c | 6 +-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 6 +-- drivers/mtd/nand/raw/internals.h | 12 ++--- drivers/mtd/nand/raw/marvell_nand.c | 9 ++-- drivers/mtd/nand/raw/meson_nand.c | 6 +-- drivers/mtd/nand/raw/mtk_nand.c | 6 +-- drivers/mtd/nand/raw/mxc_nand.c | 20 ++++---- drivers/mtd/nand/raw/mxic_nand.c | 6 +-- drivers/mtd/nand/raw/nand_base.c | 69 +++++++++++++------------- drivers/mtd/nand/raw/nand_legacy.c | 2 +- drivers/mtd/nand/raw/nand_timings.c | 17 ++++--- drivers/mtd/nand/raw/s3c2410.c | 6 +-- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 4 +- drivers/mtd/nand/raw/sunxi_nand.c | 6 +-- drivers/mtd/nand/raw/tango_nand.c | 4 +- drivers/mtd/nand/raw/tegra_nand.c | 6 +-- include/linux/mtd/rawnand.h | 33 ++++++------ 22 files changed, 139 insertions(+), 139 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 3711e7a0436c..fdba155416d2 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -191,8 +191,8 @@ static int gpio_nand_exec_op(struct nand_chip *this, return ret; } -static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, - const struct nand_data_interface *cf) +static int gpio_nand_setup_interface(struct nand_chip *this, int csline, + const struct nand_interface_config *cf) { struct gpio_nand *priv = nand_get_controller_data(this); const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf); @@ -217,7 +217,7 @@ static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, static const struct nand_controller_ops gpio_nand_ops = { .exec_op = gpio_nand_exec_op, - .setup_data_interface = gpio_nand_setup_data_interface, + .setup_interface = gpio_nand_setup_interface, }; /* diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 7141dcccba3c..12c643e97c85 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -854,8 +854,8 @@ static int anfc_exec_op(struct nand_chip *chip, return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only); } -static int anfc_setup_data_interface(struct nand_chip *chip, int target, - const struct nand_data_interface *conf) +static int anfc_setup_interface(struct nand_chip *chip, int target, + const struct nand_interface_config *conf) { struct anand *anand = to_anand(chip); struct arasan_nfc *nfc = to_anfc(chip->controller); @@ -1083,7 +1083,7 @@ static void anfc_detach_chip(struct nand_chip *chip) static const struct nand_controller_ops anfc_ops = { .exec_op = anfc_exec_op, - .setup_data_interface = anfc_setup_data_interface, + .setup_interface = anfc_setup_interface, .attach_chip = anfc_attach_chip, .detach_chip = anfc_detach_chip, }; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 46a3724a788e..c9818f548d07 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -200,8 +200,8 @@ struct atmel_nand_controller_ops { void (*nand_init)(struct atmel_nand_controller *nc, struct atmel_nand *nand); int (*ecc_init)(struct nand_chip *chip); - int 
(*setup_data_interface)(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct atmel_nand *nand, int csline, + const struct nand_interface_config *conf); }; struct atmel_nand_controller_caps { @@ -1168,7 +1168,7 @@ static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip) } static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, - const struct nand_data_interface *conf, + const struct nand_interface_config *conf, struct atmel_smc_cs_conf *smcconf) { u32 ncycles, totalcycles, timeps, mckperiodps; @@ -1397,9 +1397,9 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, return 0; } -static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_smc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1422,9 +1422,9 @@ static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_hsmc_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1452,8 +1452,8 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int atmel_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct atmel_nand *nand = to_atmel_nand(chip); struct atmel_nand_controller *nc; @@ -1464,7 +1464,7 @@ static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY)) return -EINVAL; - return nc->caps->ops->setup_data_interface(nand, csline, conf); + return nc->caps->ops->setup_interface(nand, csline, conf); } static void atmel_nand_init(struct atmel_nand_controller *nc, @@ -1483,7 +1483,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, chip->legacy.write_buf = atmel_nand_write_buf; chip->legacy.select_chip = atmel_nand_select_chip; - if (!nc->mck || !nc->caps->ops->setup_data_interface) + if (!nc->mck || !nc->caps->ops->setup_interface) chip->options |= NAND_KEEP_TIMINGS; /* Some NANDs require a longer delay than the default one (20us). 
*/ @@ -1956,7 +1956,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops atmel_nand_controller_ops = { .attach_chip = atmel_nand_attach_chip, - .setup_data_interface = atmel_nand_setup_data_interface, + .setup_interface = atmel_nand_setup_interface, }; static int atmel_nand_controller_init(struct atmel_nand_controller *nc, @@ -2318,7 +2318,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = { .remove = atmel_hsmc_nand_controller_remove, .ecc_init = atmel_hsmc_nand_ecc_init, .nand_init = atmel_hsmc_nand_init, - .setup_data_interface = atmel_hsmc_nand_setup_data_interface, + .setup_interface = atmel_hsmc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = { @@ -2375,10 +2375,10 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc) /* * The SMC reg layout of at91rm9200 is completely different which prevents us - * from re-using atmel_smc_nand_setup_data_interface() for the - * ->setup_data_interface() hook. + * from re-using atmel_smc_nand_setup_interface() for the + * ->setup_interface() hook. * At this point, there's no support for the at91rm9200 SMC IP, so we leave - * ->setup_data_interface() unassigned. + * ->setup_interface() unassigned. */ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = { .probe = atmel_smc_nand_controller_probe, @@ -2399,7 +2399,7 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { .remove = atmel_smc_nand_controller_remove, .ecc_init = atmel_nand_ecc_init, .nand_init = atmel_smc_nand_init, - .setup_data_interface = atmel_smc_nand_setup_data_interface, + .setup_interface = atmel_smc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = { diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index c405722adfe1..574cbd3446e5 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2303,8 +2303,8 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, } static int -cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr; struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); @@ -2690,7 +2690,7 @@ static int cadence_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops cadence_nand_controller_ops = { .attach_chip = cadence_nand_attach_chip, .exec_op = cadence_nand_exec_op, - .setup_data_interface = cadence_nand_setup_data_interface, + .setup_interface = cadence_nand_setup_interface, }; static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 4e6e1578aa2d..9d99dade95ce 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -761,8 +761,8 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf, return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true); } -static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int denali_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { static const unsigned int data_setup_on_host = 10000; struct 
denali_controller *denali = to_denali_controller(chip); @@ -1173,7 +1173,7 @@ static int denali_exec_op(struct nand_chip *chip, static const struct nand_controller_ops denali_controller_ops = { .attach_chip = denali_attach_chip, .exec_op = denali_exec_op, - .setup_data_interface = denali_setup_data_interface, + .setup_interface = denali_setup_interface, }; int denali_chip_init(struct denali_controller *denali, @@ -1230,7 +1230,7 @@ int denali_chip_init(struct denali_controller *denali, chip->buf_align = 16; } - /* clk rate info is needed for setup_data_interface */ + /* clk rate info is needed for setup_interface */ if (!denali->clk_rate || !denali->clk_x_rate) chip->options |= NAND_KEEP_TIMINGS; diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 3909752b14c5..92ddc41d0ff0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -327,8 +327,8 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, return 0; } -static int fsmc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int fsmc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct fsmc_nand_data *host = nand_to_fsmc(nand); struct fsmc_nand_timings tims; @@ -951,7 +951,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops fsmc_nand_controller_ops = { .attach_chip = fsmc_nand_attach_chip, .exec_op = fsmc_exec_op, - .setup_data_interface = fsmc_setup_data_interface, + .setup_interface = fsmc_setup_interface, }; /** diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 061a8ddda275..5d4aee46cc55 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -736,8 +736,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) udelay(dll_wait_time_us); } -static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct gpmi_nand_data *this = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -2400,7 +2400,7 @@ unmap: static const struct nand_controller_ops gpmi_nand_controller_ops = { .attach_chip = gpmi_nand_attach_chip, - .setup_data_interface = gpmi_setup_data_interface, + .setup_interface = gpmi_setup_interface, .exec_op = gpmi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index f851ed210d70..114c63a6a349 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -84,10 +84,10 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, int allowbbt); -int onfi_fill_data_interface(struct nand_chip *chip, - struct nand_data_interface *iface, - enum nand_data_interface_type type, - unsigned int timing_mode); +int onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode); unsigned int onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); @@ -133,10 +133,10 @@ static inline int nand_exec_op(struct nand_chip *chip, return 
chip->controller->ops->exec_op(chip, op, false); } -static inline bool nand_controller_can_setup_data_iface(struct nand_chip *chip) +static inline bool nand_controller_can_setup_interface(struct nand_chip *chip) { if (!chip->controller || !chip->controller->ops || - !chip->controller->ops->setup_data_interface) + !chip->controller->ops->setup_interface) return false; if (chip->options & NAND_KEEP_TIMINGS) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index df859889e4eb..8482d3bd8b1f 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2308,9 +2308,8 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = bbt_mirror_pattern }; -static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface - *conf) +static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -2511,7 +2510,7 @@ static int marvell_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops marvell_nand_controller_ops = { .attach_chip = marvell_nand_attach_chip, .exec_op = marvell_nfc_exec_op, - .setup_data_interface = marvell_nfc_setup_data_interface, + .setup_interface = marvell_nfc_setup_interface, }; static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, @@ -2647,7 +2646,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, /* * Save a reference value for timing registers before - * ->setup_data_interface() is called. + * ->setup_interface() is called. */ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0); marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 580b7be0719f..0e5829a2b54f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1097,8 +1097,8 @@ static int meson_chip_buffer_init(struct nand_chip *nand) } static -int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +int meson_nfc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); const struct nand_sdr_timings *timings; @@ -1222,7 +1222,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops meson_nand_controller_ops = { .attach_chip = meson_nand_attach_chip, .detach_chip = meson_nand_detach_chip, - .setup_data_interface = meson_nfc_setup_data_interface, + .setup_interface = meson_nfc_setup_interface, .exec_op = meson_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index ca8457626d53..ad1b55dab211 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -531,8 +531,8 @@ static int mtk_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtk_nfc *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *timings; @@ -1357,7 +1357,7 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip) static const 
struct nand_controller_ops mtk_nfc_controller_ops = { .attach_chip = mtk_nfc_attach_chip, - .setup_data_interface = mtk_nfc_setup_data_interface, + .setup_interface = mtk_nfc_setup_interface, .exec_op = mtk_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 09dacb83cb5a..07c41e8bae2d 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -137,8 +137,8 @@ struct mxc_nand_devtype_data { u32 (*get_ecc_status)(struct mxc_nand_host *); const struct mtd_ooblayout_ops *ooblayout; void (*select_chip)(struct nand_chip *chip, int cs); - int (*setup_data_interface)(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf); void (*enable_hwecc)(struct nand_chip *chip, bool enable); /* @@ -1139,8 +1139,8 @@ static void preset_v1(struct mtd_info *mtd) writew(0x4, NFC_V1_V2_WRPROT); } -static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); int tRC_min_ns, tRC_ps, ret; @@ -1521,7 +1521,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { .get_ecc_status = get_ecc_status_v2, .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v2, - .setup_data_interface = mxc_nand_v2_setup_data_interface, + .setup_interface = mxc_nand_v2_setup_interface, .enable_hwecc = mxc_nand_enable_hwecc_v1_v2, .irqpending_quirk = 0, .needs_ip = 0, @@ -1738,17 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip) return 0; } -static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); - return host->devtype_data->setup_data_interface(chip, chipnr, conf); + return host->devtype_data->setup_interface(chip, chipnr, conf); } static const struct nand_controller_ops mxcnd_controller_ops = { .attach_chip = mxcnd_attach_chip, - .setup_data_interface = mxcnd_setup_data_interface, + .setup_interface = mxcnd_setup_interface, }; static int mxcnd_probe(struct platform_device *pdev) @@ -1809,7 +1809,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (err < 0) return err; - if (!host->devtype_data->setup_data_interface) + if (!host->devtype_data->setup_interface) this->options |= NAND_KEEP_TIMINGS; if (host->devtype_data->needs_ip) { diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index 57f36721f4c6..d66b5b0971fa 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -451,8 +451,8 @@ static int mxic_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -480,7 +480,7 @@ static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, static const struct nand_controller_ops mxic_nand_controller_ops = { 
.exec_op = mxic_nfc_exec_op, - .setup_data_interface = mxic_nfc_setup_data_interface, + .setup_interface = mxic_nfc_setup_interface, }; static int mxic_nfc_probe(struct platform_device *pdev) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 7d393e1d0252..4fa18fb68d62 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -898,7 +898,7 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) } /** - * nand_reset_data_interface - Reset data interface and timings + * nand_reset_interface - Reset data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -906,11 +906,12 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) * * Returns 0 for success or negative error code otherwise. */ -static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) +static int nand_reset_interface(struct nand_chip *chip, int chipnr) { + const struct nand_controller_ops *ops = chip->controller->ops; int ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -927,9 +928,9 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_data_interface(chip, &chip->data_interface, NAND_SDR_IFACE, 0); - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + onfi_fill_interface_config(chip, &chip->interface_config, + NAND_SDR_IFACE, 0); + ret = ops->setup_interface(chip, chipnr, &chip->interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -937,7 +938,7 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) } /** - * nand_setup_data_interface - Setup the best data interface and timings + * nand_setup_interface - Setup the best data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -946,13 +947,13 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * * Returns 0 for success or negative error code otherwise. */ -static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) +static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 mode = chip->data_interface.timings.mode; + u8 mode = chip->interface_config.timings.mode; u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, }; int ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* Change the mode on the chip side (if supported by the NAND chip) */ @@ -966,8 +967,8 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + ret = chip->controller->ops->setup_interface(chip, chipnr, + &chip->interface_config); if (ret) return ret; @@ -996,7 +997,7 @@ err_reset_chip: * Fallback to mode 0 if the chip explicitly did not ack the chosen * timing mode. 
*/ - nand_reset_data_interface(chip, chipnr); + nand_reset_interface(chip, chipnr); nand_select_target(chip, chipnr); nand_reset_op(chip); nand_deselect_target(chip); @@ -1005,7 +1006,7 @@ err_reset_chip: } /** - * nand_choose_data_interface - find the best data interface and timings + * nand_choose_interface_config - find the best data interface and timings * @chip: The NAND chip * * Find the best data interface and NAND timings supported by the chip @@ -1013,16 +1014,16 @@ err_reset_chip: * First tries to retrieve supported timing modes from ONFI information, * and if the NAND chip does not support ONFI, relies on the * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->data_interface is initialized with the best timing mode + * function nand_chip->interface_ is initialized with the best timing mode * available. * * Returns 0 for success or negative error code otherwise. */ -static int nand_choose_data_interface(struct nand_chip *chip) +static int nand_choose_interface_config(struct nand_chip *chip) { int modes, mode, ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -1040,8 +1041,8 @@ static int nand_choose_data_interface(struct nand_chip *chip) } for (mode = fls(modes) - 1; mode >= 0; mode--) { - ret = onfi_fill_data_interface(chip, &chip->data_interface, - NAND_SDR_IFACE, mode); + ret = onfi_fill_interface_config(chip, &chip->interface_config, + NAND_SDR_IFACE, mode); if (ret) continue; @@ -1049,9 +1050,9 @@ static int nand_choose_data_interface(struct nand_chip *chip) * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the * controller supports the requested timings. */ - ret = chip->controller->ops->setup_data_interface(chip, + ret = chip->controller->ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, - &chip->data_interface); + &chip->interface_config); if (!ret) { chip->onfi_timing_mode_default = mode; break; @@ -2477,17 +2478,17 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); * @chipnr: Internal die id * * Save the timings data structure, then apply SDR timings mode 0 (see - * nand_reset_data_interface for details), do the reset operation, and - * apply back the previous timings. + * nand_reset_interface for details), do the reset operation, and apply + * back the previous timings. * * Returns 0 on success, a negative error code otherwise. */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_data_interface saved_data_intf = chip->data_interface; + struct nand_interface_config saved_intf_config = chip->interface_config; int ret; - ret = nand_reset_data_interface(chip, chipnr); + ret = nand_reset_interface(chip, chipnr); if (ret) return ret; @@ -2503,18 +2504,18 @@ int nand_reset(struct nand_chip *chip, int chipnr) return ret; /* - * A nand_reset_data_interface() put both the NAND chip and the NAND + * A nand_reset_interface() put both the NAND chip and the NAND * controller in timings mode 0. If the default mode for this chip is * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_data_interface() uses ->set/get_features() which would + * nand_setup_interface() uses ->set/get_features() which would * fail anyway as the parameter page is not available yet. 
*/ - if (!memcmp(&chip->data_interface, &saved_data_intf, - sizeof(saved_data_intf))) + if (!memcmp(&chip->interface_config, &saved_intf_config, + sizeof(saved_intf_config))) return 0; - chip->data_interface = saved_data_intf; - ret = nand_setup_data_interface(chip, chipnr); + chip->interface_config = saved_intf_config; + ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -5183,7 +5184,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_data_interface(chip, &chip->data_interface, NAND_SDR_IFACE, 0); + onfi_fill_interface_config(chip, &chip->interface_config, NAND_SDR_IFACE, 0); ret = nand_dt_init(chip); if (ret) @@ -5971,13 +5972,13 @@ static int nand_scan_tail(struct nand_chip *chip) mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); /* Find the fastest data interface for this chip */ - ret = nand_choose_data_interface(chip); + ret = nand_choose_interface_config(chip); if (ret) goto err_nanddev_cleanup; /* Enter fastest possible mode on all dies. */ for (i = 0; i < nanddev_ntargets(&chip->base); i++) { - ret = nand_setup_data_interface(chip, i); + ret = nand_setup_interface(chip, i); if (ret) goto err_nanddev_cleanup; } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index fe769762e1d8..2bcc03714432 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -368,7 +368,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * Wait tCCS_min if it is correctly defined, otherwise wait 500ns * (which should be safe for all NANDs). */ - if (nand_controller_can_setup_data_iface(chip)) + if (nand_controller_can_setup_interface(chip)) ndelay(sdr->tCCS_min / 1000); else ndelay(500); diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index efff3583c549..bf05b4bceaa0 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -19,7 +19,7 @@ * * These four values are tweaked to be more accurate in the case of ONFI chips. 
*/ -static const struct nand_data_interface onfi_sdr_timings[] = { +static const struct nand_interface_config onfi_sdr_timings[] = { /* Mode 0 */ { .type = NAND_SDR_IFACE, @@ -340,16 +340,17 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings) } /** - * onfi_fill_data_interface - Initialize a data interface from a given ONFI mode + * onfi_fill_interface_config - Initialize an interface config from a given + * ONFI mode * @chip: The NAND chip - * @iface: The data interface to fill - * @type: The data interface type + * @iface: The interface configuration to fill + * @type: The interface type * @timing_mode: The ONFI timing mode */ -int onfi_fill_data_interface(struct nand_chip *chip, - struct nand_data_interface *iface, - enum nand_data_interface_type type, - unsigned int timing_mode) +int onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode) { struct onfi_params *onfi = chip->parameters.onfi; diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index f86dff311464..f121a3ae294c 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -808,8 +808,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, return -ENODEV; } -static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtd_info *mtd = nand_to_mtd(chip); struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); @@ -999,7 +999,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops s3c24xx_nand_controller_ops = { .attach_chip = s3c2410_nand_attach_chip, - .setup_data_interface = s3c2410_nand_setup_data_interface, + .setup_interface = s3c2410_nand_setup_interface, }; static const struct of_device_id s3c24xx_nand_dt_ids[] = { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 7320c0fc19ec..a4140af43ed4 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1546,7 +1546,7 @@ static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, } static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdrt; @@ -1764,7 +1764,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = { .attach_chip = stm32_fmc2_nfc_attach_chip, .exec_op = stm32_fmc2_nfc_exec_op, - .setup_data_interface = stm32_fmc2_nfc_setup_interface, + .setup_interface = stm32_fmc2_nfc_setup_interface, }; static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index ffbc1651fadc..9c50c2b965e1 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1376,8 +1376,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, #define sunxi_nand_lookup_timing(l, p, c) \ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) -static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int sunxi_nfc_setup_interface(struct nand_chip 
*nand, int csline, + const struct nand_interface_config *conf) { struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); @@ -1920,7 +1920,7 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand, static const struct nand_controller_ops sunxi_nand_controller_ops = { .attach_chip = sunxi_nand_attach_chip, - .setup_data_interface = sunxi_nfc_setup_data_interface, + .setup_interface = sunxi_nfc_setup_interface, .exec_op = sunxi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index 648dc7e77f6a..bdb965ae7a4a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -515,7 +515,7 @@ static u32 to_ticks(int kHz, int ps) } static int tango_set_timings(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf); struct tango_nfc *nfc = to_tango_nfc(chip->controller); @@ -565,7 +565,7 @@ static int tango_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tango_controller_ops = { .attach_chip = tango_attach_chip, - .setup_data_interface = tango_set_timings, + .setup_interface = tango_set_timings, .exec_op = tango_exec_op, }; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index f9d046b2cd3b..6b6212ffa01c 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -813,8 +813,8 @@ static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl, writel_relaxed(reg, ctrl->regs + TIMING_2); } -static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int tegra_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller); const struct nand_sdr_timings *timings; @@ -1053,7 +1053,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tegra_nand_controller_ops = { .attach_chip = &tegra_nand_attach_chip, .exec_op = tegra_nand_exec_op, - .setup_data_interface = tegra_nand_setup_data_interface, + .setup_interface = tegra_nand_setup_interface, }; static int tegra_nand_chips_init(struct device *dev, diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 0852df941130..2ca56eef0f07 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -492,22 +492,22 @@ struct nand_sdr_timings { }; /** - * enum nand_data_interface_type - NAND interface timing type + * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface */ -enum nand_data_interface_type { +enum nand_interface_type { NAND_SDR_IFACE, }; /** - * struct nand_data_interface - NAND interface timing + * struct nand_interface_config - NAND interface timing * @type: type of the timing * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. 
*/ -struct nand_data_interface { - enum nand_data_interface_type type; +struct nand_interface_config { + enum nand_interface_type type; struct nand_timings { unsigned int mode; union { @@ -521,7 +521,7 @@ struct nand_data_interface { * @conf: The data interface */ static inline const struct nand_sdr_timings * -nand_get_sdr_timings(const struct nand_data_interface *conf) +nand_get_sdr_timings(const struct nand_interface_config *conf) { if (conf->type != NAND_SDR_IFACE) return ERR_PTR(-EINVAL); @@ -944,11 +944,10 @@ static inline void nand_op_trace(const char *prefix, * This method replaces chip->legacy.cmdfunc(), * chip->legacy.{read,write}_{buf,byte,word}(), * chip->legacy.dev_ready() and chip->legacy.waifunc(). - * @setup_data_interface: setup the data interface and timing. If - * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this - * means the configuration should not be applied but - * only checked. - * This hook is optional. + * @setup_interface: setup the data interface and timing. If chipnr is set to + * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration + * should not be applied but only checked. + * This hook is optional. */ struct nand_controller_ops { int (*attach_chip)(struct nand_chip *chip); @@ -956,8 +955,8 @@ struct nand_controller_ops { int (*exec_op)(struct nand_chip *chip, const struct nand_operation *op, bool check_only); - int (*setup_data_interface)(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf); }; /** @@ -1070,7 +1069,7 @@ struct nand_manufacturer { * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the * actually used ONFI mode if the chip is ONFI * compliant or deduced from the datasheet otherwise - * @data_interface: NAND interface timing information + * @interface_config: NAND interface timing information * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must * come from bbm.h. By default, these options will be copied to @@ -1118,7 +1117,7 @@ struct nand_chip { /* Data interface */ int onfi_timing_mode_default; - struct nand_data_interface data_interface; + struct nand_interface_config interface_config; /* Bad block information */ unsigned int bbt_erase_shift; @@ -1208,10 +1207,10 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * of a NAND chip * @chip: The NAND chip */ -static inline const struct nand_data_interface * +static inline const struct nand_interface_config * nand_get_interface_config(struct nand_chip *chip) { - return &chip->data_interface; + return &chip->interface_config; } /* -- cgit v1.2.3-59-g8ed1b From 26d014f0400e5ff54cc80c8329e3adbd74db1e04 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:16 +0200 Subject: mtd: rawnand: Add the ->choose_interface_config() hook This hook can be overloaded by NAND manufacturer drivers to propose alternative timings when not following the main standards. In this case, the manufacturer drivers is responsible for choosing the best interface configuration that fits both the controller and chip capabilities. 
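A minimal, hypothetical sketch of such an overload follows (the vendor_ names
are invented, and it assumes the manufacturer driver sits next to the raw NAND
core so it can use nand_choose_best_sdr_timings() from internals.h; a real
driver would usually pass its own struct nand_sdr_timings instead of NULL):

  static int vendor_nand_choose_interface_config(struct nand_chip *chip,
                                                 struct nand_interface_config *iface)
  {
          /*
           * Let the core pick the fastest SDR mode accepted by both the
           * chip and the controller, optionally seeded with vendor
           * specific timings passed as the last argument.
           */
          return nand_choose_best_sdr_timings(chip, iface, NULL);
  }

The manufacturer ->init() code would then simply set:

  chip->ops.choose_interface_config = vendor_nand_choose_interface_config;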
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-23-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 17 +++++++++++------ include/linux/mtd/rawnand.h | 3 +++ 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 742d099df5c6..2f4eba1a1082 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1066,18 +1066,23 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, * @chip: The NAND chip * * Find the best data interface and NAND timings supported by the chip - * and the driver. - * First tries to retrieve supported timing modes from ONFI information, - * and if the NAND chip does not support ONFI, relies on the - * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->interface_ is initialized with the best timing mode - * available. + * and the driver. Eventually let the NAND manufacturer driver propose his own + * set of timings. + * + * After this function nand_chip->interface_config is initialized with the best + * timing mode available. + * + * Returns 0 for success or negative error code otherwise. */ static int nand_choose_interface_config(struct nand_chip *chip) { if (!nand_controller_can_setup_interface(chip)) return 0; + if (chip->ops.choose_interface_config) + return chip->ops.choose_interface_config(chip, + &chip->interface_config); + return nand_choose_best_sdr_timings(chip, &chip->interface_config, NULL); } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2ca56eef0f07..316a02189da1 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1033,6 +1033,7 @@ struct nand_legacy { * @lock_area: Lock operation * @unlock_area: Unlock operation * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + * @choose_interface_config: Choose the best interface configuration */ struct nand_chip_ops { int (*suspend)(struct nand_chip *chip); @@ -1040,6 +1041,8 @@ struct nand_chip_ops { int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + int (*choose_interface_config)(struct nand_chip *chip, + struct nand_interface_config *iface); }; /** -- cgit v1.2.3-59-g8ed1b From a69ad11168dca68b3f0adc6882422f4a2e2cb050 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:21 +0200 Subject: mtd: rawnand: Get rid of the default ONFI timing mode The ->choose_interface() hook is here for manufacturer drivers to provide a better timing interface than the default one, this field is not needed anymore. 
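For ONFI compliant chips the fastest mode keeps being derived from the
parameter page, roughly as in this illustrative fragment (it assumes
chip->parameters.onfi has already been checked to be non-NULL):

  /* The highest bit set in the supported-modes bitmask is the fastest mode. */
  unsigned int onfi_modes = chip->parameters.onfi->async_timing_mode;
  int best_mode = fls(onfi_modes) - 1;   /* e.g. 0x3f -> mode 5 */

Chips that are not ONFI compliant can now rely on their manufacturer driver's
->choose_interface_config() hook instead of a per-entry default in the
nand_ids table.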
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-28-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 19 ++++--------------- include/linux/mtd/rawnand.h | 9 --------- 2 files changed, 4 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 2f4eba1a1082..753328f106c1 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1012,10 +1012,8 @@ err_reset_chip: * @iface: the interface configuration (can eventually be updated) * @spec_timings: specific timings, when not fitting the ONFI specification * - * If specific timings are provided, use them. Otherwise, try to retrieve - * supported timing modes from ONFI information. Finally, if the NAND chip does - * not follow the ONFI specification, rely on the ->default_timing_mode - * specified in the nand_ids table. + * If specific timings are provided, use them. Otherwise, retrieve supported + * timing modes from ONFI information. */ int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, @@ -1038,15 +1036,8 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Fallback to slower modes */ best_mode = iface->timings.mode; - } else { - if (chip->parameters.onfi) { - unsigned int onfi_modes; - - onfi_modes = chip->parameters.onfi->async_timing_mode; - best_mode = fls(onfi_modes) - 1; - } else { - best_mode = chip->onfi_timing_mode_default; - } + } else if (chip->parameters.onfi) { + best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1; } for (mode = best_mode; mode >= 0; mode--) { @@ -4767,8 +4758,6 @@ static bool find_full_id_nand(struct nand_chip *chip, chip->options |= type->options; chip->base.eccreq.strength = NAND_ECC_STRENGTH(type); chip->base.eccreq.step_size = NAND_ECC_STEP(type); - chip->onfi_timing_mode_default = - type->onfi_timing_mode_default; chip->parameters.model = kstrdup(type->name, GFP_KERNEL); if (!chip->parameters.model) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 316a02189da1..a2427c67d38b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1069,9 +1069,6 @@ struct nand_manufacturer { * @options: Various chip options. They can partly be set to inform nand_scan * about special functionality. See the defines for further * explanation. - * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the - * actually used ONFI mode if the chip is ONFI - * compliant or deduced from the datasheet otherwise * @interface_config: NAND interface timing information * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must @@ -1119,7 +1116,6 @@ struct nand_chip { unsigned int options; /* Data interface */ - int onfi_timing_mode_default; struct nand_interface_config interface_config; /* Bad block information */ @@ -1268,10 +1264,6 @@ nand_get_interface_config(struct nand_chip *chip) * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). - * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND - * reset. Should be deduced from timings described - * in the datasheet. 
- * */ struct nand_flash_dev { char *name; @@ -1292,7 +1284,6 @@ struct nand_flash_dev { uint16_t strength_ds; uint16_t step_ds; } ecc; - int onfi_timing_mode_default; }; int nand_create_bbt(struct nand_chip *chip); -- cgit v1.2.3-59-g8ed1b From 35b6bcc970f759d4a86d6221d09ca28ea20467c8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:22 +0200 Subject: mtd: rawnand: Allocate the interface configurations dynamically Instead of manipulating the statically allocated structure and copy timings around, allocate one at identification time and save it in the nand_chip structure once it has been initialized. All NAND chips using the same interface configuration during reset and startup, we define a helper to retrieve a single reset interface configuration object, shared across all NAND chips. We use a second pointer to always have a reference on the currently applied interface configuration, which may either point to the "best interface configuration" or to the "default reset interface configuration". Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-29-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 1 + drivers/mtd/nand/raw/nand_base.c | 84 +++++++++++++++++++++++-------------- drivers/mtd/nand/raw/nand_timings.c | 6 +++ include/linux/mtd/rawnand.h | 11 +++-- 4 files changed, 67 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 5ebfbb89e572..012876e14317 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -93,6 +93,7 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, struct nand_sdr_timings *spec_timings); +const struct nand_interface_config *nand_get_reset_interface_config(void); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 753328f106c1..0c768cb88f96 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -928,9 +928,9 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_interface_config(chip, &chip->interface_config, - NAND_SDR_IFACE, 0); - ret = ops->setup_interface(chip, chipnr, &chip->interface_config); + chip->current_interface_config = nand_get_reset_interface_config(); + ret = ops->setup_interface(chip, chipnr, + chip->current_interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -949,13 +949,25 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr) */ static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 mode = chip->interface_config.timings.mode; - u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, }; + const struct nand_controller_ops *ops = chip->controller->ops; + u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }; int ret; if (!nand_controller_can_setup_interface(chip)) return 0; + /* + * A nand_reset_interface() put both the NAND chip and the NAND + * controller in timings mode 0. If the default mode for this chip is + * also 0, no need to proceed to the change again. 
Plus, at probe time, + * nand_setup_interface() uses ->set/get_features() which would + * fail anyway as the parameter page is not available yet. + */ + if (!chip->best_interface_config) + return 0; + + tmode_param[0] = chip->best_interface_config->timings.mode; + /* Change the mode on the chip side (if supported by the NAND chip) */ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { nand_select_target(chip, chipnr); @@ -967,14 +979,13 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_interface(chip, chipnr, - &chip->interface_config); + ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); if (ret) return ret; /* Check the mode has been accepted by the chip, if supported */ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) - return 0; + goto update_interface_config; memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); nand_select_target(chip, chipnr); @@ -984,12 +995,15 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) if (ret) goto err_reset_chip; - if (tmode_param[0] != mode) { + if (tmode_param[0] != chip->best_interface_config->timings.mode) { pr_warn("timing mode %d not acknowledged by the NAND chip\n", - mode); + chip->best_interface_config->timings.mode); goto err_reset_chip; } +update_interface_config: + chip->current_interface_config = chip->best_interface_config; + return 0; err_reset_chip: @@ -1031,8 +1045,10 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Verify the controller supports the requested interface */ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); - if (!ret) + if (!ret) { + chip->best_interface_config = iface; return ret; + } /* Fallback to slower modes */ best_mode = iface->timings.mode; @@ -1046,9 +1062,11 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); if (!ret) - return 0; + break; } + chip->best_interface_config = iface; + return 0; } @@ -1067,15 +1085,25 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, */ static int nand_choose_interface_config(struct nand_chip *chip) { + struct nand_interface_config *iface; + int ret; + if (!nand_controller_can_setup_interface(chip)) return 0; + iface = kzalloc(sizeof(*iface), GFP_KERNEL); + if (!iface) + return -ENOMEM; + if (chip->ops.choose_interface_config) - return chip->ops.choose_interface_config(chip, - &chip->interface_config); + ret = chip->ops.choose_interface_config(chip, iface); + else + ret = nand_choose_best_sdr_timings(chip, iface, NULL); - return nand_choose_best_sdr_timings(chip, &chip->interface_config, - NULL); + if (ret) + kfree(iface); + + return ret; } /** @@ -2501,7 +2529,6 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_interface_config saved_intf_config = chip->interface_config; int ret; ret = nand_reset_interface(chip, chipnr); @@ -2519,18 +2546,6 @@ int nand_reset(struct nand_chip *chip, int chipnr) if (ret) return ret; - /* - * A nand_reset_interface() put both the NAND chip and the NAND - * controller in timings mode 0. If the default mode for this chip is - * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_interface() uses ->set/get_features() which would - * fail anyway as the parameter page is not available yet. 
- */ - if (!memcmp(&chip->interface_config, &saved_intf_config, - sizeof(saved_intf_config))) - return 0; - - chip->interface_config = saved_intf_config; ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -5198,7 +5213,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_interface_config(chip, &chip->interface_config, NAND_SDR_IFACE, 0); + chip->current_interface_config = nand_get_reset_interface_config(); ret = nand_dt_init(chip); if (ret) @@ -5994,7 +6009,7 @@ static int nand_scan_tail(struct nand_chip *chip) for (i = 0; i < nanddev_ntargets(&chip->base); i++) { ret = nand_setup_interface(chip, i); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; } /* Check, if we should skip the bad block table scan */ @@ -6004,10 +6019,12 @@ static int nand_scan_tail(struct nand_chip *chip) /* Build bad block table */ ret = nand_create_bbt(chip); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; return 0; +err_free_interface_config: + kfree(chip->best_interface_config); err_nanddev_cleanup: nanddev_cleanup(&chip->base); @@ -6101,6 +6118,9 @@ void nand_cleanup(struct nand_chip *chip) & NAND_BBT_DYNAMICSTRUCT) kfree(chip->badblock_pattern); + /* Free the data interface */ + kfree(chip->best_interface_config); + /* Free manufacturer priv data. */ nand_manufacturer_cleanup(chip); diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 1e22006c79ba..94d832646487 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -292,6 +292,12 @@ static const struct nand_interface_config onfi_sdr_timings[] = { }, }; +/* All NAND chips share the same reset data interface: SDR mode 0 */ +const struct nand_interface_config *nand_get_reset_interface_config(void) +{ + return &onfi_sdr_timings[0]; +} + /** * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a * set of timings diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a2427c67d38b..a725b620aca2 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1069,7 +1069,11 @@ struct nand_manufacturer { * @options: Various chip options. They can partly be set to inform nand_scan * about special functionality. See the defines for further * explanation. - * @interface_config: NAND interface timing information + * @current_interface_config: The currently used NAND interface configuration + * @best_interface_config: The best NAND interface configuration which fits both + * the NAND chip and NAND controller constraints. If + * unset, the default reset interface configuration must + * be used. * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must * come from bbm.h. 
By default, these options will be copied to @@ -1116,7 +1120,8 @@ struct nand_chip { unsigned int options; /* Data interface */ - struct nand_interface_config interface_config; + const struct nand_interface_config *current_interface_config; + struct nand_interface_config *best_interface_config; /* Bad block information */ unsigned int bbt_erase_shift; @@ -1209,7 +1214,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) static inline const struct nand_interface_config * nand_get_interface_config(struct nand_chip *chip) { - return &chip->interface_config; + return chip->current_interface_config; } /* -- cgit v1.2.3-59-g8ed1b From bccb48c89fe3c09f1cbb7c8612e31f5daa1d4541 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 1 Jun 2020 20:13:21 +0200 Subject: batman-adv: Fix typos and grammar in documentation Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- Documentation/networking/batman-adv.rst | 6 ++-- include/uapi/linux/batadv_packet.h | 50 ++++++++++++++++----------------- include/uapi/linux/batman_adv.h | 4 +-- net/batman-adv/bat_iv_ogm.c | 8 +++--- net/batman-adv/bat_v_elp.c | 10 +++---- net/batman-adv/bat_v_ogm.c | 14 ++++----- net/batman-adv/bridge_loop_avoidance.c | 6 ++-- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/fragmentation.c | 6 ++-- net/batman-adv/hard-interface.c | 14 ++++----- net/batman-adv/log.h | 6 ++-- net/batman-adv/main.c | 2 +- net/batman-adv/main.h | 6 ++-- net/batman-adv/multicast.c | 21 +++++++------- net/batman-adv/netlink.c | 2 +- net/batman-adv/network-coding.c | 14 ++++----- net/batman-adv/originator.c | 8 +++--- net/batman-adv/routing.c | 4 +-- net/batman-adv/send.c | 4 +-- net/batman-adv/soft-interface.c | 2 +- net/batman-adv/tp_meter.c | 12 ++++---- net/batman-adv/translation-table.c | 10 +++---- net/batman-adv/tvlv.c | 4 +-- net/batman-adv/types.h | 12 ++++---- 24 files changed, 114 insertions(+), 113 deletions(-) (limited to 'include') diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst index 02af49b08635..74821d29a22f 100644 --- a/Documentation/networking/batman-adv.rst +++ b/Documentation/networking/batman-adv.rst @@ -73,7 +73,7 @@ lower value. This will make the mesh more responsive to topology changes, but will also increase the overhead. Information about the current state can be accessed via the batadv generic -netlink family. batctl provides human readable version via its debug tables +netlink family. batctl provides a human readable version via its debug tables subcommands. @@ -115,8 +115,8 @@ are prefixed with "batman-adv:" So to see just these messages try:: $ dmesg | grep batman-adv When investigating problems with your mesh network, it is sometimes necessary to -see more detail debug messages. This must be enabled when compiling the -batman-adv module. When building batman-adv as part of kernel, use "make +see more detailed debug messages. This must be enabled when compiling the +batman-adv module. When building batman-adv as part of the kernel, use "make menuconfig" and enable the option ``B.A.T.M.A.N. debugging`` (``CONFIG_BATMAN_ADV_DEBUG=y``). diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 0ae34c85ef9e..9c8604c5b5f6 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -72,8 +72,8 @@ enum batadv_subtype { /** * enum batadv_iv_flags - flags used in B.A.T.M.A.N. 
IV OGM packets - * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was - * previously received from someone else than the best neighbor. + * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and + * was previously received from someone other than the best neighbor. * @BATADV_PRIMARIES_FIRST_HOP: flag unused. * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a * one hop neighbor on the interface where it was originally received. @@ -195,8 +195,8 @@ struct batadv_bla_claim_dst { /** * struct batadv_ogm_packet - ogm (routing protocol) packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @flags: contains routing relevant flags - see enum batadv_iv_flags * @seqno: sequence identification * @orig: address of the source node @@ -247,7 +247,7 @@ struct batadv_ogm2_packet { /** * struct batadv_elp_packet - elp (neighbor discovery) packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header + * @version: batman-adv protocol version, part of the general header * @orig: originator mac address * @seqno: sequence number * @elp_interval: currently used ELP sending interval in ms @@ -265,15 +265,15 @@ struct batadv_elp_packet { /** * struct batadv_icmp_header - common members among all the ICMP packets * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node * @uid: local ICMP socket identifier * @align: not used - useful for alignment purposes only * - * This structure is used for ICMP packets parsing only and it is never sent + * This structure is used for ICMP packet parsing only and it is never sent * over the wire. The alignment field at the end is there to ensure that * members are padded the same way as they are in real packets. 
*/ @@ -291,8 +291,8 @@ struct batadv_icmp_header { /** * struct batadv_icmp_packet - ICMP packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -315,8 +315,8 @@ struct batadv_icmp_packet { /** * struct batadv_icmp_tp_packet - ICMP TP Meter packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -358,8 +358,8 @@ enum batadv_icmp_tp_subtype { /** * struct batadv_icmp_packet_rr - ICMP RouteRecord packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -397,8 +397,8 @@ struct batadv_icmp_packet_rr { /** * struct batadv_unicast_packet - unicast packet for network payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @ttvn: translation table version number * @dest: originator destination of the unicast packet */ @@ -433,8 +433,8 @@ struct batadv_unicast_4addr_packet { /** * struct batadv_frag_packet - fragmented packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @dest: final destination used when routing fragments * @orig: originator of the fragment used when merging the packet * @no: fragment number within this sequence @@ -467,8 +467,8 @@ struct batadv_frag_packet { /** * struct batadv_bcast_packet - broadcast packet for network payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @reserved: reserved byte for alignment * @seqno: sequence identification * @orig: originator of the broadcast packet @@ -488,10 +488,10 @@ struct batadv_bcast_packet { /** * struct batadv_coded_packet - network coded packet * @packet_type: 
batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @first_source: original source of first included packet - * @first_orig_dest: original destinal of first included packet + * @first_orig_dest: original destination of first included packet * @first_crc: checksum of first included packet * @first_ttvn: tt-version number of first included packet * @second_ttl: ttl of second packet @@ -523,8 +523,8 @@ struct batadv_coded_packet { /** * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @reserved: reserved field (for packet alignment) * @src: address of the source * @dst: address of the destination diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 617c180ff0c8..8cf2ad11ead9 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -69,7 +69,7 @@ enum batadv_tt_client_flags { /** * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be - * part of the network but no nnode has already announced it + * part of the network but no node has already announced it */ BATADV_TT_CLIENT_TEMP = (1 << 11), }; @@ -131,7 +131,7 @@ enum batadv_gw_modes { /** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */ BATADV_GW_MODE_CLIENT, - /** @BATADV_GW_MODE_SERVER: announce itself as gatway server */ + /** @BATADV_GW_MODE_SERVER: announce itself as gateway server */ BATADV_GW_MODE_SERVER, }; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index e87f19c82e8d..5b3a41983156 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -134,7 +134,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) * * Return: the originator object corresponding to the passed mac address or NULL * on failure. - * If the object does not exists it is created an initialised. + * If the object does not exist, it is created and initialised. 
*/ static struct batadv_orig_node * batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) @@ -871,7 +871,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) } /** - * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface + * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface * @orig_node: originator which reproadcasted the OGMs directly * @if_outgoing: interface which transmitted the original OGM and received the * direct rebroadcast @@ -1554,7 +1554,7 @@ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, * batadv_iv_ogm_process() - process an incoming batman iv OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) - * @if_incoming: the interface where this packet was receved + * @if_incoming: the interface where this packet was received */ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) @@ -2288,7 +2288,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information - * @single_hardif: Limit dump to this hard interfaace + * @single_hardif: Limit dump to this hard interface */ static void batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 0bdefa35da98..d35aca0e969a 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -60,7 +60,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface) * @neigh: the neighbour for which the throughput has to be obtained * * Return: The throughput towards the given neighbour in multiples of 100kpbs - * (a value of '1' equals to 0.1Mbps, '10' equals 1Mbps, etc). + * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc). */ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) { @@ -183,8 +183,8 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work) * * Sends a predefined number of unicast wifi packets to a given neighbour in * order to trigger the throughput estimation on this link by the RC algorithm. - * Packets are sent only if there there is not enough payload unicast traffic - * towards this neighbour.. + * Packets are sent only if there is not enough payload unicast traffic towards + * this neighbour.. * * Return: True on success and false in case of error during skb preparation. */ @@ -244,7 +244,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) * batadv_v_elp_periodic_work() - ELP periodic task per interface * @work: work queue item * - * Emits broadcast ELP message in regular intervals. + * Emits broadcast ELP messages in regular intervals. */ static void batadv_v_elp_periodic_work(struct work_struct *work) { @@ -499,7 +499,7 @@ orig_free: * @skb: the received packet * @if_incoming: the interface this packet was received through * - * Return: NET_RX_SUCCESS and consumes the skb if the packet was peoperly + * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly * processed or NET_RX_DROP in case of failure. 
*/ int batadv_v_elp_packet_recv(struct sk_buff *skb, diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 18028b9f95f0..0d404f7bcd9f 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -47,9 +47,9 @@ * @bat_priv: the bat priv with all the soft interface information * @addr: the address of the originator * - * Return: the orig_node corresponding to the specified address. If such object - * does not exist it is allocated here. In case of allocation failure returns - * NULL. + * Return: the orig_node corresponding to the specified address. If such an + * object does not exist, it is allocated here. In case of allocation failure + * returns NULL. */ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) @@ -172,7 +172,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb, * batadv_v_ogm_aggr_list_free - free all elements in an aggregation queue * @hard_iface: the interface holding the aggregation queue * - * Empties the OGMv2 aggregation queue and frees all the skbs it contained. + * Empties the OGMv2 aggregation queue and frees all the skbs it contains. * * Caller needs to hold the hard_iface->bat_v.aggr_list.lock. */ @@ -378,7 +378,7 @@ static void batadv_v_ogm_send(struct work_struct *work) * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface * @work: work queue item * - * Emits aggregated OGM message in regular intervals. + * Emits aggregated OGM messages in regular intervals. */ void batadv_v_ogm_aggr_work(struct work_struct *work) { @@ -399,7 +399,7 @@ void batadv_v_ogm_aggr_work(struct work_struct *work) * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V * @hard_iface: the interface to prepare * - * Takes care of scheduling own OGM sending routine for this interface. + * Takes care of scheduling its own OGM sending routine for this interface. * * Return: 0 on success or a negative error code otherwise */ @@ -847,7 +847,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, * batadv_v_ogm_process() - process an incoming batman v OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) - * @if_incoming: the interface where this packet was receved + * @if_incoming: the interface where this packet was received */ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 41cc87f06b14..91a04ca373dc 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -992,7 +992,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv, * @hw_dst: the Hardware destination in the ARP Header * @ethhdr: pointer to the Ethernet header of the claim frame * - * checks if it is a claim packet and if its on the same group. + * checks if it is a claim packet and if it's on the same group. * This function also applies the group ID of the sender * if it is in the same mesh. * @@ -1757,7 +1757,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv) * @vid: the VLAN ID of the frame * * Checks if this packet is a loop detect frame which has been sent by us, - * throw an uevent and log the event if that is the case. + * throws an uevent and logs the event if that is the case. * * Return: true if it is a loop detect frame which is to be dropped, false * otherwise. 
@@ -1815,7 +1815,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * * we have to race for a claim * * if the frame is allowed on the LAN * - * in these cases, the skb is further handled by this function + * In these cases, the skb is further handled by this function * * Return: true if handled, otherwise it returns false and the caller shall * further process the skb. diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b85da4b7a77b..0e6e53e9b5f3 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -666,7 +666,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst, * @vid: VLAN identifier * @packet_subtype: unicast4addr packet subtype to use * - * This function copies the skb with pskb_copy() and is sent as unicast packet + * This function copies the skb with pskb_copy() and is sent as a unicast packet * to each of the selected candidates. * * Return: true if the packet is sent to at least one candidate, false diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 7cad97644d05..9fdbe3068153 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -102,8 +102,8 @@ static int batadv_frag_size_limit(void) * * Caller must hold chain->lock. * - * Return: true if chain is empty and caller can just insert the new fragment - * without searching for the right position. + * Return: true if chain is empty and the caller can just insert the new + * fragment without searching for the right position. */ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain, u16 seqno) @@ -306,7 +306,7 @@ free: * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb * to NULL; 3) Error: Return false and free skb. * - * Return: true when packet is merged or buffered, false when skb is not not + * Return: true when the packet is merged or buffered, false when skb is not not * used. */ bool batadv_frag_skb_buffer(struct sk_buff **skb, diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 3a256af92784..53c27c67cc11 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -138,10 +138,10 @@ static bool batadv_mutual_parents(const struct net_device *dev1, * @net_dev: the device to check * * If the user creates any virtual device on top of a batman-adv interface, it - * is important to prevent this new interface to be used to create a new mesh - * network (this behaviour would lead to a batman-over-batman configuration). - * This function recursively checks all the fathers of the device passed as - * argument looking for a batman-adv soft interface. + * is important to prevent this new interface from being used to create a new + * mesh network (this behaviour would lead to a batman-over-batman + * configuration). This function recursively checks all the fathers of the + * device passed as argument looking for a batman-adv soft interface. * * Return: true if the device is descendant of a batman-adv mesh interface (or * if it is a batman-adv interface itself), false otherwise @@ -680,8 +680,8 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface) * @slave: the interface enslaved in another master * @master: the master from which slave has to be removed * - * Invoke ndo_del_slave on master passing slave as argument. In this way slave - * is free'd and master can correctly change its internal state. 
+ * Invoke ndo_del_slave on master passing slave as argument. In this way the + * slave is free'd and the master can correctly change its internal state. * * Return: 0 on success, a negative value representing the error otherwise */ @@ -818,7 +818,7 @@ err: * @soft_iface: soft interface to check * * This function is only using RCU for locking - the result can therefore be - * off when another functions is modifying the list at the same time. The + * off when another function is modifying the list at the same time. The * caller can use the rtnl_lock to make sure that the count is accurate. * * Return: number of connected/enslaved hard interfaces diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index f9884dc56cf3..979864c0fa6b 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@ -69,7 +69,7 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) __printf(2, 3); /** - * _batadv_dbg() - Store debug output with(out) ratelimiting + * _batadv_dbg() - Store debug output with(out) rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @ratelimited: whether output should be rate limited @@ -95,7 +95,7 @@ static inline void _batadv_dbg(int type __always_unused, #endif /** - * batadv_dbg() - Store debug output without ratelimiting + * batadv_dbg() - Store debug output without rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @arg: format string and variable arguments @@ -104,7 +104,7 @@ static inline void _batadv_dbg(int type __always_unused, _batadv_dbg(type, bat_priv, 0, ## arg) /** - * batadv_dbg_ratelimited() - Store debug output with ratelimiting + * batadv_dbg_ratelimited() - Store debug output with rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @arg: format string and variable arguments diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index d8a255c85e77..519c08c2cfba 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -666,7 +666,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len) * @vid: the VLAN identifier for which the AP isolation attributed as to be * looked up * - * Return: true if AP isolation is on for the VLAN idenfied by vid, false + * Return: true if AP isolation is on for the VLAN identified by vid, false * otherwise */ bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 42b8d1e76dea..0393bb9ed3d0 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -308,7 +308,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, * @y: value to compare @x against * * It handles overflows/underflows and can correctly check for a predecessor - * unless the variable sequence number has grown by more then + * unless the variable sequence number has grown by more than * 2**(bitwidth(x)-1)-1. 
* * This means that for a u8 with the maximum value 255, it would think: @@ -330,11 +330,11 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, /** * batadv_seq_after() - Checks if a sequence number x is a successor of y - * @x: potential sucessor of @y + * @x: potential successor of @y * @y: value to compare @x against * * It handles overflows/underflows and can correctly check for a successor - * unless the variable sequence number has grown by more then + * unless the variable sequence number has grown by more than * 2**(bitwidth(x)-1)-1. * * This means that for a u8 with the maximum value 255, it would think: diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 9ebdc1e864b9..bdc4a1fba1c6 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -510,7 +510,7 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, * the given mcast_list. In general, multicast listeners provided by * your multicast receiving applications run directly on this node. * - * If there is a bridge interface on top of dev, collects from that one + * If there is a bridge interface on top of dev, collect from that one * instead. Just like with IP addresses and routes, multicast listeners * will(/should) register to the bridge interface instead of an * enslaved bat0. @@ -832,8 +832,8 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @flags: TVLV flags indicating the new multicast state * - * Whenever the multicast TVLV flags this nodes announces change this notifies - * userspace via the 'mcast' log level. + * Whenever the multicast TVLV flags this node announces change, this function + * should be used to notify userspace about the change. */ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags) { @@ -1244,7 +1244,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv) * @ethhdr: an ethernet header to determine the protocol family from * * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or - * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and + * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, sets and * increases its refcount. */ static struct batadv_orig_node * @@ -1693,7 +1693,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, } /** - * batadv_mcast_forw_send() - send packet to any detected multicast recpient + * batadv_mcast_forw_send() - send packet to any detected multicast recipient * @bat_priv: the bat priv with all the soft interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier @@ -1742,7 +1742,8 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, - * orig, has toggled then this method updates counter and list accordingly. + * orig, has toggled then this method updates the counter and the list + * accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1787,7 +1788,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. 
*/ @@ -1832,7 +1833,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1877,7 +1878,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1922,7 +1923,7 @@ static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 02ed073f95a9..cfb00dfa468a 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -640,7 +640,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie) * @bat_priv: the bat priv with all the soft interface information * @dst: destination of tp_meter session * @result: reason for tp meter session stop - * @test_time: total time ot the tp_meter session + * @test_time: total time of the tp_meter session * @total_bytes: bytes acked to the receiver * @cookie: cookie of tp_meter session * diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index b0469d15da0e..48d707850f3e 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -134,7 +134,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, } /** - * batadv_nc_mesh_init() - initialise coding hash table and start house keeping + * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping * @bat_priv: the bat priv with all the soft interface information * * Return: 0 on success or negative error number in case of failure @@ -700,7 +700,7 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv, } /** - * batadv_nc_worker() - periodic task for house keeping related to network + * batadv_nc_worker() - periodic task for housekeeping related to network * coding * @work: kernel work struct */ @@ -1316,7 +1316,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv, } /** - * batadv_nc_skb_src_search() - Loops through the list of neighoring nodes of + * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of * the skb's sender (may be equal to the originator). * @bat_priv: the bat priv with all the soft interface information * @skb: data skb to forward @@ -1402,10 +1402,10 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv, * @neigh_node: next hop to forward packet to * @ethhdr: pointer to the ethernet header inside the skb * - * Loops through list of neighboring nodes the next hop has a good connection to - * (receives OGMs with a sufficient quality). 
We need to find a neighbor of our - * next hop that potentially sent a packet which our next hop also received - * (overheard) and has stored for later decoding. + * Loops through the list of neighboring nodes the next hop has a good + * connection to (receives OGMs with a sufficient quality). We need to find a + * neighbor of our next hop that potentially sent a packet which our next hop + * also received (overheard) and has stored for later decoding. * * Return: true if the skb was consumed (encoded packet sent) or false otherwise */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 5b0c2fffc214..805d8969bdfb 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -325,7 +325,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to * - * Return: the neighbor which should be router for this orig_node/iface. + * Return: the neighbor which should be the router for this orig_node/iface. * * The object is returned with refcounter increased by 1. */ @@ -515,7 +515,7 @@ out: * Looks for and possibly returns a neighbour belonging to this originator list * which is connected through the provided hard interface. * - * Return: neighbor when found. Othwerwise NULL + * Return: neighbor when found. Otherwise NULL */ static struct batadv_neigh_node * batadv_neigh_node_get(const struct batadv_orig_node *orig_node, @@ -620,7 +620,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface, * * Looks for and possibly returns a neighbour belonging to this hard interface. * - * Return: neighbor when found. Othwerwise NULL + * Return: neighbor when found. Otherwise NULL */ struct batadv_hardif_neigh_node * batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface, @@ -999,7 +999,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv) * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the originator * - * Creates a new originator object and initialise all the generic fields. + * Creates a new originator object and initialises all the generic fields. * The new object is not added to the originator list. * * Return: the newly created object or NULL on failure. diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index d343382e9664..27cdf5e4349a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -449,7 +449,7 @@ free_skb: * @skb: packet to check * @hdr_size: size of header to pull * - * Check for short header and bad addresses in given packet. + * Checks for short header and bad addresses in the given packet. * * Return: negative value when check fails and 0 otherwise. The negative value * depends on the reason: -ENODATA for bad header, -EBADR for broadcast @@ -1113,7 +1113,7 @@ free_skb: * @recv_if: interface that the skb is received on * * This function does one of the three following things: 1) Forward fragment, if - * the assembled packet will exceed our MTU; 2) Buffer fragment, if we till + * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still * lack further fragments; 3) Merge fragments, if we have all needed parts. * * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise. 
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 7f8ade04e08e..d267b94800d6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -605,8 +605,8 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet, * given hard_iface. If hard_iface is NULL forwarding packets on all hard * interfaces will be claimed. * - * The packets are being moved from the forw_list to the cleanup_list and - * by that allows already running threads to notice the claiming. + * The packets are being moved from the forw_list to the cleanup_list. This + * makes it possible for already running threads to notice the claim. */ static void batadv_forw_packet_list_steal(struct hlist_head *forw_list, diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index f1f1c86f3419..23833a0ba5e6 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -406,7 +406,7 @@ end: * @hdr_size: size of already parsed batman-adv header * @orig_node: originator from which the batman-adv packet was sent * - * Sends a ethernet frame to the receive path of the local @soft_iface. + * Sends an ethernet frame to the receive path of the local @soft_iface. * skb->data has still point to the batman-adv header with the size @hdr_size. * The caller has to have parsed this header already and made sure that at least * @hdr_size bytes are still available for pull in @skb. diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index bd2ac570c42c..db7e3774825b 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -66,7 +66,7 @@ /** * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond - * such amound of milliseconds, the receiver is considered unreachable and the + * such amount of milliseconds, the receiver is considered unreachable and the * connection is killed */ #define BATADV_TP_MAX_RTO 30000 @@ -108,10 +108,10 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid) * batadv_tp_cwnd() - compute the new cwnd size * @base: base cwnd size value * @increment: the value to add to base to get the new size - * @min: minumim cwnd value (usually MSS) + * @min: minimum cwnd value (usually MSS) * - * Return the new cwnd size and ensures it does not exceed the Advertised - * Receiver Window size. It is wrap around safe. + * Return the new cwnd size and ensure it does not exceed the Advertised + * Receiver Window size. It is wrapped around safely. * For details refer to Section 3.1 of RFC5681 * * Return: new congestion window size in bytes @@ -254,7 +254,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason, * @dst: the other endpoint MAC address to look for * * Look for a tp_vars object matching dst as end_point and return it after - * having incremented the refcounter. Return NULL is not found + * having increment the refcounter. Return NULL is not found * * Return: matching tp_vars or NULL when no tp_vars with @dst was found */ @@ -291,7 +291,7 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv, * @session: session identifier * * Look for a tp_vars object matching dst as end_point, session as tp meter - * session and return it after having incremented the refcounter. Return NULL + * session and return it after having increment the refcounter. 
Return NULL * is not found * * Return: matching tp_vars or NULL when no tp_vars was found diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index a9635c882fe0..98a0aaaf0d50 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -301,7 +301,7 @@ void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) * @vid: VLAN identifier * * Return: the number of originators advertising the given address/data - * (excluding ourself). + * (excluding our self). */ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) @@ -842,7 +842,7 @@ out: * table. In case of success the value is updated with the real amount of * reserved bytes * Allocate the needed amount of memory for the entire TT TVLV and write its - * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data + * header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data * objects, one per active VLAN served by the originator node. * * Return: the size of the allocated buffer or 0 in case of failure. @@ -1674,7 +1674,7 @@ out: * the function argument. * If a TT local entry exists for this non-mesh client remove it. * - * The caller must hold orig_node refcount. + * The caller must hold the orig_node refcount. * * Return: true if the new entry has been added, false otherwise */ @@ -1839,7 +1839,7 @@ out: * @bat_priv: the bat priv with all the soft interface information * @tt_global_entry: global translation table entry to be analyzed * - * This functon assumes the caller holds rcu_read_lock(). + * This function assumes the caller holds rcu_read_lock(). * Return: best originator list entry or NULL on errors. */ static struct batadv_tt_orig_list_entry * @@ -1887,7 +1887,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv, * @tt_global_entry: global translation table entry to be printed * @seq: debugfs table seq_file struct * - * This functon assumes the caller holds rcu_read_lock(). + * This function assumes the caller holds rcu_read_lock(). */ static void batadv_tt_global_print_entry(struct batadv_priv *bat_priv, diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 0963a43ad996..6a23a566cde1 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -353,8 +353,8 @@ end: * @tvlv_value: tvlv content * @tvlv_value_len: tvlv content length * - * Return: success if handler was not found or the return value of the handler - * callback. + * Return: success if the handler was not found or the return value of the + * handler callback. */ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv, struct batadv_tvlv_handler *tvlv_handler, diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index d152b8e81f61..cc151e1f23b2 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -455,8 +455,8 @@ struct batadv_orig_node { spinlock_t tt_buff_lock; /** - * @tt_lock: prevents from updating the table while reading it. Table - * update is made up by two operations (data structure update and + * @tt_lock: avoids concurrent read from and write to the table. Table + * update is made up of two operations (data structure update and * metadata -CRC/TTVN-recalculation) and they have to be executed * atomically in order to avoid another thread to read the * table/metadata between those. 
@@ -748,7 +748,7 @@ struct batadv_neigh_ifinfo { * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression */ struct batadv_bcast_duplist_entry { - /** @orig: mac address of orig node orginating the broadcast */ + /** @orig: mac address of orig node originating the broadcast */ u8 orig[ETH_ALEN]; /** @crc: crc32 checksum of broadcast payload */ @@ -1010,7 +1010,7 @@ struct batadv_priv_tt { /** * @commit_lock: prevents from executing a local TT commit while reading - * the local table. The local TT commit is made up by two operations + * the local table. The local TT commit is made up of two operations * (data structure update and metadata -CRC/TTVN- recalculation) and * they have to be executed atomically in order to avoid another thread * to read the table/metadata between those. @@ -1024,7 +1024,7 @@ struct batadv_priv_tt { #ifdef CONFIG_BATMAN_ADV_BLA /** - * struct batadv_priv_bla - per mesh interface bridge loope avoidance data + * struct batadv_priv_bla - per mesh interface bridge loop avoidance data */ struct batadv_priv_bla { /** @num_requests: number of bla requests in flight */ @@ -1718,7 +1718,7 @@ struct batadv_priv { spinlock_t softif_vlan_list_lock; #ifdef CONFIG_BATMAN_ADV_BLA - /** @bla: bridge loope avoidance data */ + /** @bla: bridge loop avoidance data */ struct batadv_priv_bla bla; #endif -- cgit v1.2.3-59-g8ed1b From 3bda14d09dc5789a895ab02b7dcfcec19b0a65b3 Mon Sep 17 00:00:00 2001 From: Linus Lüssing Date: Mon, 1 Jun 2020 22:35:22 +0200 Subject: batman-adv: Introduce a configurable per interface hop penalty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some setups multiple hard interfaces with similar link qualities or throughput values are available. But people have expressed the desire to consider one of them as a backup only. Some creative solutions are currently in use: Such people are configuring multiple batman-adv mesh/soft interfaces, wire them together with some veth pairs and then tune the hop penalty to achieve an effect similar to a tunable per interface hop penalty. This patch introduces a new, configurable, per hard interface hop penalty to simplify such setups. Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- include/uapi/linux/batman_adv.h | 3 ++- net/batman-adv/bat_iv_ogm.c | 17 +++++++++-------- net/batman-adv/bat_v_ogm.c | 13 ++++++++++--- net/batman-adv/hard-interface.c | 2 ++ net/batman-adv/netlink.c | 12 +++++++++++- net/batman-adv/types.h | 6 ++++++ 6 files changed, 40 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 8cf2ad11ead9..bb0ae945b36a 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -427,7 +427,8 @@ enum batadv_nl_attrs { /** * @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied - * to an originator message's tq-field on every hop. 
+ * to an originator message's tq-field on every hop and/or per + * hard interface */ BATADV_ATTR_HOP_PENALTY, diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 5b3a41983156..a4faf5f904d9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1075,10 +1075,10 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, struct batadv_neigh_ifinfo *neigh_ifinfo; u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; + unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; - unsigned int tq_iface_penalty; bool ret = false; /* find corresponding one hop neighbor */ @@ -1157,31 +1157,32 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube; inv_asym_penalty /= neigh_rq_max_cube; tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty; + tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty); /* penalize if the OGM is forwarded on the same interface. WiFi * interfaces and other half duplex devices suffer from throughput * drops as they can't send and receive at the same time. */ - tq_iface_penalty = BATADV_TQ_MAX_VALUE; if (if_outgoing && if_incoming == if_outgoing && batadv_is_wifi_hardif(if_outgoing)) - tq_iface_penalty = batadv_hop_penalty(BATADV_TQ_MAX_VALUE, - bat_priv); + tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty, + bat_priv); combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty * - tq_iface_penalty; + tq_iface_hop_penalty; combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE; batadv_ogm_packet->tq = combined_tq; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", + "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", orig_node->orig, orig_neigh_node->orig, total_count, - neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_penalty, - batadv_ogm_packet->tq, if_incoming->net_dev->name, + neigh_rq_count, tq_own, tq_asym_penalty, + tq_iface_hop_penalty, batadv_ogm_packet->tq, + if_incoming->net_dev->name, if_outgoing ? if_outgoing->net_dev->name : "DEFAULT"); /* if link has the minimum required transmission quality diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 0d404f7bcd9f..0f8495b9eeb1 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -455,15 +455,17 @@ unlock: * @throughput: the current throughput * * Apply a penalty on the current throughput metric value based on the - * characteristic of the interface where the OGM has been received. The return - * value is computed as follows: + * characteristic of the interface where the OGM has been received. + * + * Initially the per hardif hop penalty is applied to the throughput. After + * that the return value is then computed as follows: * - throughput * 50% if the incoming and outgoing interface are the * same WiFi interface and the throughput is above * 1MBit/s * - throughput if the outgoing interface is the default * interface (i.e. 
this OGM is processed for the * internal table and not forwarded) - * - throughput * hop penalty otherwise + * - throughput * node hop penalty otherwise * * Return: the penalised throughput metric. */ @@ -472,9 +474,14 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, u32 throughput) { + int if_hop_penalty = atomic_read(&if_incoming->hop_penalty); int hop_penalty = atomic_read(&bat_priv->hop_penalty); int hop_penalty_max = BATADV_TQ_MAX_VALUE; + /* Apply per hardif hop penalty */ + throughput = throughput * (hop_penalty_max - if_hop_penalty) / + hop_penalty_max; + /* Don't apply hop penalty in default originator table. */ if (if_outgoing == BATADV_IF_DEFAULT) return throughput; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 53c27c67cc11..fa06b51c0144 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -939,6 +939,8 @@ batadv_hardif_add_interface(struct net_device *net_dev) if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; + atomic_set(&hard_iface->hop_penalty, 0); + batadv_v_hardif_init(hard_iface); batadv_check_known_mac_addr(hard_iface->net_dev); diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index cfb00dfa468a..dc193618a761 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -826,6 +826,10 @@ static int batadv_netlink_hardif_fill(struct sk_buff *msg, goto nla_put_failure; } + if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY, + atomic_read(&hard_iface->hop_penalty))) + goto nla_put_failure; + #ifdef CONFIG_BATMAN_ADV_BATMAN_V if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL, atomic_read(&hard_iface->bat_v.elp_interval))) @@ -920,9 +924,15 @@ static int batadv_netlink_set_hardif(struct sk_buff *skb, { struct batadv_hard_iface *hard_iface = info->user_ptr[1]; struct batadv_priv *bat_priv = info->user_ptr[0]; + struct nlattr *attr; + + if (info->attrs[BATADV_ATTR_HOP_PENALTY]) { + attr = info->attrs[BATADV_ATTR_HOP_PENALTY]; + + atomic_set(&hard_iface->hop_penalty, nla_get_u8(attr)); + } #ifdef CONFIG_BATMAN_ADV_BATMAN_V - struct nlattr *attr; if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) { attr = info->attrs[BATADV_ATTR_ELP_INTERVAL]; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index cc151e1f23b2..ed519efa3c36 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -208,6 +208,12 @@ struct batadv_hard_iface { /** @rcu: struct used for freeing in an RCU-safe manner */ struct rcu_head rcu; + /** + * @hop_penalty: penalty which will be applied to the tq-field + * of an OGM received via this interface + */ + atomic_t hop_penalty; + /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */ struct batadv_hard_iface_bat_iv bat_iv; -- cgit v1.2.3-59-g8ed1b From d41a39dda1407ff50c7c4bfbd307c188f1ae364b Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Thu, 25 Jun 2020 14:07:23 +0200 Subject: drm/scheduler: improve job distribution with multiple queues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch uses score to select a new drm scheduler for better loadbalance between multiple drm schedulers instead of num_jobs. Below are test results after running amdgpu_test for ~10 times. 
Before this patch:

sched_name     num of many times it got schedule
=========      ==================================
sdma0          1463
sdma1          198
comp_1.0.1     280

After this patch:

sched_name     num of many times it got schedule
=========      ==================================
sdma0          925
sdma1          928
comp_1.0.1     177
comp_1.1.1     44
comp_1.2.1     43
comp_1.3.1     44

Signed-off-by: Nirmoy Das Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/373000/ Signed-off-by: Christian König --- drivers/gpu/drm/scheduler/sched_entity.c | 2 +- drivers/gpu/drm/scheduler/sched_main.c | 14 ++++++++------ include/drm/gpu_scheduler.h | 6 +++--- 3 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index c803e14eed91..146380118962 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -486,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, bool first; trace_drm_sched_job(sched_job, entity); - atomic_inc(&entity->rq->sched->num_jobs); + atomic_inc(&entity->rq->sched->score); WRITE_ONCE(entity->last_user, current->group_leader); first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 8e731ed0d9d9..25a9e6911602 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, if (!list_empty(&entity->list)) return; spin_lock(&rq->lock); + atomic_inc(&rq->sched->score); list_add_tail(&entity->list, &rq->entities); spin_unlock(&rq->lock); } @@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, if (list_empty(&entity->list)) return; spin_lock(&rq->lock); + atomic_dec(&rq->sched->score); list_del_init(&entity->list); if (rq->current_entity == entity) rq->current_entity = NULL; @@ -647,7 +649,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) struct drm_gpu_scheduler *sched = s_fence->sched; atomic_dec(&sched->hw_rq_count); - atomic_dec(&sched->num_jobs); + atomic_dec(&sched->score); trace_drm_sched_process_job(s_fence); @@ -712,7 +714,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, { struct drm_gpu_scheduler *sched, *picked_sched = NULL; int i; - unsigned int min_jobs = UINT_MAX, num_jobs; + unsigned int min_score = UINT_MAX, num_score; for (i = 0; i < num_sched_list; ++i) { sched = sched_list[i]; @@ -723,9 +725,9 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, continue; } - num_jobs = atomic_read(&sched->num_jobs); - if (num_jobs < min_jobs) { - min_jobs = num_jobs; + num_score = atomic_read(&sched->score); + if (num_score < min_score) { + min_score = num_score; picked_sched = sched; } } @@ -860,7 +862,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); - atomic_set(&sched->num_jobs, 0); + atomic_set(&sched->score, 0); atomic64_set(&sched->job_id_count, 0); /* Each scheduler will run on a seperate kernel thread */ diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index a21b3b92135a..b9780ae9dd26 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -263,7 +263,7 @@ struct drm_sched_backend_ops { * @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. - * @num_jobs: the number of jobs in queue in the scheduler + * @score: score to help loadbalancer pick a idle sched * @ready: marks if the underlying HW is ready to work * @free_guilty: A hit to time out handler to free the guilty job. * @@ -284,8 +284,8 @@ struct drm_gpu_scheduler { struct list_head ring_mirror_list; spinlock_t job_list_lock; int hang_limit; - atomic_t num_jobs; - bool ready; + atomic_t score; + bool ready; bool free_guilty; }; -- cgit v1.2.3-59-g8ed1b From 6407d666c5353d15d9e5c7cc1b8d8ced40e425f0 Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Wed, 24 Jun 2020 20:26:48 +0200 Subject: drm/ttm: do not keep GPU dependent addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GPU address handling is device specific and should be handle by its device driver. Signed-off-by: Nirmoy Das Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/372937/ Signed-off-by: Christian König --- drivers/gpu/drm/ttm/ttm_bo.c | 7 ------- include/drm/ttm/ttm_bo_api.h | 2 -- include/drm/ttm/ttm_bo_driver.h | 1 - 3 files changed, 10 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f73b81c2576e..f78cfc76ad78 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -85,7 +85,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p drm_printf(p, " has_type: %d\n", man->has_type); drm_printf(p, " use_type: %d\n", man->use_type); drm_printf(p, " flags: 0x%08X\n", man->flags); - drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset); drm_printf(p, " size: %llu\n", man->size); drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); @@ -343,12 +342,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, moved: bo->evicted = false; - if (bo->mem.mm_node) - bo->offset = (bo->mem.start << PAGE_SHIFT) + - bdev->man[bo->mem.mem_type].gpu_offset; - else - bo->offset = 0; - ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0a9d042e075a..a6ec6101d9ec 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -213,8 +213,6 @@ struct ttm_buffer_object { * either of these locks held. */ - uint64_t offset; /* GPU address space is independent of CPU word size */ - struct sg_table *sg; }; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 54a527aa79cc..aa1f398c2ea7 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -177,7 +177,6 @@ struct ttm_mem_type_manager { bool has_type; bool use_type; uint32_t flags; - uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ uint64_t size; uint32_t available_caching; uint32_t default_caching; -- cgit v1.2.3-59-g8ed1b From 5c45a918263e4da142b9cf40a1dfcb4134d454a2 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:08:59 +0200 Subject: net: netdevice.h: add a description for napi_defer_hard_irqs Changeset 6f8b12d661d0 ("net: napi: add hard irqs deferral feature") added a new element at struct net_device. Add a description for it, based on what's described at the changeset which added such feature. 
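[Editorial illustration] The interplay between napi_defer_hard_irqs and gro_flush_timeout that this new kernel-doc line describes can be sketched with a small standalone program. This is a hedged, simplified illustration only: the struct and function names below are invented for the example and this is not the kernel's actual napi_complete_done() logic.

/*
 * Editorial sketch: simplified userspace illustration of the hard-IRQ
 * deferral idea documented above.  Not the kernel's implementation;
 * all names here are invented for the example.
 */
#include <stdio.h>

struct fake_napi {
	unsigned int defer_hard_irqs;	/* mirrors netdev->napi_defer_hard_irqs */
	unsigned int deferred;		/* consecutive completions deferred so far */
	unsigned long gro_flush_timeout;
};

/* Returns 1 when the (imaginary) NIC hard IRQ would be re-enabled. */
static int fake_napi_complete(struct fake_napi *n)
{
	if (n->gro_flush_timeout && n->deferred < n->defer_hard_irqs) {
		n->deferred++;	/* keep polling via the timeout timer instead */
		return 0;
	}
	n->deferred = 0;	/* deferral budget used up: unmask the hard IRQ */
	return 1;
}

int main(void)
{
	struct fake_napi n = { .defer_hard_irqs = 2, .gro_flush_timeout = 20000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("completion %d: irq re-enabled = %d\n",
		       i, fake_napi_complete(&n));
	return 0;
}

With a non-zero counter, a busy queue keeps being serviced from the gro_flush_timeout timer rather than re-enabling the hard IRQ on every poll completion.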
Fixes: 6f8b12d661d0 ("net: napi: add hard irqs deferral feature") Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/807a3840e7bc1562adefadb0535c9f47e6ab52e0.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6fc613ed8eae..f3ca52958a17 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1742,6 +1742,8 @@ enum netdev_priv_flags { * @real_num_rx_queues: Number of RX queues currently active in device * @xdp_prog: XDP sockets filter program pointer * @gro_flush_timeout: timeout for GRO layer in NAPI + * @napi_defer_hard_irqs: If not zero, provides a counter that would + * allow to avoid NIC hard IRQ, on busy queues. * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one -- cgit v1.2.3-59-g8ed1b From 5d682f5ec9d1c49e7fe2945e586aa264692859f0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:01 +0200 Subject: net: pylink.h: add kernel-doc descriptions for new fields at phylink_config Some fields were moved from struct phylink into phylink_config. Update the kernel-doc markups for the config struct accordingly Fixes: 5c05c1dbb177 ("net: phylink, dsa: eliminate phylink_fixed_state_cb()") Reviewed-by: Russell King Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/34970f447ff86415a6cef10a785fbef81c2819a7.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/phylink.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cc5b452a184e..02ff1419d4be 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -62,6 +62,10 @@ enum phylink_op_type { * @dev: a pointer to a struct device associated with the MAC * @type: operation type of PHYLINK instance * @pcs_poll: MAC PCS cannot provide link change interrupt + * @poll_fixed_state: if true, starts link_poll, + * if MAC link is at %MLO_AN_FIXED mode. + * @get_fixed_state: callback to execute to determine the fixed link state, + * if MAC link is at %MLO_AN_FIXED mode. */ struct phylink_config { struct device *dev; -- cgit v1.2.3-59-g8ed1b From 21b9cb34385d93d661a9134adf19eae2bd734218 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:03 +0200 Subject: fs: fs.h: fix a kernel-doc parameter description Changeset 3b0311e7ca71 ("vfs: track per-sb writeback errors and report them to syncfs") added a variant of filemap_sample_wb_err(), but it forgot to rename the arguments at the kernel-doc markup. Fix it. 
Fix those warnings: ./include/linux/fs.h:2845: warning: Function parameter or member 'file' not described in 'file_sample_sb_err' ./include/linux/fs.h:2845: warning: Excess function parameter 'mapping' description in 'file_sample_sb_err' Fixes: 3b0311e7ca71 ("vfs: track per-sb writeback errors and report them to syncfs") Signed-off-by: Mauro Carvalho Chehab Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/7b33bbceb29ac80874622a2bc84127bb10103245.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c4ab4dc1cd7..7e17ecc461d5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2829,7 +2829,7 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) /** * file_sample_sb_err - sample the current errseq_t to test for later errors - * @mapping: mapping to be sampled + * @file: file pointer to be sampled * * Grab the most current superblock-level errseq_t value for the given * struct file. -- cgit v1.2.3-59-g8ed1b From 15d737f8a14e73fcf25f6f797630279a203ce99c Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:04 +0200 Subject: kcsan: fix a kernel-doc warning One of the kernel-doc markups there have two "note" sections: ./include/linux/kcsan-checks.h:346: warning: duplicate section name 'Note' While this is not the case here, duplicated sections can cause build issues on Sphinx. So, let's change the notes section to use, instead, a list for those 2 notes at the same function. Signed-off-by: Mauro Carvalho Chehab Acked-by: Marco Elver Link: https://lore.kernel.org/r/20f7995fab2ba85ce723203e9a7c822a55cca2af.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/kcsan-checks.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 7b0b9c44f5f3..c5f6c1dcf7e3 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -337,11 +337,13 @@ static inline void __kcsan_disable_current(void) { } * release_for_reuse(obj); * } * - * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough - * checking if a clear scope where no concurrent accesses are expected exists. + * Note: * - * Note: For cases where the object is freed, `KASAN `_ is a better - * fit to detect use-after-free bugs. + * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent accesses are expected exists. + * + * 2. For cases where the object is freed, `KASAN `_ is a better + * fit to detect use-after-free bugs. * * @var: variable to assert on */ -- cgit v1.2.3-59-g8ed1b From 985098a05eee6bf5caca7e997e02a5b15242cfa0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:10 +0200 Subject: docs: fix references for DMA*.txt files As we moved those files to core-api, fix references to point to their newer locations. 
Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/37b2fd159fbc7655dbf33b3eb1215396a25f6344.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- Documentation/PCI/pci.rst | 6 +++--- Documentation/block/biodoc.rst | 2 +- Documentation/bus-virt-phys-mapping.txt | 2 +- Documentation/core-api/dma-api.rst | 6 +++--- Documentation/core-api/dma-isa-lpc.rst | 2 +- Documentation/driver-api/usb/dma.rst | 6 +++--- Documentation/translations/ko_KR/memory-barriers.txt | 6 +++--- arch/ia64/hp/common/sba_iommu.c | 12 ++++++------ arch/parisc/kernel/pci-dma.c | 2 +- arch/x86/include/asm/dma-mapping.h | 4 ++-- arch/x86/kernel/amd_gart_64.c | 2 +- drivers/parisc/sba_iommu.c | 14 +++++++------- include/linux/dma-mapping.h | 2 +- include/media/videobuf-dma-sg.h | 2 +- kernel/dma/debug.c | 2 +- 15 files changed, 35 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst index 8c016d8c9862..d10d3fe604c5 100644 --- a/Documentation/PCI/pci.rst +++ b/Documentation/PCI/pci.rst @@ -265,7 +265,7 @@ Set the DMA mask size --------------------- .. note:: If anything below doesn't make sense, please refer to - Documentation/DMA-API.txt. This section is just a reminder that + :doc:`/core-api/dma-api`. This section is just a reminder that drivers need to indicate DMA capabilities of the device and is not an authoritative source for DMA interfaces. @@ -291,7 +291,7 @@ Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are Setup shared control data ------------------------- Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared) -memory. See Documentation/DMA-API.txt for a full description of +memory. See :doc:`/core-api/dma-api` for a full description of the DMA APIs. This section is just a reminder that it needs to be done before enabling DMA on the device. @@ -421,7 +421,7 @@ owners if there is one. Then clean up "consistent" buffers which contain the control data. -See Documentation/DMA-API.txt for details on unmapping interfaces. +See :doc:`/core-api/dma-api` for details on unmapping interfaces. Unregister from other subsystems diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst index b964796ec9c7..ba7f45d0271c 100644 --- a/Documentation/block/biodoc.rst +++ b/Documentation/block/biodoc.rst @@ -196,7 +196,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address do not have a corresponding kernel virtual address space mapping) and low-memory pages. -Note: Please refer to Documentation/DMA-API-HOWTO.txt for a discussion +Note: Please refer to :doc:`/core-api/dma-api-howto` for a discussion on PCI high mem DMA aspects and mapping of scatter gather lists, and support for 64 bit PCI. diff --git a/Documentation/bus-virt-phys-mapping.txt b/Documentation/bus-virt-phys-mapping.txt index 4bb07c2f3e7d..c7bc99cd2e21 100644 --- a/Documentation/bus-virt-phys-mapping.txt +++ b/Documentation/bus-virt-phys-mapping.txt @@ -8,7 +8,7 @@ How to access I/O mapped memory from within device drivers The virt_to_bus() and bus_to_virt() functions have been superseded by the functionality provided by the PCI DMA interface - (see Documentation/DMA-API-HOWTO.txt). They continue + (see :doc:`/core-api/dma-api-howto`). They continue to be documented below for historical purposes, but new code must not use them. 
--davidm 00/12/12 diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index 2d8d2fed7317..63b4a2f20867 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -5,7 +5,7 @@ Dynamic DMA mapping using the generic device :Author: James E.J. Bottomley This document describes the DMA API. For a more gentle introduction -of the API (and actual examples), see Documentation/DMA-API-HOWTO.txt. +of the API (and actual examples), see :doc:`/core-api/dma-api-howto`. This API is split into two pieces. Part I describes the basic API. Part II describes extensions for supporting non-consistent memory @@ -471,7 +471,7 @@ without the _attrs suffixes, except that they pass an optional dma_attrs. The interpretation of DMA attributes is architecture-specific, and -each attribute should be documented in Documentation/DMA-attributes.txt. +each attribute should be documented in :doc:`/core-api/dma-attributes`. If dma_attrs are 0, the semantics of each of these functions is identical to those of the corresponding function @@ -484,7 +484,7 @@ for DMA:: #include /* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and - * documented in Documentation/DMA-attributes.txt */ + * documented in Documentation/core-api/dma-attributes.rst */ ... unsigned long attr; diff --git a/Documentation/core-api/dma-isa-lpc.rst b/Documentation/core-api/dma-isa-lpc.rst index b1ec7b16c21f..e59a3d35a93d 100644 --- a/Documentation/core-api/dma-isa-lpc.rst +++ b/Documentation/core-api/dma-isa-lpc.rst @@ -17,7 +17,7 @@ To do ISA style DMA you need to include two headers:: #include The first is the generic DMA API used to convert virtual addresses to -bus addresses (see Documentation/DMA-API.txt for details). +bus addresses (see :doc:`/core-api/dma-api` for details). The second contains the routines specific to ISA DMA transfers. Since this is not present on all platforms make sure you construct your diff --git a/Documentation/driver-api/usb/dma.rst b/Documentation/driver-api/usb/dma.rst index 59d5aee89e37..2b3dbd3265b4 100644 --- a/Documentation/driver-api/usb/dma.rst +++ b/Documentation/driver-api/usb/dma.rst @@ -10,7 +10,7 @@ API overview The big picture is that USB drivers can continue to ignore most DMA issues, though they still must provide DMA-ready buffers (see -``Documentation/DMA-API-HOWTO.txt``). That's how they've worked through +:doc:`/core-api/dma-api-howto`). That's how they've worked through the 2.4 (and earlier) kernels, or they can now be DMA-aware. DMA-aware usb drivers: @@ -60,7 +60,7 @@ and effects like cache-trashing can impose subtle penalties. force a consistent memory access ordering by using memory barriers. It's not using a streaming DMA mapping, so it's good for small transfers on systems where the I/O would otherwise thrash an IOMMU mapping. (See - ``Documentation/DMA-API-HOWTO.txt`` for definitions of "coherent" and + :doc:`/core-api/dma-api-howto` for definitions of "coherent" and "streaming" DMA mappings.) Asking for 1/Nth of a page (as well as asking for N pages) is reasonably @@ -91,7 +91,7 @@ Working with existing buffers Existing buffers aren't usable for DMA without first being mapped into the DMA address space of the device. However, most buffers passed to your driver can safely be used with such DMA mapping. 
(See the first section -of Documentation/DMA-API-HOWTO.txt, titled "What memory is DMA-able?") +of :doc:`/core-api/dma-api-howto`, titled "What memory is DMA-able?") - When you're using scatterlists, you can map everything at once. On some systems, this kicks in an IOMMU and turns the scatterlists into single diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index 34d041d68f78..604cee350e53 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -570,8 +570,8 @@ ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE [*] 버스 마스터링 DMA 와 일관성에 대해서는 다음을 참고하시기 바랍니다: Documentation/driver-api/pci/pci.rst - Documentation/DMA-API-HOWTO.txt - Documentation/DMA-API.txt + Documentation/core-api/dma-api-howto.rst + Documentation/core-api/dma-api.rst 데이터 의존성 배리어 (역사적) @@ -1907,7 +1907,7 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효 writel_relaxed() 와 같은 완화된 I/O 접근자들에 대한 자세한 내용을 위해서는 "커널 I/O 배리어의 효과" 섹션을, consistent memory 에 대한 자세한 내용을 - 위해선 Documentation/DMA-API.txt 문서를 참고하세요. + 위해선 Documentation/core-api/dma-api.rst 문서를 참고하세요. ========================= diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index a806227c1fad..656a4888c300 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -907,7 +907,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dir: dma direction * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long poff, size_t size, @@ -1028,7 +1028,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -1105,7 +1105,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, @@ -1162,7 +1162,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) @@ -1425,7 +1425,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, @@ -1524,7 +1524,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, * @dir: R/W or both. 
* @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 70cd24bdcfec..4f1596bb1936 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -3,7 +3,7 @@ ** PARISC 1.1 Dynamic DMA mapping support. ** This implementation is for PA-RISC platforms that do not support ** I/O TLBs (aka DMA address translation hardware). -** See Documentation/DMA-API-HOWTO.txt for interface definitions. +** See Documentation/core-api/dma-api-howto.rst for interface definitions. ** ** (c) Copyright 1999,2000 Hewlett-Packard Company ** (c) Copyright 2000 Grant Grundler diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 6b15a24930e0..fed67eafcacc 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -3,8 +3,8 @@ #define _ASM_X86_DMA_MAPPING_H /* - * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and - * Documentation/DMA-API.txt for documentation. + * IOMMU interface. See Documentation/core-api/dma-api-howto.rst and + * Documentation/core-api/dma-api.rst for documentation. */ #include diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 17cb5b933dcf..e89031e9c847 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -6,7 +6,7 @@ * This allows to use PCI devices that only support 32bit addresses on systems * with more than 4GB. * - * See Documentation/DMA-API-HOWTO.txt for the interface specification. + * See Documentation/core-api/dma-api-howto.rst for the interface specification. * * Copyright 2002 Andi Kleen, SuSE Labs. */ diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 7e112829d250..5368452eb5a6 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -666,7 +666,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dev: instance of PCI owned by the driver that's asking * @mask: number of address bits this PCI device can handle * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_dma_supported( struct device *dev, u64 mask) { @@ -698,7 +698,7 @@ static int sba_dma_supported( struct device *dev, u64 mask) * @size: number of bytes to map in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static dma_addr_t sba_map_single(struct device *dev, void *addr, size_t size, @@ -788,7 +788,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset, * @size: number of bytes mapped in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, @@ -867,7 +867,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. 
* - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) @@ -898,7 +898,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_free(struct device *hwdev, size_t size, void *vaddr, @@ -933,7 +933,7 @@ int dump_run_sg = 0; * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, @@ -1017,7 +1017,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 78f677cf45ab..ef2b153ddbd9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -14,7 +14,7 @@ /** * List of possible attributes associated with a DMA mapping. The semantics - * of each attribute should be defined in Documentation/DMA-attributes.txt. + * of each attribute should be defined in Documentation/core-api/dma-attributes.rst. */ /* diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index b89d5e31f172..34450f7ad510 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -31,7 +31,7 @@ * does memory allocation too using vmalloc_32(). * * videobuf_dma_*() - * see Documentation/DMA-API-HOWTO.txt, these functions to + * see Documentation/core-api/dma-api-howto.rst, these functions to * basically the same. The map function does also build a * scatterlist for the buffer (and unmap frees it ...) * diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 36c962a86bf2..f97f088ace7e 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -1071,7 +1071,7 @@ static void check_unmap(struct dma_debug_entry *ref) /* * Drivers should use dma_mapping_error() to check the returned * addresses of dma_map_single() and dma_map_page(). - * If not, print this warning message. See Documentation/DMA-API.txt. + * If not, print this warning message. See Documentation/core-api/dma-api.rst. */ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { err_printk(ref->dev, entry, -- cgit v1.2.3-59-g8ed1b From d4cdd146d0db900b2eb6c2d28cba719b3bf0a928 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 29 May 2020 22:46:20 +0100 Subject: kunit: generalize kunit_resource API beyond allocated resources In its original form, the kunit resources API - consisting the struct kunit_resource and associated functions - was focused on adding allocated resources during test operation that would be automatically cleaned up on test completion. The recent RFC patch proposing converting KASAN tests to KUnit [1] showed another potential model - where outside of test context, but with a pointer to the test state, we wish to access/update test-related data, but expressly want to avoid allocations. 
It turns out we can generalize the kunit_resource to support static resources where the struct kunit_resource * is passed in and initialized for us. As part of this work, we also change the "allocation" field to the more general "data" name, as instead of associating an allocation, we can associate a pointer to static data. Static data is distinguished by a NULL free functions. A test is added to cover using kunit_add_resource() with a static resource and data. Finally we also make use of the kernel's krefcount interfaces to manage reference counting of KUnit resources. The motivation for this is simple; if we have kernel threads accessing and using resources (say via kunit_find_resource()) we need to ensure we do not remove said resources (or indeed free them if they were dynamically allocated) until the reference count reaches zero. A new function - kunit_put_resource() - is added to handle this, and it should be called after a thread using kunit_find_resource() is finished with the retrieved resource. We ensure that the functions needed to look up, use and drop reference count are "static inline"-defined so that they can be used by builtin code as well as modules in the case that KUnit is built as a module. A cosmetic change here also; I've tried moving to kunit_[action]_resource() as the format of function names for consistency and readability. [1] https://lkml.org/lkml/2020/2/26/1286 Signed-off-by: Alan Maguire Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- include/kunit/test.h | 156 +++++++++++++++++++++++++++++++++++++--------- lib/kunit/kunit-test.c | 74 ++++++++++++++++------ lib/kunit/string-stream.c | 14 ++--- lib/kunit/test.c | 153 ++++++++++++++++++++++++--------------------- 4 files changed, 268 insertions(+), 129 deletions(-) (limited to 'include') diff --git a/include/kunit/test.h b/include/kunit/test.h index 47e61e1d5337..f9b914ebfd75 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -15,6 +15,7 @@ #include #include #include +#include struct kunit_resource; @@ -23,13 +24,19 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); /** * struct kunit_resource - represents a *test managed resource* - * @allocation: for the user to store arbitrary data. + * @data: for the user to store arbitrary data. * @free: a user supplied function to free the resource. Populated by - * kunit_alloc_resource(). + * kunit_resource_alloc(). * * Represents a *test managed resource*, a resource which will automatically be * cleaned up at the end of a test case. * + * Resources are reference counted so if a resource is retrieved via + * kunit_alloc_and_get_resource() or kunit_find_resource(), we need + * to call kunit_put_resource() to reduce the resource reference count + * when finished with it. Note that kunit_alloc_resource() does not require a + * kunit_resource_put() because it does not retrieve the resource itself. + * * Example: * * .. 
code-block:: c @@ -42,9 +49,9 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); * static int kunit_kmalloc_init(struct kunit_resource *res, void *context) * { * struct kunit_kmalloc_params *params = context; - * res->allocation = kmalloc(params->size, params->gfp); + * res->data = kmalloc(params->size, params->gfp); * - * if (!res->allocation) + * if (!res->data) * return -ENOMEM; * * return 0; @@ -52,30 +59,26 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); * * static void kunit_kmalloc_free(struct kunit_resource *res) * { - * kfree(res->allocation); + * kfree(res->data); * } * * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp) * { * struct kunit_kmalloc_params params; - * struct kunit_resource *res; * * params.size = size; * params.gfp = gfp; * - * res = kunit_alloc_resource(test, kunit_kmalloc_init, + * return kunit_alloc_resource(test, kunit_kmalloc_init, * kunit_kmalloc_free, ¶ms); - * if (res) - * return res->allocation; - * - * return NULL; * } */ struct kunit_resource { - void *allocation; - kunit_resource_free_t free; + void *data; /* private: internal use only. */ + kunit_resource_free_t free; + struct kref refcount; struct list_head node; }; @@ -283,6 +286,64 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, gfp_t internal_gfp, void *context); +/** + * kunit_get_resource() - Hold resource for use. Should not need to be used + * by most users as we automatically get resources + * retrieved by kunit_find_resource*(). + * @res: resource + */ +static inline void kunit_get_resource(struct kunit_resource *res) +{ + kref_get(&res->refcount); +} + +/* + * Called when refcount reaches zero via kunit_put_resources(); + * should not be called directly. + */ +static inline void kunit_release_resource(struct kref *kref) +{ + struct kunit_resource *res = container_of(kref, struct kunit_resource, + refcount); + + /* If free function is defined, resource was dynamically allocated. */ + if (res->free) { + res->free(res); + kfree(res); + } +} + +/** + * kunit_put_resource() - When caller is done with retrieved resource, + * kunit_put_resource() should be called to drop + * reference count. The resource list maintains + * a reference count on resources, so if no users + * are utilizing a resource and it is removed from + * the resource list, it will be freed via the + * associated free function (if any). Only + * needs to be used if we alloc_and_get() or + * find() resource. + * @res: resource + */ +static inline void kunit_put_resource(struct kunit_resource *res) +{ + kref_put(&res->refcount, kunit_release_resource); +} + +/** + * kunit_add_resource() - Add a *test managed resource*. + * @test: The test context object. + * @init: a user-supplied function to initialize the result (if needed). If + * none is supplied, the resource data value is simply set to @data. + * If an init function is supplied, @data is passed to it instead. + * @free: a user-supplied function to free the resource (if needed). + * @data: value to pass to init function or set in resource data field. + */ +int kunit_add_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + void *data); /** * kunit_alloc_resource() - Allocates a *test managed resource*. * @test: The test context object. @@ -295,7 +356,7 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, * cleaned up at the end of a test case. See &struct kunit_resource for an * example. 
* - * NOTE: KUnit needs to allocate memory for each kunit_resource object. You must + * Note: KUnit needs to allocate memory for a kunit_resource object. You must * specify an @internal_gfp that is compatible with the use context of your * resource. */ @@ -307,54 +368,89 @@ static inline void *kunit_alloc_resource(struct kunit *test, { struct kunit_resource *res; - res = kunit_alloc_and_get_resource(test, init, free, internal_gfp, - context); + res = kzalloc(sizeof(*res), internal_gfp); + if (!res) + return NULL; - if (res) - return res->allocation; + if (!kunit_add_resource(test, init, free, res, context)) + return res->data; return NULL; } typedef bool (*kunit_resource_match_t)(struct kunit *test, - const void *res, + struct kunit_resource *res, void *match_data); /** * kunit_resource_instance_match() - Match a resource with the same instance. * @test: Test case to which the resource belongs. - * @res: The data stored in kunit_resource->allocation. + * @res: The resource. * @match_data: The resource pointer to match against. * * An instance of kunit_resource_match_t that matches a resource whose * allocation matches @match_data. */ static inline bool kunit_resource_instance_match(struct kunit *test, - const void *res, + struct kunit_resource *res, void *match_data) { - return res == match_data; + return res->data == match_data; } /** - * kunit_resource_destroy() - Find a kunit_resource and destroy it. + * kunit_find_resource() - Find a resource using match function/data. + * @test: Test case to which the resource belongs. + * @match: match function to be applied to resources/match data. + * @match_data: data to be used in matching. + */ +static inline struct kunit_resource * +kunit_find_resource(struct kunit *test, + kunit_resource_match_t match, + void *match_data) +{ + struct kunit_resource *res, *found = NULL; + + spin_lock(&test->lock); + + list_for_each_entry_reverse(res, &test->resources, node) { + if (match(test, res, (void *)match_data)) { + found = res; + kunit_get_resource(found); + break; + } + } + + spin_unlock(&test->lock); + + return found; +} + +/** + * kunit_destroy_resource() - Find a kunit_resource and destroy it. * @test: Test case to which the resource belongs. * @match: Match function. Returns whether a given resource matches @match_data. - * @free: Must match free on the kunit_resource to free. * @match_data: Data passed into @match. * - * Free the latest kunit_resource of @test for which @free matches the - * kunit_resource_free_t associated with the resource and for which @match - * returns true. - * * RETURNS: * 0 if kunit_resource is found and freed, -ENOENT if not found. */ -int kunit_resource_destroy(struct kunit *test, +int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, - kunit_resource_free_t free, void *match_data); +/** + * kunit_remove_resource: remove resource from resource list associated with + * test. + * @test: The test context object. + * @res: The resource to be removed. + * + * Note that the resource will not be immediately freed since it is likely + * the caller has a reference to it via alloc_and_get() or find(); + * in this case a final call to kunit_put_resource() is required. + */ +void kunit_remove_resource(struct kunit *test, struct kunit_resource *res); + /** * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*. * @test: The test context object. 
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 4f3d36a72f8f..03f3ecaa1ef9 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -118,14 +118,14 @@ static int fake_resource_init(struct kunit_resource *res, void *context) { struct kunit_test_resource_context *ctx = context; - res->allocation = &ctx->is_resource_initialized; + res->data = &ctx->is_resource_initialized; ctx->is_resource_initialized = true; return 0; } static void fake_resource_free(struct kunit_resource *res) { - bool *is_resource_initialized = res->allocation; + bool *is_resource_initialized = res->data; *is_resource_initialized = false; } @@ -154,11 +154,21 @@ static void kunit_resource_test_alloc_resource(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res); KUNIT_EXPECT_PTR_EQ(test, &ctx->is_resource_initialized, - (bool *) res->allocation); + (bool *)res->data); KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources)); KUNIT_EXPECT_PTR_EQ(test, free, res->free); + + kunit_put_resource(res); } +/* + * Note: tests below use kunit_alloc_and_get_resource(), so as a consequence + * they have a reference to the associated resource that they must release + * via kunit_put_resource(). In normal operation, users will only + * have to do this for cases where they use kunit_find_resource(), and the + * kunit_alloc_resource() function will be used (which does not take a + * resource reference). + */ static void kunit_resource_test_destroy_resource(struct kunit *test) { struct kunit_test_resource_context *ctx = test->priv; @@ -169,11 +179,12 @@ static void kunit_resource_test_destroy_resource(struct kunit *test) GFP_KERNEL, ctx); + kunit_put_resource(res); + KUNIT_ASSERT_FALSE(test, - kunit_resource_destroy(&ctx->test, + kunit_destroy_resource(&ctx->test, kunit_resource_instance_match, - res->free, - res->allocation)); + res->data)); KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized); KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); @@ -191,6 +202,7 @@ static void kunit_resource_test_cleanup_resources(struct kunit *test) fake_resource_free, GFP_KERNEL, ctx); + kunit_put_resource(resources[i]); } kunit_cleanup(&ctx->test); @@ -221,14 +233,14 @@ static int fake_resource_2_init(struct kunit_resource *res, void *context) KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2); - res->allocation = ctx; + res->data = ctx; return 0; } static void fake_resource_2_free(struct kunit_resource *res) { - struct kunit_test_resource_context *ctx = res->allocation; + struct kunit_test_resource_context *ctx = res->data; KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2); } @@ -236,23 +248,26 @@ static void fake_resource_2_free(struct kunit_resource *res) static int fake_resource_1_init(struct kunit_resource *res, void *context) { struct kunit_test_resource_context *ctx = context; + struct kunit_resource *res2; - kunit_alloc_and_get_resource(&ctx->test, - fake_resource_2_init, - fake_resource_2_free, - GFP_KERNEL, - ctx); + res2 = kunit_alloc_and_get_resource(&ctx->test, + fake_resource_2_init, + fake_resource_2_free, + GFP_KERNEL, + ctx); KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1); - res->allocation = ctx; + res->data = ctx; + + kunit_put_resource(res2); return 0; } static void fake_resource_1_free(struct kunit_resource *res) { - struct kunit_test_resource_context *ctx = res->allocation; + struct kunit_test_resource_context *ctx = res->data; KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1); } @@ -265,13 +280,14 @@ static void fake_resource_1_free(struct kunit_resource 
*res) static void kunit_resource_test_proper_free_ordering(struct kunit *test) { struct kunit_test_resource_context *ctx = test->priv; + struct kunit_resource *res; /* fake_resource_1 allocates a fake_resource_2 in its init. */ - kunit_alloc_and_get_resource(&ctx->test, - fake_resource_1_init, - fake_resource_1_free, - GFP_KERNEL, - ctx); + res = kunit_alloc_and_get_resource(&ctx->test, + fake_resource_1_init, + fake_resource_1_free, + GFP_KERNEL, + ctx); /* * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER @@ -281,6 +297,8 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2); KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1); + kunit_put_resource(res); + kunit_cleanup(&ctx->test); /* @@ -292,6 +310,21 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2); } +static void kunit_resource_test_static(struct kunit *test) +{ + struct kunit_test_resource_context ctx; + struct kunit_resource res; + + KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx), + 0); + + KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx); + + kunit_cleanup(test); + + KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); +} + static int kunit_resource_test_init(struct kunit *test) { struct kunit_test_resource_context *ctx = @@ -320,6 +353,7 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_destroy_resource), KUNIT_CASE(kunit_resource_test_cleanup_resources), KUNIT_CASE(kunit_resource_test_proper_free_ordering), + KUNIT_CASE(kunit_resource_test_static), {} }; diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c index 350392013c14..141789ca8949 100644 --- a/lib/kunit/string-stream.c +++ b/lib/kunit/string-stream.c @@ -33,14 +33,14 @@ static int string_stream_fragment_init(struct kunit_resource *res, if (!frag->fragment) return -ENOMEM; - res->allocation = frag; + res->data = frag; return 0; } static void string_stream_fragment_free(struct kunit_resource *res) { - struct string_stream_fragment *frag = res->allocation; + struct string_stream_fragment *frag = res->data; list_del(&frag->node); kunit_kfree(frag->test, frag->fragment); @@ -65,9 +65,8 @@ static struct string_stream_fragment *alloc_string_stream_fragment( static int string_stream_fragment_destroy(struct string_stream_fragment *frag) { - return kunit_resource_destroy(frag->test, + return kunit_destroy_resource(frag->test, kunit_resource_instance_match, - string_stream_fragment_free, frag); } @@ -179,7 +178,7 @@ static int string_stream_init(struct kunit_resource *res, void *context) if (!stream) return -ENOMEM; - res->allocation = stream; + res->data = stream; stream->gfp = ctx->gfp; stream->test = ctx->test; INIT_LIST_HEAD(&stream->fragments); @@ -190,7 +189,7 @@ static int string_stream_init(struct kunit_resource *res, void *context) static void string_stream_free(struct kunit_resource *res) { - struct string_stream *stream = res->allocation; + struct string_stream *stream = res->data; string_stream_clear(stream); } @@ -211,8 +210,7 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp) int string_stream_destroy(struct string_stream *stream) { - return kunit_resource_destroy(stream->test, + return kunit_destroy_resource(stream->test, kunit_resource_instance_match, - string_stream_free, stream); } diff --git a/lib/kunit/test.c b/lib/kunit/test.c index ccb2ffad8dcf..569b7f9f17e4 100644 --- a/lib/kunit/test.c +++ 
b/lib/kunit/test.c @@ -8,6 +8,7 @@ #include #include +#include #include #include "debugfs.h" @@ -406,90 +407,92 @@ void __kunit_test_suites_exit(struct kunit_suite **suites) } EXPORT_SYMBOL_GPL(__kunit_test_suites_exit); -struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - gfp_t internal_gfp, - void *context) +/* + * Used for static resources and when a kunit_resource * has been created by + * kunit_alloc_resource(). When an init function is supplied, @data is passed + * into the init function; otherwise, we simply set the resource data field to + * the data value passed in. + */ +int kunit_add_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + void *data) { - struct kunit_resource *res; - int ret; + int ret = 0; - res = kzalloc(sizeof(*res), internal_gfp); - if (!res) - return NULL; + res->free = free; + kref_init(&res->refcount); - ret = init(res, context); - if (ret) - return NULL; + if (init) { + ret = init(res, data); + if (ret) + return ret; + } else { + res->data = data; + } - res->free = free; spin_lock(&test->lock); list_add_tail(&res->node, &test->resources); + /* refcount for list is established by kref_init() */ spin_unlock(&test->lock); - return res; -} -EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource); - -static void kunit_resource_free(struct kunit *test, struct kunit_resource *res) -{ - res->free(res); - kfree(res); + return ret; } +EXPORT_SYMBOL_GPL(kunit_add_resource); -static struct kunit_resource *kunit_resource_find(struct kunit *test, - kunit_resource_match_t match, - kunit_resource_free_t free, - void *match_data) +struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + gfp_t internal_gfp, + void *data) { - struct kunit_resource *resource; + struct kunit_resource *res; + int ret; - lockdep_assert_held(&test->lock); + res = kzalloc(sizeof(*res), internal_gfp); + if (!res) + return NULL; - list_for_each_entry_reverse(resource, &test->resources, node) { - if (resource->free != free) - continue; - if (match(test, resource->allocation, match_data)) - return resource; + ret = kunit_add_resource(test, init, free, res, data); + if (!ret) { + /* + * bump refcount for get; kunit_resource_put() should be called + * when done. 
+ */ + kunit_get_resource(res); + return res; } - return NULL; } +EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource); -static struct kunit_resource *kunit_resource_remove( - struct kunit *test, - kunit_resource_match_t match, - kunit_resource_free_t free, - void *match_data) +void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) { - struct kunit_resource *resource; - spin_lock(&test->lock); - resource = kunit_resource_find(test, match, free, match_data); - if (resource) - list_del(&resource->node); + list_del(&res->node); spin_unlock(&test->lock); - - return resource; + kunit_put_resource(res); } +EXPORT_SYMBOL_GPL(kunit_remove_resource); -int kunit_resource_destroy(struct kunit *test, - kunit_resource_match_t match, - kunit_resource_free_t free, +int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, void *match_data) { - struct kunit_resource *resource; - - resource = kunit_resource_remove(test, match, free, match_data); + struct kunit_resource *res = kunit_find_resource(test, match, + match_data); - if (!resource) + if (!res) return -ENOENT; - kunit_resource_free(test, resource); + kunit_remove_resource(test, res); + + /* We have a reference also via _find(); drop it. */ + kunit_put_resource(res); + return 0; } -EXPORT_SYMBOL_GPL(kunit_resource_destroy); +EXPORT_SYMBOL_GPL(kunit_destroy_resource); struct kunit_kmalloc_params { size_t size; @@ -500,8 +503,8 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context) { struct kunit_kmalloc_params *params = context; - res->allocation = kmalloc(params->size, params->gfp); - if (!res->allocation) + res->data = kmalloc(params->size, params->gfp); + if (!res->data) return -ENOMEM; return 0; @@ -509,7 +512,7 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context) static void kunit_kmalloc_free(struct kunit_resource *res) { - kfree(res->allocation); + kfree(res->data); } void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp) @@ -529,20 +532,25 @@ EXPORT_SYMBOL_GPL(kunit_kmalloc); void kunit_kfree(struct kunit *test, const void *ptr) { - int rc; + struct kunit_resource *res; - rc = kunit_resource_destroy(test, - kunit_resource_instance_match, - kunit_kmalloc_free, - (void *)ptr); + res = kunit_find_resource(test, kunit_resource_instance_match, + (void *)ptr); + + /* + * Removing the resource from the list of resources drops the + * reference count to 1; the final put will trigger the free. + */ + kunit_remove_resource(test, res); + + kunit_put_resource(res); - WARN_ON(rc); } EXPORT_SYMBOL_GPL(kunit_kfree); void kunit_cleanup(struct kunit *test) { - struct kunit_resource *resource; + struct kunit_resource *res; /* * test->resources is a stack - each allocation must be freed in the @@ -559,13 +567,16 @@ void kunit_cleanup(struct kunit *test) spin_unlock(&test->lock); break; } - resource = list_last_entry(&test->resources, - struct kunit_resource, - node); - list_del(&resource->node); + res = list_last_entry(&test->resources, + struct kunit_resource, + node); + /* + * Need to unlock here as a resource may remove another + * resource, and this can't happen if the test->lock + * is held. 
+ */ spin_unlock(&test->lock); - - kunit_resource_free(test, resource); + kunit_remove_resource(test, res); } } EXPORT_SYMBOL_GPL(kunit_cleanup); -- cgit v1.2.3-59-g8ed1b From 725aca9585956676687c4cb803e88f770b0df2b2 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 29 May 2020 22:46:21 +0100 Subject: kunit: add support for named resources The kunit resources API allows for custom initialization and cleanup code (init/fini); here a new resource add function sets the "struct kunit_resource" "name" field, and calls the standard add function. Having a simple way to name resources is useful in cases such as multithreaded tests where a set of resources are shared among threads; a pointer to the "struct kunit *" test state then is all that is needed to retrieve and use named resources. Support is provided to add, find and destroy named resources; the latter two are simply wrappers that use a "match-by-name" callback. If an attempt to add a resource with a name that already exists is made kunit_add_named_resource() will return -EEXIST. Signed-off-by: Alan Maguire Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- include/kunit/test.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++ lib/kunit/kunit-test.c | 37 ++++++++++++++++++++++++++++++++++ lib/kunit/test.c | 24 ++++++++++++++++++++++ 3 files changed, 115 insertions(+) (limited to 'include') diff --git a/include/kunit/test.h b/include/kunit/test.h index f9b914ebfd75..59f3144f009a 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -72,9 +72,15 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); * return kunit_alloc_resource(test, kunit_kmalloc_init, * kunit_kmalloc_free, ¶ms); * } + * + * Resources can also be named, with lookup/removal done on a name + * basis also. kunit_add_named_resource(), kunit_find_named_resource() + * and kunit_destroy_named_resource(). Resource names must be + * unique within the test instance. */ struct kunit_resource { void *data; + const char *name; /* optional name */ /* private: internal use only. */ kunit_resource_free_t free; @@ -344,6 +350,21 @@ int kunit_add_resource(struct kunit *test, kunit_resource_free_t free, struct kunit_resource *res, void *data); + +/** + * kunit_add_named_resource() - Add a named *test managed resource*. + * @test: The test context object. + * @init: a user-supplied function to initialize the resource data, if needed. + * @free: a user-supplied function to free the resource data, if needed. + * @name_data: name and data to be set for resource. + */ +int kunit_add_named_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + const char *name, + void *data); + /** * kunit_alloc_resource() - Allocates a *test managed resource*. * @test: The test context object. @@ -398,6 +419,19 @@ static inline bool kunit_resource_instance_match(struct kunit *test, return res->data == match_data; } +/** + * kunit_resource_name_match() - Match a resource with the same name. + * @test: Test case to which the resource belongs. + * @res: The resource. + * @match_name: The name to match against. + */ +static inline bool kunit_resource_name_match(struct kunit *test, + struct kunit_resource *res, + void *match_name) +{ + return res->name && strcmp(res->name, match_name) == 0; +} + /** * kunit_find_resource() - Find a resource using match function/data. * @test: Test case to which the resource belongs. 
@@ -426,6 +460,19 @@ kunit_find_resource(struct kunit *test, return found; } +/** + * kunit_find_named_resource() - Find a resource using match name. + * @test: Test case to which the resource belongs. + * @name: match name. + */ +static inline struct kunit_resource * +kunit_find_named_resource(struct kunit *test, + const char *name) +{ + return kunit_find_resource(test, kunit_resource_name_match, + (void *)name); +} + /** * kunit_destroy_resource() - Find a kunit_resource and destroy it. * @test: Test case to which the resource belongs. @@ -439,6 +486,13 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, void *match_data); +static inline int kunit_destroy_named_resource(struct kunit *test, + const char *name) +{ + return kunit_destroy_resource(test, kunit_resource_name_match, + (void *)name); +} + /** * kunit_remove_resource: remove resource from resource list associated with * test. diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 03f3ecaa1ef9..69f902440a0e 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -325,6 +325,42 @@ static void kunit_resource_test_static(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); } +static void kunit_resource_test_named(struct kunit *test) +{ + struct kunit_resource res1, res2, *found = NULL; + struct kunit_test_resource_context ctx; + + KUNIT_EXPECT_EQ(test, + kunit_add_named_resource(test, NULL, NULL, &res1, + "resource_1", &ctx), + 0); + KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx); + + KUNIT_EXPECT_EQ(test, + kunit_add_named_resource(test, NULL, NULL, &res1, + "resource_1", &ctx), + -EEXIST); + + KUNIT_EXPECT_EQ(test, + kunit_add_named_resource(test, NULL, NULL, &res2, + "resource_2", &ctx), + 0); + + found = kunit_find_named_resource(test, "resource_1"); + + KUNIT_EXPECT_PTR_EQ(test, found, &res1); + + if (found) + kunit_put_resource(&res1); + + KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"), + 0); + + kunit_cleanup(test); + + KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); +} + static int kunit_resource_test_init(struct kunit *test) { struct kunit_test_resource_context *ctx = @@ -354,6 +390,7 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_cleanup_resources), KUNIT_CASE(kunit_resource_test_proper_free_ordering), KUNIT_CASE(kunit_resource_test_static), + KUNIT_CASE(kunit_resource_test_named), {} }; diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 569b7f9f17e4..c36037200310 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -441,6 +441,30 @@ int kunit_add_resource(struct kunit *test, } EXPORT_SYMBOL_GPL(kunit_add_resource); +int kunit_add_named_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + const char *name, + void *data) +{ + struct kunit_resource *existing; + + if (!name) + return -EINVAL; + + existing = kunit_find_named_resource(test, name); + if (existing) { + kunit_put_resource(existing); + return -EEXIST; + } + + res->name = name; + + return kunit_add_resource(test, init, free, res, data); +} +EXPORT_SYMBOL_GPL(kunit_add_named_resource); + struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, kunit_resource_init_t init, kunit_resource_free_t free, -- cgit v1.2.3-59-g8ed1b From 333740981f94fa80326cc8e5d2da105f17bc1dd5 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:23 +0200 Subject: net: mdio: add a forward declaration for reset_control to mdio.h 
This header refers to struct reset_control but doesn't include any reset header. The structure definition is probably somehow indirectly pulled in since no warnings are reported but for the sake of correctness add the forward declaration for struct reset_control. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/mdio.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 36d2e0673d03..898cbf00332a 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -18,6 +18,7 @@ struct gpio_desc; struct mii_bus; +struct reset_control; /* Multiple levels of nesting are possible. However typically this is * limited to nested DSA like layer, a MUX layer, and the normal -- cgit v1.2.3-59-g8ed1b From adf4f9d49c74a812757c5c67879ece0e54b75417 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 7 Jun 2020 23:51:23 +0200 Subject: irqchip/vic: Drop cascaded intialization call We got rid of the last user of the cascaded intialization from board files so drop this API. Signed-off-by: Linus Walleij Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200607215124.48638-1-linus.walleij@linaro.org --- drivers/irqchip/irq-vic.c | 21 --------------------- include/linux/irqchip/arm-vic.h | 2 -- 2 files changed, 23 deletions(-) (limited to 'include') diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 3c87d925f74c..927ff2c1bf67 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -481,27 +481,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start, __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL); } -/** - * vic_init_cascaded() - initialise a cascaded vectored interrupt controller - * @base: iomem base address - * @parent_irq: the parent IRQ we're cascaded off - * @vic_sources: bitmask of interrupt sources to allow - * @resume_sources: bitmask of interrupt sources to allow for resume - * - * This returns the base for the new interrupts or negative on error. - */ -int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq, - u32 vic_sources, u32 resume_sources) -{ - struct vic_device *v; - - v = &vic_devices[vic_id]; - __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL); - /* Return out acquired base */ - return v->irq; -} -EXPORT_SYMBOL_GPL(vic_init_cascaded); - #ifdef CONFIG_OF static int __init vic_of_init(struct device_node *node, struct device_node *parent) diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index a158b97242c7..2a4b6a5d8522 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -19,7 +19,5 @@ struct pt_regs; void __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); -int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, - u32 vic_sources, u32 resume_sources); #endif -- cgit v1.2.3-59-g8ed1b From b0b92ab6a86e59779c2b17c5f611b04120fdfbb6 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 7 Jun 2020 23:51:24 +0200 Subject: irqchip/vic: Cut down the external API There are registers and functions in the header file that are only used inside the driver. Move these into the driver. 
Signed-off-by: Linus Walleij Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200607215124.48638-2-linus.walleij@linaro.org --- drivers/irqchip/irq-vic.c | 5 ++++- include/linux/irqchip/arm-vic.h | 9 --------- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 927ff2c1bf67..bc235db8a4c5 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -27,7 +27,10 @@ #define VIC_IRQ_STATUS 0x00 #define VIC_FIQ_STATUS 0x04 +#define VIC_RAW_STATUS 0x08 #define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */ +#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ +#define VIC_INT_ENABLE_CLEAR 0x14 #define VIC_INT_SOFT 0x18 #define VIC_INT_SOFT_CLEAR 0x1c #define VIC_PROTECT 0x20 @@ -428,7 +431,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, vic_register(base, 0, irq_start, vic_sources, 0, node); } -void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, +static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node) { diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index 2a4b6a5d8522..f2b11d1df23d 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -9,15 +9,6 @@ #include -#define VIC_RAW_STATUS 0x08 -#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ -#define VIC_INT_ENABLE_CLEAR 0x14 - -struct device_node; -struct pt_regs; - -void __vic_init(void __iomem *base, int parent_irq, int irq_start, - u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); #endif -- cgit v1.2.3-59-g8ed1b From 89778093d38d547cd80f6097659d1cf1c2dd4d9d Mon Sep 17 00:00:00 2001 From: Oscar Carter Date: Sat, 30 May 2020 16:34:28 +0200 Subject: drivers/acpi: Add new macro ACPI_DECLARE_SUBTABLE_PROBE_ENTRY In an effort to enable -Wcast-function-type in the top-level Makefile to support Control Flow Integrity builds, there are the need to remove all the function callback casts. To do this, create a new macro called ACPI_DECLARE_SUBTABLE_PROBE_ENTRY to initialize the acpi_probe_entry struct using the probe_subtbl field instead of the probe_table field. This is a previous work to be able to modify the IRQCHIP_ACPI_DECLARE macro to use this new defined macro. Even though these two commented fields are part of a union, this is necessary to avoid function cast mismatches. That is, due to the IRQCHIP_ACPI_DECLARE invocations use as last parameter a function with the protoype "int (*func)(struct acpi_subtable_header *, const unsigned long)" it's necessary that this macro initialize the probe_subtbl field of the acpi_probe_entry struct and not the probe_table field. Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Oscar Carter Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20200530143430.5203-2-oscar.carter@gmx.com --- include/linux/acpi.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d661cd0ee64d..cf74e044a570 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1154,6 +1154,17 @@ struct acpi_probe_entry { .driver_data = data, \ } +#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ + subtable, valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ + __used __section(__##table##_acpi_probe_table) = { \ + .id = table_id, \ + .type = subtable, \ + .subtable_valid = valid, \ + .probe_subtbl = fn, \ + .driver_data = data, \ + } + #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end -- cgit v1.2.3-59-g8ed1b From aba3c7ed3fcf74524b7072615028827d5e5750d7 Mon Sep 17 00:00:00 2001 From: Oscar Carter Date: Sat, 30 May 2020 16:34:29 +0200 Subject: drivers/irqchip: Use new macro ACPI_DECLARE_SUBTABLE_PROBE_ENTRY In an effort to enable -Wcast-function-type in the top-level Makefile to support Control Flow Integrity builds, there are the need to remove all the function callback casts. To do this, modify the IRQCHIP_ACPI_DECLARE macro to use the new defined macro ACPI_DECLARE_SUBTABLE_PROBE_ENTRY instead of the macro ACPI_DECLARE_PROBE_ENTRY. This is necessary to be able to initialize the the acpi_probe_entry struct using the probe_subtbl field instead of the probe_table field and avoid function cast mismatches. Also, modify the prototype of the functions used by the invocation of the IRQCHIP_ACPI_DECLARE macro to match all the parameters. Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Oscar Carter Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20200530143430.5203-3-oscar.carter@gmx.com --- drivers/irqchip/irq-gic-v3.c | 2 +- drivers/irqchip/irq-gic.c | 2 +- include/linux/irqchip.h | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index cc46bc2d634b..324f280ff606 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -2116,7 +2116,7 @@ static void __init gic_acpi_setup_kvm_info(void) } static int __init -gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; struct fwnode_handle *domain_handle; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 00de05abd3c3..71bd64884ae5 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1592,7 +1592,7 @@ static void __init gic_acpi_setup_kvm_info(void) gic_set_kvm_info(&gic_v2_kvm_info); } -static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, +static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 950e4b2458f0..447f22880a69 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -39,8 +39,9 @@ * @fn: initialization function */ #define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ - ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ - subtable, validate, data, fn) + ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \ + ACPI_SIG_MADT, subtable, \ + validate, data, fn) #ifdef CONFIG_IRQCHIP void irqchip_init(void); -- cgit v1.2.3-59-g8ed1b From 8ebf642f3d809b59f57d0d408189a2218294e269 Mon Sep 17 00:00:00 2001 From: Oscar Carter Date: Sat, 30 May 2020 16:34:30 +0200 Subject: drivers/acpi: Remove function cast Remove the function cast in the ACPI_DECLARE_PROBE_ENTRY macro to ensure that the functions passed as a last parameter to this macro have the right prototype. This is an effort to enable -Wcast-function-type in the top-level Makefile to support Control Flow Integrity builds. Suggested-by: Marc Zyngier Signed-off-by: Oscar Carter Signed-off-by: Marc Zyngier Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20200530143430.5203-4-oscar.carter@gmx.com --- include/linux/acpi.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index cf74e044a570..1cda2d32e4c4 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1143,16 +1143,16 @@ struct acpi_probe_entry { kernel_ulong_t driver_data; }; -#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ + valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ - __used __section(__##table##_acpi_probe_table) \ - = { \ + __used __section(__##table##_acpi_probe_table) = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ - .probe_table = (acpi_tbl_table_handler)fn, \ - .driver_data = data, \ - } + .probe_table = fn, \ + .driver_data = data, \ + } #define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ subtable, valid, data, fn) \ -- cgit v1.2.3-59-g8ed1b From 167cbce27444be3203081b97ea65178c4088b062 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 10 Jun 2020 17:51:21 +0200 Subject: serial: core: drop unnecessary gpio include Drop the recently added gpio include from the serial-core header in favour of a forward declaration and instead include the gpio header only where needed. Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20200610155121.14014-1-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_port.c | 1 + drivers/tty/serial/serial_core.c | 1 + include/linux/serial_core.h | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 1632f7d25acc..d64ca77d9cfa 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 57840cf90388..5750f3628357 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 9fd550e7946a..653c653ad0c2 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -30,6 +29,7 @@ struct uart_port; struct serial_struct; struct device; +struct gpio_desc; /* * This structure describes all the operations that can be done on the -- cgit v1.2.3-59-g8ed1b From 9205d7b1c1cffa827c23bdbf35e04c7cbe1e1f10 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 25 Jun 2020 22:59:41 -0700 Subject: net/mlx5: Avoid RDMA file inclusion in core driver mlx5 cq.h does not depend on RDMA verbs. Remove RDMA verbs file inclusion. 
Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- include/linux/mlx5/cq.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index b5a9399e07ee..7bfb67363434 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -33,7 +33,6 @@ #ifndef MLX5_CORE_CQ_H #define MLX5_CORE_CQ_H -#include #include #include -- cgit v1.2.3-59-g8ed1b From 2d1b69ed65ee033aa541518cc9f6a815296ac493 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 25 Jun 2020 22:59:43 -0700 Subject: net/mlx5: kTLS, Improve TLS params layout structures Add explicit WQE segment structures for the TLS static and progress params. According to the HW spec, TISN is not part of the progress params context, take it out of it. Rename the control segment tisn field as it could hold either a TIS or a TIR number. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 14 +++++++++----- .../net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2 +- include/linux/mlx5/device.h | 9 +++++++++ include/linux/mlx5/mlx5_ifc.h | 5 +---- include/linux/mlx5/qp.h | 2 +- 7 files changed, 23 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index bfd3e1161bc6..31cac239563d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -182,7 +182,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg) { - return cseg && !!cseg->tisn; + return cseg && !!cseg->tis_tir_num; } static inline u8 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index c6180892cfcb..806ed185dd4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -19,7 +19,7 @@ #define MLX5E_KTLS_PROGRESS_WQE_SZ \ (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ - MLX5_ST_SZ_BYTES(tls_progress_params)) + sizeof(struct mlx5_wqe_tls_progress_params_seg)) #define MLX5E_KTLS_PROGRESS_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 3cd78d9503c1..ad7300f19815 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -64,7 +64,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | STATIC_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); + cseg->tis_tir_num = cpu_to_be32(priv_tx->tisn << 8); ucseg->flags = MLX5_UMR_INLINE; ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); @@ -75,10 +75,14 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, static void fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn); - MLX5_SET(tls_progress_params, ctx, record_tracker_state, + struct mlx5_wqe_tls_progress_params_seg *params; + + params = ctx; + + params->tis_tir_num = cpu_to_be32(priv_tx->tisn); + MLX5_SET(tls_progress_params, params->ctx, record_tracker_state, MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); - MLX5_SET(tls_progress_params, ctx, auth_state, + MLX5_SET(tls_progress_params, params->ctx, auth_state, MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD); } @@ -284,7 +288,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - cseg->tisn = cpu_to_be32(tisn << 8); + cseg->tis_tir_num = cpu_to_be32(tisn << 8); cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; fsz = skb_frag_size(frag); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 05454a843b28..72d26fbc8d5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -305,7 +305,7 @@ err_out: void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, struct mlx5e_accel_tx_tls_state *state) { - cseg->tisn = cpu_to_be32(state->tls_tisn << 8); + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); } static int tls_update_resync_sn(struct net_device *netdev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1bc27aca648b..57db125e5802 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -458,6 +458,15 @@ enum { MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, }; +struct mlx5_wqe_tls_static_params_seg { + u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)]; +}; + +struct mlx5_wqe_tls_progress_params_seg { + __be32 tis_tir_num; + u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)]; +}; + enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 116bd9bb347f..a227518c70cf 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -10638,16 +10638,13 @@ struct mlx5_ifc_tls_static_params_bits { }; struct mlx5_ifc_tls_progress_params_bits { - u8 reserved_at_0[0x8]; - u8 tisn[0x18]; - u8 next_record_tcp_sn[0x20]; u8 hw_resync_tcp_sn[0x20]; u8 record_tracker_state[0x2]; u8 auth_state[0x2]; - u8 reserved_at_64[0x4]; + u8 reserved_at_44[0x4]; u8 hw_offset_record_number[0x18]; }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b8992b861ae6..36492a1342cf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -209,7 +209,7 @@ struct mlx5_wqe_ctrl_seg { __be32 general_id; __be32 imm; __be32 umr_mkey; - __be32 tisn; + __be32 tis_tir_num; }; }; -- cgit v1.2.3-59-g8ed1b From acb5a07aaf2723cd273a4089e62611a414fb1c35 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 8 Jun 2020 12:42:52 +0300 Subject: Revert "net/tls: Add force_resync for 
driver resync" This reverts commit b3ae2459f89773adcbf16fef4b68deaaa3be1929. Revert the force resync API. Not in use. To be replaced by a better async resync API downstream. Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- include/net/tls.h | 12 +----------- net/tls/tls_device.c | 9 +++------ 2 files changed, 4 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/tls.h b/include/net/tls.h index 3212d3c214a9..ca5f7f437289 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -607,22 +607,12 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) #endif /* The TLS context is valid until sk_destruct is called */ -#define RESYNC_REQ (1 << 0) -#define RESYNC_REQ_FORCE (1 << 1) static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); -} - -static inline void tls_offload_rx_force_resync_request(struct sock *sk) -{ - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - - atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); } static inline void diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 0e55f8365ce2..a562ebaaa33c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -694,11 +694,10 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx; - bool is_req_pending, is_force_resync; u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; + u32 sock_data, is_req_pending; struct tls_prot_info *prot; s64 resync_req; - u32 sock_data; u32 req_seq; if (tls_ctx->rx_conf != TLS_HW) @@ -713,11 +712,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) resync_req = atomic64_read(&rx_ctx->resync_req); req_seq = resync_req >> 32; seq += TLS_HEADER_SIZE - 1; - is_req_pending = resync_req & RESYNC_REQ; - is_force_resync = resync_req & RESYNC_REQ_FORCE; + is_req_pending = resync_req; - if (likely(!is_req_pending) || - (!is_force_resync && req_seq != seq) || + if (likely(!is_req_pending) || req_seq != seq || !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) return; break; -- cgit v1.2.3-59-g8ed1b From ed9b7646b06a2ed2450dd9437fc7d1ad2783140c Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 8 Jun 2020 19:11:38 +0300 Subject: net/tls: Add asynchronous resync This patch adds support for asynchronous resynchronization in tls_device. Async resync follows two distinct stages: 1. The NIC driver indicates that it would like to resync on some TLS record within the received packet (P), but the driver does not know (yet) which of the TLS records within the packet. At this stage, the NIC driver will query the device to find the exact TCP sequence for resync (tcpsn), however, the driver does not wait for the device to provide the response. 2. Eventually, the device responds, and the driver provides the tcpsn within the resync packet to KTLS. Now, KTLS can check the tcpsn against any processed TLS records within packet P, and also against any record that is processed in the future within packet P. 
The asynchronous resync path simplifies the device driver, as it can save bits on the packet completion (32-bit TCP sequence), and pass this information on an asynchronous command instead. Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- include/net/tls.h | 38 +++++++++++++++++++++++++++++++++++++- net/tls/tls_device.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/tls.h b/include/net/tls.h index ca5f7f437289..c875c0a445a6 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -291,11 +291,19 @@ struct tlsdev_ops { enum tls_offload_sync_type { TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1, + TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2, }; #define TLS_DEVICE_RESYNC_NH_START_IVAL 2 #define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128 +#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13 +struct tls_offload_resync_async { + atomic64_t req; + u32 loglen; + u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX]; +}; + struct tls_offload_context_rx { /* sw must be the first member of tls_offload_context_rx */ struct tls_sw_context_rx sw; @@ -314,6 +322,10 @@ struct tls_offload_context_rx { u32 decrypted_failed; u32 decrypted_tgt; } resync_nh; + /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */ + struct { + struct tls_offload_resync_async *resync_async; + }; }; u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state @@ -606,13 +618,37 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) } #endif +#define RESYNC_REQ BIT(0) +#define RESYNC_REQ_ASYNC BIT(1) /* The TLS context is valid until sk_destruct is called */ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); +} + +/* Log all TLS record header TCP sequences in [seq, seq+len] */ +static inline void +tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | + (len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); + rx_ctx->resync_async->loglen = 0; +} + +static inline void +tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_async->req, + ((u64)ntohl(seq) << 32) | RESYNC_REQ); } static inline void diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a562ebaaa33c..18fa6067bb7f 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -690,6 +690,47 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx, TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); } +static bool +tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, + s64 resync_req, u32 *seq) +{ + u32 is_async = resync_req & RESYNC_REQ_ASYNC; + u32 req_seq = resync_req >> 32; + u32 req_end = req_seq + ((resync_req >> 16) & 0xffff); + + if (is_async) { + /* asynchronous stage: log all headers seq such that + * req_seq <= seq <= end_seq, and wait for real resync request + */ + if 
(between(*seq, req_seq, req_end) && + resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) + resync_async->log[resync_async->loglen++] = *seq; + + return false; + } + + /* synchronous stage: check against the logged entries and + * proceed to check the next entries if no match was found + */ + while (resync_async->loglen) { + if (req_seq == resync_async->log[resync_async->loglen - 1] && + atomic64_try_cmpxchg(&resync_async->req, + &resync_req, 0)) { + resync_async->loglen = 0; + *seq = req_seq; + return true; + } + resync_async->loglen--; + } + + if (req_seq == *seq && + atomic64_try_cmpxchg(&resync_async->req, + &resync_req, 0)) + return true; + + return false; +} + void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); @@ -736,6 +777,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) seq += rcd_len; tls_bigint_increment(rcd_sn, prot->rec_seq_size); break; + case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC: + resync_req = atomic64_read(&rx_ctx->resync_async->req); + is_req_pending = resync_req; + if (likely(!is_req_pending)) + return; + + if (!tls_device_rx_resync_async(rx_ctx->resync_async, + resync_req, &seq)) + return; + break; } tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn); -- cgit v1.2.3-59-g8ed1b From 322b598be4d9b9090cda560c4caab78704615ab4 Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Tue, 16 Jun 2020 12:08:44 +0800 Subject: fpga: dfl: introduce interrupt trigger setting API FPGA user applications may be interested in interrupts generated by DFL features. For example, users can implement their own FPGA logics with interrupts enabled in AFU (Accelerated Function Unit, dynamic region of DFL based FPGA). So user applications need to be notified to handle these interrupts. In order to allow userspace applications to monitor interrupts, driver requires userspace to provide eventfds as interrupt notification channels. Applications then poll/select on the eventfds to get notified. This patch introduces a generic helper functions to do eventfds binding with given interrupts. Sub feature drivers are expected to use XXX_GET_IRQ_NUM to query irq info, and XXX_SET_IRQ to set eventfds for interrupts. This patch also introduces helper functions for these 2 ioctls. 
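Because the concrete _GET_IRQ_NUM and _SET_IRQ ioctl numbers are only defined by the individual sub feature drivers in follow-up patches, the userspace sketch below uses placeholder DFL_FPGA_SOME_GET_IRQ_NUM / DFL_FPGA_SOME_SET_IRQ names; struct dfl_fpga_irq_set is the uapi structure added by this patch, and the eventfd/poll flow is the intended usage described above.

/* Hedged sketch; the DFL_FPGA_SOME_* ioctl names are placeholders. */
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

static int bind_irq0_and_wait(int dev_fd)
{
	struct dfl_fpga_irq_set *irq_set;
	struct pollfd pfd;
	uint32_t nr_irqs;
	uint64_t count;
	int efd, ret;

	if (ioctl(dev_fd, DFL_FPGA_SOME_GET_IRQ_NUM, &nr_irqs) || !nr_irqs)
		return -1;

	efd = eventfd(0, 0);
	if (efd < 0)
		return -1;

	irq_set = malloc(sizeof(*irq_set) + sizeof(int32_t));
	if (!irq_set) {
		close(efd);
		return -1;
	}
	irq_set->start = 0;		/* bind irq 0 of this sub feature */
	irq_set->count = 1;
	irq_set->evtfds[0] = efd;
	ret = ioctl(dev_fd, DFL_FPGA_SOME_SET_IRQ, irq_set);
	free(irq_set);
	if (ret) {
		close(efd);
		return -1;
	}

	pfd.fd = efd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) == 1)	/* wait for the interrupt notification */
		read(efd, &count, sizeof(count));
	return 0;
}
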
Signed-off-by: Luwei Kang Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Signed-off-by: Tom Rix Reviewed-by: Marcelo Tosatti Acked-by: Wu Hao Signed-off-by: Moritz Fischer --- drivers/fpga/dfl.c | 157 ++++++++++++++++++++++++++++++++++++++++++ drivers/fpga/dfl.h | 16 +++++ include/uapi/linux/fpga-dfl.h | 13 ++++ 3 files changed, 186 insertions(+) (limited to 'include') diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index d915331fb52c..649958a36e62 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -10,7 +10,9 @@ * Wu Hao * Xiao Guangrong */ +#include #include +#include #include "dfl.h" @@ -533,6 +535,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) unsigned int i; /* save resource information for each feature */ + feature->dev = fdev; feature->id = finfo->fid; feature->resource_index = index; feature->ioaddr = finfo->ioaddr; @@ -1393,6 +1396,160 @@ done: } EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf); +static irqreturn_t dfl_irq_handler(int irq, void *arg) +{ + struct eventfd_ctx *trigger = arg; + + eventfd_signal(trigger, 1); + return IRQ_HANDLED; +} + +static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx, + int fd) +{ + struct platform_device *pdev = feature->dev; + struct eventfd_ctx *trigger; + int irq, ret; + + irq = feature->irq_ctx[idx].irq; + + if (feature->irq_ctx[idx].trigger) { + free_irq(irq, feature->irq_ctx[idx].trigger); + kfree(feature->irq_ctx[idx].name); + eventfd_ctx_put(feature->irq_ctx[idx].trigger); + feature->irq_ctx[idx].trigger = NULL; + } + + if (fd < 0) + return 0; + + feature->irq_ctx[idx].name = + kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx, + dev_name(&pdev->dev), feature->id); + if (!feature->irq_ctx[idx].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto free_name; + } + + ret = request_irq(irq, dfl_irq_handler, 0, + feature->irq_ctx[idx].name, trigger); + if (!ret) { + feature->irq_ctx[idx].trigger = trigger; + return ret; + } + + eventfd_ctx_put(trigger); +free_name: + kfree(feature->irq_ctx[idx].name); + + return ret; +} + +/** + * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts + * + * @feature: dfl sub feature. + * @start: start of irq index in this dfl sub feature. + * @count: number of irqs. + * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative. + * unbind "count" specified number of irqs if fds ptr is NULL. + * + * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if + * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is + * NULL. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, + unsigned int count, int32_t *fds) +{ + unsigned int i; + int ret = 0; + + /* overflow */ + if (unlikely(start + count < start)) + return -EINVAL; + + /* exceeds nr_irqs */ + if (start + count > feature->nr_irqs) + return -EINVAL; + + for (i = 0; i < count; i++) { + int fd = fds ? fds[i] : -1; + + ret = do_set_irq_trigger(feature, start + i, fd); + if (ret) { + while (i--) + do_set_irq_trigger(feature, start + i, -1); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers); + +/** + * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface. 
+ * @pdev: the feature device which has the sub feature + * @feature: the dfl sub feature + * @arg: ioctl argument + * + * Return: 0 on success, negative error code otherwise. + */ +long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg) +{ + return put_user(feature->nr_irqs, (__u32 __user *)arg); +} +EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs); + +/** + * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface. + * @pdev: the feature device which has the sub feature + * @feature: the dfl sub feature + * @arg: ioctl argument + * + * Return: 0 on success, negative error code otherwise. + */ +long dfl_feature_ioctl_set_irq(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_fpga_irq_set hdr; + s32 *fds; + long ret; + + if (!feature->nr_irqs) + return -ENOENT; + + if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr))) + return -EFAULT; + + if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) || + (hdr.start + hdr.count < hdr.start)) + return -EINVAL; + + fds = memdup_user((void __user *)(arg + sizeof(hdr)), + hdr.count * sizeof(s32)); + if (IS_ERR(fds)) + return PTR_ERR(fds); + + mutex_lock(&pdata->lock); + ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds); + mutex_unlock(&pdata->lock); + + kfree(fds); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq); + static void __exit dfl_fpga_exit(void) { dfl_chardev_uinit(); diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index e1f3ab86560e..a32dfba2a88b 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -214,14 +215,19 @@ struct dfl_feature_driver { * struct dfl_feature_irq_ctx - dfl private feature interrupt context * * @irq: Linux IRQ number of this interrupt. + * @trigger: eventfd context to signal when interrupt happens. + * @name: irq name needed when requesting irq. */ struct dfl_feature_irq_ctx { int irq; + struct eventfd_ctx *trigger; + char *name; }; /** * struct dfl_feature - sub feature of the feature devices * + * @dev: ptr to pdev of the feature device which has the sub feature. * @id: sub feature id. * @resource_index: each sub feature has one mmio resource for its registers. * this index is used to find its mmio resource from the @@ -233,6 +239,7 @@ struct dfl_feature_irq_ctx { * @priv: priv data of this feature. 
*/ struct dfl_feature { + struct platform_device *dev; u64 id; int resource_index; void __iomem *ioaddr; @@ -503,4 +510,13 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id); void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev); int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf); +int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, + unsigned int count, int32_t *fds); +long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg); +long dfl_feature_ioctl_set_irq(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg); + #endif /* __FPGA_DFL_H */ diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index ec70a0746e59..7331350f3067 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -151,6 +151,19 @@ struct dfl_fpga_port_dma_unmap { #define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4) +/** + * struct dfl_fpga_irq_set - the argument for DFL_FPGA_XXX_SET_IRQ ioctl. + * + * @start: Index of the first irq. + * @count: The number of eventfd handler. + * @evtfds: Eventfd handlers. + */ +struct dfl_fpga_irq_set { + __u32 start; + __u32 count; + __s32 evtfds[]; +}; + /* IOCTLs for FME file descriptor */ /** -- cgit v1.2.3-59-g8ed1b From fe80536acf8397827be77f9b8ada384b90e790d0 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 28 Jun 2020 23:18:23 +0530 Subject: bareudp: Added attribute to enable & disable rx metadata collection Metadata need not be collected in receive if the packet from bareudp device is not targeted to openvswitch. Signed-off-by: Martin Signed-off-by: David S. Miller --- Documentation/networking/bareudp.rst | 6 ++++-- drivers/net/bareudp.c | 23 +++++++++++++++++------ include/net/bareudp.h | 1 + include/uapi/linux/if_link.h | 1 + 4 files changed, 23 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 465a8b251bfe..0e00636d8d74 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -48,5 +48,7 @@ enabled. The bareudp device could be used along with OVS or flower filter in TC. The OVS or TC flower layer must set the tunnel information in SKB dst field before sending packet buffer to the bareudp device for transmission. On reception the -bareudp device extracts and stores the tunnel information in SKB dst field before -passing the packet buffer to the network stack. +bareudp device decapsulates the udp header and passes the inner packet to the +network stack. If RX_COLLECT_METADATA flag is enabled in the device the tunnel +information will be stored in the SKB dst field before the packet buffer is +passed to the network stack. 
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 3dd46cd55114..108a8cafc4f8 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -46,6 +46,7 @@ struct bareudp_dev { __be16 port; u16 sport_min; bool multi_proto_mode; + bool rx_collect_metadata; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; @@ -125,13 +126,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) bareudp->dev->stats.rx_dropped++; goto drop; } - - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); - if (!tun_dst) { - bareudp->dev->stats.rx_dropped++; - goto drop; + if (bareudp->rx_collect_metadata) { + tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + if (!tun_dst) { + bareudp->dev->stats.rx_dropped++; + goto drop; + } + skb_dst_set(skb, &tun_dst->dst); } - skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -575,6 +577,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; + if (data[IFLA_BAREUDP_RX_COLLECT_METADATA]) + conf->rx_collect_metadata = true; + return 0; } @@ -612,6 +617,8 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; + bareudp->rx_collect_metadata = conf->rx_collect_metadata; + err = register_netdevice(dev); if (err) return err; @@ -669,6 +676,7 @@ static size_t bareudp_get_size(const struct net_device *dev) nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ + nla_total_size(0) + /* IFLA_BAREUDP_RX_COLLECT_METADATA */ 0; } @@ -685,6 +693,9 @@ static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; + if (bareudp->rx_collect_metadata && + nla_put_flag(skb, IFLA_BAREUDP_RX_COLLECT_METADATA)) + goto nla_put_failure; return 0; diff --git a/include/net/bareudp.h b/include/net/bareudp.h index dc65a0d71d9b..3dd5f9a8d01c 100644 --- a/include/net/bareudp.h +++ b/include/net/bareudp.h @@ -12,6 +12,7 @@ struct bareudp_conf { __be16 port; u16 sport_min; bool multi_proto_mode; + bool rx_collect_metadata; }; struct net_device *bareudp_dev_create(struct net *net, const char *name, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index a009365ad67b..cc185a007ade 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -600,6 +600,7 @@ enum { IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, + IFLA_BAREUDP_RX_COLLECT_METADATA, __IFLA_BAREUDP_MAX }; -- cgit v1.2.3-59-g8ed1b From 6fc3e68f5b35c4861b28733fa32f636db7188746 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Sun, 28 Jun 2020 17:32:25 +0800 Subject: sctp: use list_is_singular in sctp_list_single_entry Use list_is_singular() instead of open-coding. Signed-off-by: Geliang Tang Signed-off-by: David S. 
Miller --- include/net/sctp/sctp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index f8bcb75bb044..e3bd198b00ae 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -412,7 +412,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) /* Tests if the list has one and only one entry. */ static inline int sctp_list_single_entry(struct list_head *head) { - return (head->next != head) && (head->next == head->prev); + return list_is_singular(head); } static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) -- cgit v1.2.3-59-g8ed1b From cea0f76a483d1270ac6f6513964e3e75193dda48 Mon Sep 17 00:00:00 2001 From: Anurag Kumar Vulisha Date: Mon, 29 Jun 2020 15:00:52 +0300 Subject: dt-bindings: phy: Add DT bindings for Xilinx ZynqMP PSGTR PHY Add DT bindings for the Xilinx ZynqMP PHY. ZynqMP SoCs have a High Speed Processing System Gigabit Transceiver which provides PHY capabilities to USB, SATA, PCIE, Display Port and Ehernet SGMII controllers. Signed-off-by: Anurag Kumar Vulisha Signed-off-by: Laurent Pinchart Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200629120054.29338-2-laurent.pinchart@ideasonboard.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml | 105 +++++++++++++++++++++ include/dt-bindings/phy/phy.h | 1 + 2 files changed, 106 insertions(+) create mode 100644 Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml (limited to 'include') diff --git a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml new file mode 100644 index 000000000000..09e3cde7ebca --- /dev/null +++ b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/xlnx,zynqmp-psgtr.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xilinx ZynqMP Gigabit Transceiver PHY Device Tree Bindings + +maintainers: + - Laurent Pinchart + +description: | + This binding describes the Xilinx ZynqMP Gigabit Transceiver (GTR) PHY. The + GTR provides four lanes and is used by USB, SATA, PCIE, Display port and + Ethernet SGMII controllers. + +properties: + "#phy-cells": + const: 4 + description: | + The cells contain the following arguments. + + - description: The GTR lane + minimum: 0 + maximum: 3 + - description: The PHY type + enum: + - PHY_TYPE_DP + - PHY_TYPE_PCIE + - PHY_TYPE_SATA + - PHY_TYPE_SGMII + - PHY_TYPE_USB + - description: The PHY instance + minimum: 0 + maximum: 1 # for DP, SATA or USB + maximum: 3 # for PCIE or SGMII + - description: The reference clock number + minimum: 0 + maximum: 3 + + compatible: + enum: + - xlnx,zynqmp-psgtr-v1.1 + - xlnx,zynqmp-psgtr + + clocks: + minItems: 1 + maxItems: 4 + description: | + Clock for each PS_MGTREFCLK[0-3] reference clock input. Unconnected + inputs shall not have an entry. + + clock-names: + minItems: 1 + maxItems: 4 + items: + pattern: "^ref[0-3]$" + + reg: + items: + - description: SERDES registers block + - description: SIOU registers block + + reg-names: + items: + - const: serdes + - const: siou + + xlnx,tx-termination-fix: + description: | + Include this for fixing functional issue with the TX termination + resistance in GT, which can be out of spec for the XCZU9EG silicon + version. 
+ type: boolean + +required: + - "#phy-cells" + - compatible + - reg + - reg-names + +if: + properties: + compatible: + const: xlnx,zynqmp-psgtr-v1.1 + +then: + properties: + xlnx,tx-termination-fix: false + +additionalProperties: false + +examples: + - | + phy: phy@fd400000 { + compatible = "xlnx,zynqmp-psgtr-v1.1"; + reg = <0x0 0xfd400000 0x0 0x40000>, + <0x0 0xfd3d0000 0x0 0x1000>; + reg-names = "serdes", "siou"; + clocks = <&refclks 3>, <&refclks 2>, <&refclks 0>; + clock-names = "ref1", "ref2", "ref3"; + #phy-cells = <4>; + }; + +... diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h index 3727ef72138b..36e8c241cf48 100644 --- a/include/dt-bindings/phy/phy.h +++ b/include/dt-bindings/phy/phy.h @@ -18,5 +18,6 @@ #define PHY_TYPE_UFS 5 #define PHY_TYPE_DP 6 #define PHY_TYPE_XPCS 7 +#define PHY_TYPE_SGMII 8 #endif /* _DT_BINDINGS_PHY */ -- cgit v1.2.3-59-g8ed1b From db9819c76c1fd48c30699381c94bba5c95dd467e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:47 +0200 Subject: block: remove bio_disassociate_blkg bio_disassociate_blkg has two callers, of which one immediately assigns a new value to >bi_blkg. Just open code the function in the two callers. Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 27 ++++++++------------------- include/linux/bio.h | 2 -- 2 files changed, 8 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/block/bio.c b/block/bio.c index fb5533416fa6..8aef4460b32e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -234,8 +234,12 @@ fallback: void bio_uninit(struct bio *bio) { - bio_disassociate_blkg(bio); - +#ifdef CONFIG_BLK_CGROUP + if (bio->bi_blkg) { + blkg_put(bio->bi_blkg); + bio->bi_blkg = NULL; + } +#endif if (bio_integrity(bio)) bio_integrity_free(bio); @@ -1625,21 +1629,6 @@ EXPORT_SYMBOL(bioset_init_from_src); #ifdef CONFIG_BLK_CGROUP -/** - * bio_disassociate_blkg - puts back the blkg reference if associated - * @bio: target bio - * - * Helper to disassociate the blkg from @bio if a blkg is associated. 
- */ -void bio_disassociate_blkg(struct bio *bio) -{ - if (bio->bi_blkg) { - blkg_put(bio->bi_blkg); - bio->bi_blkg = NULL; - } -} -EXPORT_SYMBOL_GPL(bio_disassociate_blkg); - /** * __bio_associate_blkg - associate a bio with the a blkg * @bio: target bio @@ -1656,8 +1645,8 @@ EXPORT_SYMBOL_GPL(bio_disassociate_blkg); */ static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) { - bio_disassociate_blkg(bio); - + if (bio->bi_blkg) + blkg_put(bio->bi_blkg); bio->bi_blkg = blkg_tryget_closest(blkg); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 0282f8aa8593..4cd229e175c0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -497,13 +497,11 @@ static inline void bio_associate_blkg_from_page(struct bio *bio, #endif #ifdef CONFIG_BLK_CGROUP -void bio_disassociate_blkg(struct bio *bio); void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css); void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline void bio_disassociate_blkg(struct bio *bio) { } static inline void bio_associate_blkg(struct bio *bio) { } static inline void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) -- cgit v1.2.3-59-g8ed1b From a18b9b1590ca64f877588700de32c9ad236f405c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:50 +0200 Subject: block: move bio_associate_blkg_from_page to mm/page_io.c bio_associate_blkg_from_page is a special purpose helper for swap bios that doesn't need access to bio internals. Move it to the swap code instead of having it in bio.c. Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 26 -------------------------- include/linux/bio.h | 7 ------- mm/page_io.c | 17 +++++++++++++++++ 3 files changed, 17 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/block/bio.c b/block/bio.c index bc8de2432e36..901d22715dd4 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1659,32 +1659,6 @@ void bio_associate_blkg_from_css(struct bio *bio, } EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); -#ifdef CONFIG_MEMCG -/** - * bio_associate_blkg_from_page - associate a bio with the page's blkg - * @bio: target bio - * @page: the page to lookup the blkcg from - * - * Associate @bio with the blkg from @page's owning memcg and the respective - * request_queue. If cgroup_e_css returns %NULL, fall back to the queue's - * root_blkg. 
- */ -void bio_associate_blkg_from_page(struct bio *bio, struct page *page) -{ - struct cgroup_subsys_state *css; - - if (!page->mem_cgroup) - return; - - rcu_read_lock(); - - css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); - bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); -} -#endif /* CONFIG_MEMCG */ - /** * bio_associate_blkg - associate a bio with a blkg * @bio: target bio diff --git a/include/linux/bio.h b/include/linux/bio.h index 4cd229e175c0..c6d765382926 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -489,13 +489,6 @@ do { \ #define bio_dev(bio) \ disk_devt((bio)->bi_disk) -#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -void bio_associate_blkg_from_page(struct bio *bio, struct page *page); -#else -static inline void bio_associate_blkg_from_page(struct bio *bio, - struct page *page) { } -#endif - #ifdef CONFIG_BLK_CGROUP void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, diff --git a/mm/page_io.c b/mm/page_io.c index e8726f3e3820..ccda76790088 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -277,6 +277,23 @@ static inline void count_swpout_vm_event(struct page *page) count_vm_events(PSWPOUT, hpage_nr_pages(page)); } +#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) +static void bio_associate_blkg_from_page(struct bio *bio, struct page *page) +{ + struct cgroup_subsys_state *css; + + if (!page->mem_cgroup) + return; + + rcu_read_lock(); + css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); + bio_associate_blkg_from_css(bio, css); + rcu_read_unlock(); +} +#else +#define bio_associate_blkg_from_page(bio, page) do { } while (0) +#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */ + int __swap_writepage(struct page *page, struct writeback_control *wbc, bio_end_io_t end_write_func) { -- cgit v1.2.3-59-g8ed1b From 28fc591ff9d64cbc2b780be95ee3fde8f6ade7fd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:51 +0200 Subject: block: move the bio cgroup associatation helpers to blk-cgroup.c Keep the cgroup code together. Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 75 --------------------------------- block/blk-cgroup.c | 103 ++++++++++++++++++++++++++++++++++++++++++++- include/linux/blk-cgroup.h | 30 ------------- 3 files changed, 101 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/block/bio.c b/block/bio.c index 901d22715dd4..fc1299f9d86a 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1627,81 +1627,6 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src) } EXPORT_SYMBOL(bioset_init_from_src); -#ifdef CONFIG_BLK_CGROUP -/** - * bio_associate_blkg_from_css - associate a bio with a specified css - * @bio: target bio - * @css: target css - * - * Associate @bio with the blkg found by combining the css's blkg and the - * request_queue of the @bio. An association failure is handled by walking up - * the blkg tree. Therefore, the blkg associated can be anything between @blkg - * and q->root_blkg. This situation only happens when a cgroup is dying and - * then the remaining bios will spill to the closest alive blkg. - * - * A reference will be taken on the blkg and will be released when @bio is - * freed. 
- */ -void bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ - struct request_queue *q = bio->bi_disk->queue; - struct blkcg_gq *blkg = q->root_blkg; - - if (bio->bi_blkg) - blkg_put(bio->bi_blkg); - - rcu_read_lock(); - if (css && css->parent) - blkg = blkg_lookup_create(css_to_blkcg(css), q); - bio->bi_blkg = blkg_tryget_closest(blkg); - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); - -/** - * bio_associate_blkg - associate a bio with a blkg - * @bio: target bio - * - * Associate @bio with the blkg found from the bio's css and request_queue. - * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is - * already associated, the css is reused and association redone as the - * request_queue may have changed. - */ -void bio_associate_blkg(struct bio *bio) -{ - struct cgroup_subsys_state *css; - - rcu_read_lock(); - - if (bio->bi_blkg) - css = &bio_blkcg(bio)->css; - else - css = blkcg_css(); - - bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(bio_associate_blkg); - -/** - * bio_clone_blkg_association - clone blkg association from src to dst bio - * @dst: destination bio - * @src: source bio - */ -void bio_clone_blkg_association(struct bio *dst, struct bio *src) -{ - if (src->bi_blkg) { - if (dst->bi_blkg) - blkg_put(dst->bi_blkg); - blkg_get(src->bi_blkg); - dst->bi_blkg = src->bi_blkg; - } -} -EXPORT_SYMBOL_GPL(bio_clone_blkg_association); -#endif /* CONFIG_BLK_CGROUP */ - static void __init biovec_init_slabs(void) { int i; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 0ecc897b225c..bb0607bfd771 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -328,7 +328,7 @@ err_free_blkg: * Returns the blkg or the closest blkg if blkg_create() fails as it walks * down from root. */ -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, +static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q) { struct blkcg_gq *blkg; @@ -377,7 +377,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, * This looks up or creates the blkg representing the unique pair * of the blkcg and the request_queue. */ -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, +static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q) { struct blkcg_gq *blkg = blkg_lookup(blkcg, q); @@ -1727,6 +1727,105 @@ void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) atomic64_add(delta, &blkg->delay_nsec); } +/** + * blkg_tryget_closest - try and get a blkg ref on the closet blkg + * @blkg: blkg to get + * + * This needs to be called rcu protected. As the failure mode here is to walk + * up the blkg tree, this ensure that the blkg->parent pointers are always + * valid. This returns the blkg that it ended up taking a reference on or %NULL + * if no reference was taken. + */ +static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) +{ + struct blkcg_gq *ret_blkg = NULL; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + while (blkg) { + if (blkg_tryget(blkg)) { + ret_blkg = blkg; + break; + } + blkg = blkg->parent; + } + + return ret_blkg; +} + +/** + * bio_associate_blkg_from_css - associate a bio with a specified css + * @bio: target bio + * @css: target css + * + * Associate @bio with the blkg found by combining the css's blkg and the + * request_queue of the @bio. An association failure is handled by walking up + * the blkg tree. Therefore, the blkg associated can be anything between @blkg + * and q->root_blkg. 
This situation only happens when a cgroup is dying and + * then the remaining bios will spill to the closest alive blkg. + * + * A reference will be taken on the blkg and will be released when @bio is + * freed. + */ +void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css) +{ + struct request_queue *q = bio->bi_disk->queue; + struct blkcg_gq *blkg = q->root_blkg; + + if (bio->bi_blkg) + blkg_put(bio->bi_blkg); + + rcu_read_lock(); + if (css && css->parent) + blkg = blkg_lookup_create(css_to_blkcg(css), q); + bio->bi_blkg = blkg_tryget_closest(blkg); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); + +/** + * bio_associate_blkg - associate a bio with a blkg + * @bio: target bio + * + * Associate @bio with the blkg found from the bio's css and request_queue. + * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is + * already associated, the css is reused and association redone as the + * request_queue may have changed. + */ +void bio_associate_blkg(struct bio *bio) +{ + struct cgroup_subsys_state *css; + + rcu_read_lock(); + + if (bio->bi_blkg) + css = &bio_blkcg(bio)->css; + else + css = blkcg_css(); + + bio_associate_blkg_from_css(bio, css); + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(bio_associate_blkg); + +/** + * bio_clone_blkg_association - clone blkg association from src to dst bio + * @dst: destination bio + * @src: source bio + */ +void bio_clone_blkg_association(struct bio *dst, struct bio *src) +{ + if (src->bi_blkg) { + if (dst->bi_blkg) + blkg_put(dst->bi_blkg); + blkg_get(src->bi_blkg); + dst->bi_blkg = src->bi_blkg; + } +} +EXPORT_SYMBOL_GPL(bio_clone_blkg_association); + static int __init blkcg_init(void) { blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio", diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index a57ebe2f00ab..60df97202314 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -183,10 +183,6 @@ extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); int blkcg_init_queue(struct request_queue *q); void blkcg_exit_queue(struct request_queue *q); @@ -480,32 +476,6 @@ static inline bool blkg_tryget(struct blkcg_gq *blkg) return blkg && percpu_ref_tryget(&blkg->refcnt); } -/** - * blkg_tryget_closest - try and get a blkg ref on the closet blkg - * @blkg: blkg to get - * - * This needs to be called rcu protected. As the failure mode here is to walk - * up the blkg tree, this ensure that the blkg->parent pointers are always - * valid. This returns the blkg that it ended up taking a reference on or %NULL - * if no reference was taken. - */ -static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) -{ - struct blkcg_gq *ret_blkg = NULL; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - while (blkg) { - if (blkg_tryget(blkg)) { - ret_blkg = blkg; - break; - } - blkg = blkg->parent; - } - - return ret_blkg; -} - /** * blkg_put - put a blkg reference * @blkg: blkg to put -- cgit v1.2.3-59-g8ed1b From 81630e27fff3dc1ccbf64ecb48a70170d7071545 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:55 +0200 Subject: blk-cgroup: remove the !bio->bi_blkg check in blkcg_bio_issue_check This is purely a sanity check for grave programming errors. 
Remove it to simplify further work in this area. Acked-by: Tejun Heo Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/blk-cgroup.h | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 60df97202314..8e86b598316c 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -543,24 +543,11 @@ static inline void blkcg_bio_issue_init(struct bio *bio) static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { - struct blkcg_gq *blkg; + struct blkcg_gq *blkg = bio->bi_blkg; bool throtl = false; rcu_read_lock(); - - if (!bio->bi_blkg) { - char b[BDEVNAME_SIZE]; - - WARN_ONCE(1, - "no blkg associated for bio on block-device: %s\n", - bio_devname(bio, b)); - bio_associate_blkg(bio); - } - - blkg = bio->bi_blkg; - throtl = blk_throtl_bio(q, blkg, bio); - if (!throtl) { struct blkg_iostat_set *bis; int rwd, cpu; -- cgit v1.2.3-59-g8ed1b From 93b8063804b62b55248e16499d853e1b20eff905 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:57 +0200 Subject: blk-cgroup: move rcu locking from blkcg_bio_issue_check to blk_throtl_bio The only thing in blkcg_bio_issue_check that needs to be under rcu_read_lock is blk_throtl_bio, so move the locking there. Acked-by: Tejun Heo Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-throttle.c | 3 ++- include/linux/blk-cgroup.h | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 209fdd8939fb..ac0083450500 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2168,7 +2168,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, bool throttled = false; struct throtl_data *td = tg->td; - WARN_ON_ONCE(!rcu_read_lock_held()); + rcu_read_lock(); /* see throtl_charge_bio() */ if (bio_flagged(bio, BIO_THROTTLED)) @@ -2273,6 +2273,7 @@ out: if (throttled || !td->track_bio_latency) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif + rcu_read_unlock(); return throttled; } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 8e86b598316c..8ab043c911f2 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -546,7 +546,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, struct blkcg_gq *blkg = bio->bi_blkg; bool throtl = false; - rcu_read_lock(); throtl = blk_throtl_bio(q, blkg, bio); if (!throtl) { struct blkg_iostat_set *bis; @@ -582,7 +581,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, blkcg_bio_issue_init(bio); - rcu_read_unlock(); return !throtl; } -- cgit v1.2.3-59-g8ed1b From db18a53e5ba840993a3fc908dec648402ed740bd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:58 +0200 Subject: blk-cgroup: remove blkcg_bio_issue_check blkcg_bio_issue_check is a giant inline function that does three entirely different things. Factor out the blk-cgroup related bio initalization into a new helper, and the open code the sequence in the only caller, relying on the fact that all the actual functionality is stubbed out for non-cgroup builds. 
Acked-by: Tejun Heo Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 34 ++++++++++++++++++++++++++++ block/blk-core.c | 7 +++++- block/blk-throttle.c | 5 +++-- block/blk.h | 2 ++ include/linux/blk-cgroup.h | 56 ++-------------------------------------------- 5 files changed, 47 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d21ec2acd716..1ce94afc03bc 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1813,6 +1813,40 @@ void bio_clone_blkg_association(struct bio *dst, struct bio *src) } EXPORT_SYMBOL_GPL(bio_clone_blkg_association); +static int blk_cgroup_io_type(struct bio *bio) +{ + if (op_is_discard(bio->bi_opf)) + return BLKG_IOSTAT_DISCARD; + if (op_is_write(bio->bi_opf)) + return BLKG_IOSTAT_WRITE; + return BLKG_IOSTAT_READ; +} + +void blk_cgroup_bio_start(struct bio *bio) +{ + int rwd = blk_cgroup_io_type(bio), cpu; + struct blkg_iostat_set *bis; + + cpu = get_cpu(); + bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); + u64_stats_update_begin(&bis->sync); + + /* + * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split + * bio and we would have already accounted for the size of the bio. + */ + if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { + bio_set_flag(bio, BIO_CGROUP_ACCT); + bis->cur.bytes[rwd] += bio->bi_iter.bi_size; + } + bis->cur.ios[rwd]++; + + u64_stats_update_end(&bis->sync); + if (cgroup_subsys_on_dfl(io_cgrp_subsys)) + cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); + put_cpu(); +} + static int __init blkcg_init(void) { blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio", diff --git a/block/blk-core.c b/block/blk-core.c index a9769c1a2875..76cfd5709f66 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1073,8 +1073,13 @@ generic_make_request_checks(struct bio *bio) if (unlikely(!current->io_context)) create_task_io_context(current, GFP_ATOMIC, q->node); - if (!blkcg_bio_issue_check(q, bio)) + if (blk_throtl_bio(bio)) { + blkcg_bio_issue_init(bio); return false; + } + + blk_cgroup_bio_start(bio); + blkcg_bio_issue_init(bio); if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { trace_block_bio_queue(q, bio); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index ac0083450500..9d00f62c05ec 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2158,9 +2158,10 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) } #endif -bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio) +bool blk_throtl_bio(struct bio *bio) { + struct request_queue *q = bio->bi_disk->queue; + struct blkcg_gq *blkg = bio->bi_blkg; struct throtl_qnode *qn = NULL; struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); struct throtl_service_queue *sq; diff --git a/block/blk.h b/block/blk.h index 3a120a070dac..41a50880c94e 100644 --- a/block/blk.h +++ b/block/blk.h @@ -288,10 +288,12 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node); extern int blk_throtl_init(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q); extern void blk_throtl_register_queue(struct request_queue *q); +bool blk_throtl_bio(struct bio *bio); #else /* CONFIG_BLK_DEV_THROTTLING */ static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline void blk_throtl_exit(struct request_queue *q) { } static inline void blk_throtl_register_queue(struct request_queue *q) { } +static inline bool blk_throtl_bio(struct bio *bio) { return false; } #endif /* 
CONFIG_BLK_DEV_THROTTLING */ #ifdef CONFIG_BLK_DEV_THROTTLING_LOW extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 8ab043c911f2..431b2d18bf40 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -517,14 +517,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -#ifdef CONFIG_BLK_DEV_THROTTLING -extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio); -#else -static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio) { return false; } -#endif - bool __blkcg_punt_bio_submit(struct bio *bio); static inline bool blkcg_punt_bio_submit(struct bio *bio) @@ -540,50 +532,6 @@ static inline void blkcg_bio_issue_init(struct bio *bio) bio_issue_init(&bio->bi_issue, bio_sectors(bio)); } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) -{ - struct blkcg_gq *blkg = bio->bi_blkg; - bool throtl = false; - - throtl = blk_throtl_bio(q, blkg, bio); - if (!throtl) { - struct blkg_iostat_set *bis; - int rwd, cpu; - - if (op_is_discard(bio->bi_opf)) - rwd = BLKG_IOSTAT_DISCARD; - else if (op_is_write(bio->bi_opf)) - rwd = BLKG_IOSTAT_WRITE; - else - rwd = BLKG_IOSTAT_READ; - - cpu = get_cpu(); - bis = per_cpu_ptr(blkg->iostat_cpu, cpu); - u64_stats_update_begin(&bis->sync); - - /* - * If the bio is flagged with BIO_CGROUP_ACCT it means this is a - * split bio and we would have already accounted for the size of - * the bio. - */ - if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { - bio_set_flag(bio, BIO_CGROUP_ACCT); - bis->cur.bytes[rwd] += bio->bi_iter.bi_size; - } - bis->cur.ios[rwd]++; - - u64_stats_update_end(&bis->sync); - if (cgroup_subsys_on_dfl(io_cgrp_subsys)) - cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); - put_cpu(); - } - - blkcg_bio_issue_init(bio); - - return !throtl; -} - static inline void blkcg_use_delay(struct blkcg_gq *blkg) { if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) @@ -657,6 +605,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); } +void blk_cgroup_bio_start(struct bio *bio); void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); void blkcg_maybe_throttle_current(void); @@ -710,8 +659,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { } static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } static inline void blkcg_bio_issue_init(struct bio *bio) { } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) { return true; } +static inline void blk_cgroup_bio_start(struct bio *bio) { } #define blk_queue_for_each_rl(rl, q) \ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) -- cgit v1.2.3-59-g8ed1b From 42fdc5e49c2be97db112d410d07044e0e2c7d5bb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 29 Jun 2020 17:08:34 +0200 Subject: blk-mq: remove the BLK_MQ_REQ_INTERNAL flag Just check for a non-NULL elevator directly to make the code more clear. 
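The essence of the change, using blk_mq_tags_from_data() from the diff below as an example (condensed for illustration):

	/* Before: behaviour keyed off an allocation flag. */
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;
	return data->hctx->tags;

	/* After: derive the same information from the queue itself. */
	if (data->q->elevator)
		return data->hctx->sched_tags;
	return data->hctx->tags;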
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 4 ++-- block/blk-mq.c | 10 +++------- block/blk-mq.h | 2 +- include/linux/blk-mq.h | 2 -- 4 files changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index ae722f8b13fb..281367b04527 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -90,9 +90,9 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt) { - if (!(data->flags & BLK_MQ_REQ_INTERNAL) && - !hctx_may_queue(data->hctx, bt)) + if (!data->q->elevator && !hctx_may_queue(data->hctx, bt)) return BLK_MQ_NO_TAG; + if (data->shallow_depth) return __sbitmap_queue_get_shallow(bt, data->shallow_depth); else diff --git a/block/blk-mq.c b/block/blk-mq.c index d07e55455726..72d3034fe39d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -279,7 +279,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct request *rq = tags->static_rqs[tag]; req_flags_t rq_flags = 0; - if (data->flags & BLK_MQ_REQ_INTERNAL) { + if (data->q->elevator) { rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { @@ -364,8 +364,6 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) data->flags |= BLK_MQ_REQ_NOWAIT; if (e) { - data->flags |= BLK_MQ_REQ_INTERNAL; - /* * Flush requests are special and go directly to the * dispatch list. Don't include reserved tags in the @@ -380,7 +378,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) retry: data->ctx = blk_mq_get_ctx(q); data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); - if (!(data->flags & BLK_MQ_REQ_INTERNAL)) + if (!e) blk_mq_tag_busy(data->hctx); /* @@ -476,9 +474,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); data.ctx = __blk_mq_get_ctx(q, cpu); - if (q->elevator) - data.flags |= BLK_MQ_REQ_INTERNAL; - else + if (!q->elevator) blk_mq_tag_busy(data.hctx); ret = -EWOULDBLOCK; diff --git a/block/blk-mq.h b/block/blk-mq.h index b3ce0f3a2ad2..c6330335767c 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -159,7 +159,7 @@ struct blk_mq_alloc_data { static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { - if (data->flags & BLK_MQ_REQ_INTERNAL) + if (data->q->elevator) return data->hctx->sched_tags; return data->hctx->tags; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 1641ec6cd7e5..8986e88a986b 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -447,8 +447,6 @@ enum { BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), /* allocate from reserved pool */ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), - /* allocate internal/sched tag */ - BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2), /* set RQF_PREEMPT */ BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), }; -- cgit v1.2.3-59-g8ed1b From cbba1d719534b77b857267890b0f54f0f0a90de4 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:17 +0200 Subject: thermal: Add current mode to thermal zone device Prepare for changing the place where the mode is stored: now it is in drivers, which might or might not implement get_mode()/set_mode() methods. A lot of cleanup can be done thanks to storing it in struct tzd. The get_mode() methods will become redundant. 
Signed-off-by: Andrzej Pietrasiewicz Reviewed-by: Guenter Roeck Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-4-andrzej.p@collabora.com --- include/linux/thermal.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 216185bb3014..5f91d7f04512 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -128,6 +128,7 @@ struct thermal_cooling_device { * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis + * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips @@ -170,6 +171,7 @@ struct thermal_zone_device { struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ -- cgit v1.2.3-59-g8ed1b From 1ee14820fd8ee79c4fc191155f48c985f28040e2 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:19 +0200 Subject: thermal: remove get_mode() operation of drivers get_mode() is now redundant, as the state is stored in struct thermal_zone_device. Consequently the "mode" attribute in sysfs can always be visible, because it is always possible to get the mode from struct tzd. Signed-off-by: Andrzej Pietrasiewicz [for acerhdf] Acked-by: Peter Kaestle Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-6-andrzej.p@collabora.com --- drivers/acpi/thermal.c | 9 ------- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 19 -------------- drivers/platform/x86/acerhdf.c | 12 --------- drivers/thermal/da9062-thermal.c | 8 ------ drivers/thermal/imx_thermal.c | 9 ------- .../intel/int340x_thermal/int3400_thermal.c | 9 ------- drivers/thermal/intel/intel_quark_dts_thermal.c | 8 ------ drivers/thermal/thermal_core.c | 7 +---- drivers/thermal/thermal_of.c | 9 ------- drivers/thermal/thermal_sysfs.c | 30 ++-------------------- include/linux/thermal.h | 2 -- 11 files changed, 3 insertions(+), 119 deletions(-) (limited to 'include') diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 4ba273f49d87..592be97c4456 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -525,14 +525,6 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) return 0; } -static int thermal_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - *mode = thermal->mode; - - return 0; -} - static int thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { @@ -847,7 +839,6 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = { .bind = acpi_thermal_bind_cooling_device, .unbind = acpi_thermal_unbind_cooling_device, .get_temp = thermal_get_temp, - .get_mode = thermal_get_mode, .set_mode = thermal_set_mode, .get_trip_type = thermal_get_trip_type, .get_trip_temp = thermal_get_trip_temp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 51667ed99c21..ad61b2db30b8 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -275,14 +275,6 @@ static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev, return 0; } -static int mlxsw_thermal_get_mode(struct thermal_zone_device *tzdev, - enum thermal_device_mode *mode) -{ - *mode = tzdev->mode; - - return 0; -} - static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev, enum thermal_device_mode mode) { @@ -402,7 +394,6 @@ static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, static struct thermal_zone_device_ops mlxsw_thermal_ops = { .bind = mlxsw_thermal_bind, .unbind = mlxsw_thermal_unbind, - .get_mode = mlxsw_thermal_get_mode, .set_mode = mlxsw_thermal_set_mode, .get_temp = mlxsw_thermal_get_temp, .get_trip_type = mlxsw_thermal_get_trip_type, @@ -461,14 +452,6 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, return err; } -static int mlxsw_thermal_module_mode_get(struct thermal_zone_device *tzdev, - enum thermal_device_mode *mode) -{ - *mode = tzdev->mode; - - return 0; -} - static int mlxsw_thermal_module_mode_set(struct thermal_zone_device *tzdev, enum thermal_device_mode mode) { @@ -606,7 +589,6 @@ static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev, static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .bind = mlxsw_thermal_module_bind, .unbind = mlxsw_thermal_module_unbind, - .get_mode = mlxsw_thermal_module_mode_get, .set_mode = mlxsw_thermal_module_mode_set, .get_temp = mlxsw_thermal_module_temp_get, .get_trip_type = mlxsw_thermal_module_trip_type_get, @@ -645,7 +627,6 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { .bind = mlxsw_thermal_module_bind, .unbind = mlxsw_thermal_module_unbind, - .get_mode = mlxsw_thermal_module_mode_get, .set_mode = mlxsw_thermal_module_mode_set, .get_temp = mlxsw_thermal_gearbox_temp_get, .get_trip_type = mlxsw_thermal_module_trip_type_get, diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 6f21015e5fd9..58c4e1caaa09 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -413,17 +413,6 @@ static inline void acerhdf_enable_kernelmode(void) pr_notice("kernel mode fan control ON\n"); } -static int acerhdf_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - if (verbose) - pr_notice("kernel mode fan control %d\n", kernelmode); - - *mode = thermal->mode; - - return 0; -} - /* * set operation mode; * enabled: the thermal layer of the kernel takes care about @@ -490,7 +479,6 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = { .bind = acerhdf_bind, .unbind = acerhdf_unbind, .get_temp = acerhdf_get_ec_temp, - .get_mode = acerhdf_get_mode, .set_mode = acerhdf_set_mode, .get_trip_type = acerhdf_get_trip_type, .get_trip_hyst = acerhdf_get_trip_hyst, diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c index a14c7981c7c7..a7ac8afb063e 100644 --- a/drivers/thermal/da9062-thermal.c +++ b/drivers/thermal/da9062-thermal.c @@ -120,13 +120,6 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int da9062_thermal_get_mode(struct thermal_zone_device *z, - enum thermal_device_mode *mode) -{ - *mode = z->mode; - return 0; -} - static int da9062_thermal_get_trip_type(struct thermal_zone_device *z, int trip, enum thermal_trip_type *type) @@ -179,7 +172,6 @@ static int 
da9062_thermal_get_temp(struct thermal_zone_device *z, static struct thermal_zone_device_ops da9062_thermal_ops = { .get_temp = da9062_thermal_get_temp, - .get_mode = da9062_thermal_get_mode, .get_trip_type = da9062_thermal_get_trip_type, .get_trip_temp = da9062_thermal_get_trip_temp, }; diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 9a1114d721b6..2c7ee5da608a 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -330,14 +330,6 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) return 0; } -static int imx_get_mode(struct thermal_zone_device *tz, - enum thermal_device_mode *mode) -{ - *mode = tz->mode; - - return 0; -} - static int imx_set_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { @@ -464,7 +456,6 @@ static struct thermal_zone_device_ops imx_tz_ops = { .bind = imx_bind, .unbind = imx_unbind, .get_temp = imx_get_temp, - .get_mode = imx_get_mode, .set_mode = imx_set_mode, .get_trip_type = imx_get_trip_type, .get_trip_temp = imx_get_trip_temp, diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index f65b2fc09198..9a622aaf29dd 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -377,14 +377,6 @@ static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, return 0; } -static int int3400_thermal_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - *mode = thermal->mode; - - return 0; -} - static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { @@ -412,7 +404,6 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, static struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, - .get_mode = int3400_thermal_get_mode, .set_mode = int3400_thermal_set_mode, }; diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index d77cb3df5ade..c4879b4bfbf1 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -308,13 +308,6 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, return 0; } -static int sys_get_mode(struct thermal_zone_device *tzd, - enum thermal_device_mode *mode) -{ - *mode = tzd->mode; - return 0; -} - static int sys_set_mode(struct thermal_zone_device *tzd, enum thermal_device_mode mode) { @@ -336,7 +329,6 @@ static struct thermal_zone_device_ops tzone_ops = { .get_trip_type = sys_get_trip_type, .set_trip_temp = sys_set_trip_temp, .get_crit_temp = sys_get_crit_temp, - .get_mode = sys_get_mode, .set_mode = sys_set_mode, }; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index b71196eaf90e..14d3b1b94c4f 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1456,7 +1456,6 @@ static int thermal_pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) { struct thermal_zone_device *tz; - enum thermal_device_mode tz_mode; switch (mode) { case PM_HIBERNATION_PREPARE: @@ -1469,11 +1468,7 @@ static int thermal_pm_notify(struct notifier_block *nb, case PM_POST_SUSPEND: atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { - tz_mode = THERMAL_DEVICE_ENABLED; - if (tz->ops->get_mode) - tz->ops->get_mode(tz, &tz_mode); - - if (tz_mode == THERMAL_DEVICE_DISABLED) + if 
(tz->mode == THERMAL_DEVICE_DISABLED) continue; thermal_zone_device_init(tz); diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index c495b1e48ef2..ba65d48a48cb 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -267,14 +267,6 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal, return 0; } -static int of_thermal_get_mode(struct thermal_zone_device *tz, - enum thermal_device_mode *mode) -{ - *mode = tz->mode; - - return 0; -} - static int of_thermal_set_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { @@ -389,7 +381,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz, } static struct thermal_zone_device_ops of_thermal_ops = { - .get_mode = of_thermal_get_mode, .set_mode = of_thermal_set_mode, .get_trip_type = of_thermal_get_trip_type, diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index aa99edb4dff7..096370977068 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -49,18 +49,9 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); - enum thermal_device_mode mode; - int result; - - if (!tz->ops->get_mode) - return -EPERM; - result = tz->ops->get_mode(tz, &mode); - if (result) - return result; - - return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled" - : "disabled"); + return sprintf(buf, "%s\n", tz->mode == THERMAL_DEVICE_ENABLED ? + "enabled" : "disabled"); } static ssize_t @@ -428,30 +419,13 @@ static struct attribute_group thermal_zone_attribute_group = { .attrs = thermal_zone_dev_attrs, }; -/* We expose mode only if .get_mode is present */ static struct attribute *thermal_zone_mode_attrs[] = { &dev_attr_mode.attr, NULL, }; -static umode_t thermal_zone_mode_is_visible(struct kobject *kobj, - struct attribute *attr, - int attrno) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct thermal_zone_device *tz; - - tz = container_of(dev, struct thermal_zone_device, device); - - if (tz->ops->get_mode) - return attr->mode; - - return 0; -} - static struct attribute_group thermal_zone_mode_attribute_group = { .attrs = thermal_zone_mode_attrs, - .is_visible = thermal_zone_mode_is_visible, }; /* We expose passive only if passive trips are present */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 5f91d7f04512..a808f6fa2777 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -76,8 +76,6 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*get_mode) (struct thermal_zone_device *, - enum thermal_device_mode *); int (*set_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, -- cgit v1.2.3-59-g8ed1b From ac5d9ecc74d8beee8c87f1441e4adaf4e9fe90c5 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:20 +0200 Subject: thermal: Add mode helpers Prepare for making the drivers not access tzd's private members. 
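With these helpers, a driver no longer needs to touch tz->mode by hand. A hypothetical suspend/resume pair (names are illustrative only; the imx_thermal change later in this series does essentially this):

	static int __maybe_unused foo_thermal_suspend(struct device *dev)
	{
		struct foo_thermal_data *data = dev_get_drvdata(dev);

		/* Stop the core from polling a powered-down sensor. */
		return thermal_zone_device_disable(data->tz);
	}

	static int __maybe_unused foo_thermal_resume(struct device *dev)
	{
		struct foo_thermal_data *data = dev_get_drvdata(dev);

		/* Re-enable the zone and let the core re-evaluate it. */
		return thermal_zone_device_enable(data->tz);
	}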
Signed-off-by: Andrzej Pietrasiewicz Reviewed-by: Bartlomiej Zolnierkiewicz [staticize thermal_zone_device_set_mode()] Signed-off-by: kernel test robot Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-7-andrzej.p@collabora.com --- drivers/thermal/thermal_core.c | 53 ++++++++++++++++++++++++++++++++++++++++++ include/linux/thermal.h | 13 +++++++++++ 2 files changed, 66 insertions(+) (limited to 'include') diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 14d3b1b94c4f..f02c57c986f0 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -459,6 +459,59 @@ static void thermal_zone_device_reset(struct thermal_zone_device *tz) thermal_zone_device_init(tz); } +static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, + enum thermal_device_mode mode) +{ + int ret = 0; + + mutex_lock(&tz->lock); + + /* do nothing if mode isn't changing */ + if (mode == tz->mode) { + mutex_unlock(&tz->lock); + + return ret; + } + + if (tz->ops->set_mode) + ret = tz->ops->set_mode(tz, mode); + + if (!ret) + tz->mode = mode; + + mutex_unlock(&tz->lock); + + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return ret; +} + +int thermal_zone_device_enable(struct thermal_zone_device *tz) +{ + return thermal_zone_device_set_mode(tz, THERMAL_DEVICE_ENABLED); +} +EXPORT_SYMBOL_GPL(thermal_zone_device_enable); + +int thermal_zone_device_disable(struct thermal_zone_device *tz) +{ + return thermal_zone_device_set_mode(tz, THERMAL_DEVICE_DISABLED); +} +EXPORT_SYMBOL_GPL(thermal_zone_device_disable); + +int thermal_zone_device_is_enabled(struct thermal_zone_device *tz) +{ + enum thermal_device_mode mode; + + mutex_lock(&tz->lock); + + mode = tz->mode; + + mutex_unlock(&tz->lock); + + return mode == THERMAL_DEVICE_ENABLED; +} +EXPORT_SYMBOL_GPL(thermal_zone_device_is_enabled); + void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { diff --git a/include/linux/thermal.h b/include/linux/thermal.h index a808f6fa2777..df013c39ba9b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -416,6 +416,9 @@ int thermal_zone_get_offset(struct thermal_zone_device *tz); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); +int thermal_zone_device_enable(struct thermal_zone_device *tz); +int thermal_zone_device_disable(struct thermal_zone_device *tz); +int thermal_zone_device_is_enabled(struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, @@ -463,6 +466,16 @@ static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } + +static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) +{ return -ENODEV; } + +static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) +{ return -ENODEV; } + +static inline int +thermal_zone_device_is_enabled(struct thermal_zone_device *tz) +{ return -ENODEV; } #endif /* CONFIG_THERMAL */ #endif /* __THERMAL_H__ */ -- cgit v1.2.3-59-g8ed1b From f5e50bf4d3ef0aba4d5414c9ed51fa4a02e2ed12 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:25 +0200 Subject: thermal: Rename set_mode() to change_mode() set_mode() is only called when tzd's mode is about to change. 
Actual setting is performed in thermal_core, in thermal_zone_device_set_mode(). The meaning of set_mode() callback is actually to notify the driver about the mode being changed and giving the driver a chance to oppose such change. To better reflect the purpose of the method rename it to change_mode() Signed-off-by: Andrzej Pietrasiewicz [for acerhdf] Acked-by: Peter Kaestle Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-12-andrzej.p@collabora.com --- drivers/platform/x86/acerhdf.c | 6 +++--- drivers/thermal/imx_thermal.c | 8 ++++---- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 6 +++--- drivers/thermal/intel/intel_quark_dts_thermal.c | 6 +++--- drivers/thermal/thermal_core.c | 4 ++-- include/linux/thermal.h | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 76323855c80c..f816a8a13039 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -413,8 +413,8 @@ static inline void acerhdf_enable_kernelmode(void) * the temperature and the fan. * disabled: the BIOS takes control of the fan. */ -static int acerhdf_set_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode mode) +static int acerhdf_change_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) { if (mode == THERMAL_DEVICE_DISABLED && kernelmode) acerhdf_revert_to_bios_mode(); @@ -473,7 +473,7 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = { .bind = acerhdf_bind, .unbind = acerhdf_unbind, .get_temp = acerhdf_get_ec_temp, - .set_mode = acerhdf_set_mode, + .change_mode = acerhdf_change_mode, .get_trip_type = acerhdf_get_trip_type, .get_trip_hyst = acerhdf_get_trip_hyst, .get_trip_temp = acerhdf_get_trip_temp, diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index a02398118d88..9700ae39feb7 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -330,8 +330,8 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) return 0; } -static int imx_set_mode(struct thermal_zone_device *tz, - enum thermal_device_mode mode) +static int imx_change_mode(struct thermal_zone_device *tz, + enum thermal_device_mode mode) { struct imx_thermal_data *data = tz->devdata; struct regmap *map = data->tempmon; @@ -447,7 +447,7 @@ static struct thermal_zone_device_ops imx_tz_ops = { .bind = imx_bind, .unbind = imx_unbind, .get_temp = imx_get_temp, - .set_mode = imx_set_mode, + .change_mode = imx_change_mode, .get_trip_type = imx_get_trip_type, .get_trip_temp = imx_get_trip_temp, .get_crit_temp = imx_get_crit_temp, @@ -860,7 +860,7 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev) * Need to disable thermal sensor, otherwise, when thermal core * try to get temperature before thermal sensor resume, a wrong * temperature will be read as the thermal sensor is powered - * down. This is done in set_mode() operation called from + * down. 
This is done in change_mode() operation called from * thermal_zone_device_disable() */ ret = thermal_zone_device_disable(data->tz); diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index ce49d3b100d5..d3732f624913 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -377,8 +377,8 @@ static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, return 0; } -static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode mode) +static int int3400_thermal_change_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) { struct int3400_thermal_priv *priv = thermal->devdata; int result = 0; @@ -399,7 +399,7 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, static struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, - .set_mode = int3400_thermal_set_mode, + .change_mode = int3400_thermal_change_mode, }; static struct thermal_zone_params int3400_thermal_params = { diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index e29c3e330b17..3eafc6b0e6c3 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -298,8 +298,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, return 0; } -static int sys_set_mode(struct thermal_zone_device *tzd, - enum thermal_device_mode mode) +static int sys_change_mode(struct thermal_zone_device *tzd, + enum thermal_device_mode mode) { int ret; @@ -319,7 +319,7 @@ static struct thermal_zone_device_ops tzone_ops = { .get_trip_type = sys_get_trip_type, .set_trip_temp = sys_set_trip_temp, .get_crit_temp = sys_get_crit_temp, - .set_mode = sys_set_mode, + .change_mode = sys_change_mode, }; static void free_soc_dts(struct soc_sensor_entry *aux_entry) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index e613f5c07bad..a61e91513584 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -482,8 +482,8 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, return ret; } - if (tz->ops->set_mode) - ret = tz->ops->set_mode(tz, mode); + if (tz->ops->change_mode) + ret = tz->ops->change_mode(tz, mode); if (!ret) tz->mode = mode; diff --git a/include/linux/thermal.h b/include/linux/thermal.h index df013c39ba9b..b9efaa780d88 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -76,7 +76,7 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*set_mode) (struct thermal_zone_device *, + int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); -- cgit v1.2.3-59-g8ed1b From 2cdb54c93a7e5beb6f3f8b63575d9fb664dfc603 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 21 Apr 2020 19:04:05 +0200 Subject: docs: RCU: Convert rculist_nulls.txt to ReST - Add a SPDX header; - Adjust document title; - Some whitespace fixes and new line breaks; - Mark literal blocks as such; - Add it to RCU/index.rst. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Paul E. 
McKenney --- Documentation/RCU/index.rst | 1 + Documentation/RCU/rculist_nulls.rst | 194 ++++++++++++++++++++++++++++++++++++ Documentation/RCU/rculist_nulls.txt | 172 -------------------------------- include/linux/rculist_nulls.h | 2 +- net/core/sock.c | 4 +- 5 files changed, 198 insertions(+), 175 deletions(-) create mode 100644 Documentation/RCU/rculist_nulls.rst delete mode 100644 Documentation/RCU/rculist_nulls.txt (limited to 'include') diff --git a/Documentation/RCU/index.rst b/Documentation/RCU/index.rst index fa7a2a8949b7..577a47e27f5d 100644 --- a/Documentation/RCU/index.rst +++ b/Documentation/RCU/index.rst @@ -17,6 +17,7 @@ RCU concepts rcu_dereference whatisRCU rcu + rculist_nulls listRCU NMI-RCU UP diff --git a/Documentation/RCU/rculist_nulls.rst b/Documentation/RCU/rculist_nulls.rst new file mode 100644 index 000000000000..d40374221d69 --- /dev/null +++ b/Documentation/RCU/rculist_nulls.rst @@ -0,0 +1,194 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================================= +Using RCU hlist_nulls to protect list and objects +================================================= + +This section describes how to use hlist_nulls to +protect read-mostly linked lists and +objects using SLAB_TYPESAFE_BY_RCU allocations. + +Please read the basics in Documentation/RCU/listRCU.rst + +Using special makers (called 'nulls') is a convenient way +to solve following problem : + +A typical RCU linked list managing objects which are +allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can +use following algos : + +1) Lookup algo +-------------- + +:: + + rcu_read_lock() + begin: + obj = lockless_lookup(key); + if (obj) { + if (!try_get_ref(obj)) // might fail for free objects + goto begin; + /* + * Because a writer could delete object, and a writer could + * reuse these object before the RCU grace period, we + * must check key after getting the reference on object + */ + if (obj->key != key) { // not the object we expected + put_ref(obj); + goto begin; + } + } + rcu_read_unlock(); + +Beware that lockless_lookup(key) cannot use traditional hlist_for_each_entry_rcu() +but a version with an additional memory barrier (smp_rmb()) + +:: + + lockless_lookup(key) + { + struct hlist_node *node, *next; + for (pos = rcu_dereference((head)->first); + pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) && + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); + pos = rcu_dereference(next)) + if (obj->key == key) + return obj; + return NULL; + } + +And note the traditional hlist_for_each_entry_rcu() misses this smp_rmb():: + + struct hlist_node *node; + for (pos = rcu_dereference((head)->first); + pos && ({ prefetch(pos->next); 1; }) && + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); + pos = rcu_dereference(pos->next)) + if (obj->key == key) + return obj; + return NULL; + +Quoting Corey Minyard:: + + "If the object is moved from one list to another list in-between the + time the hash is calculated and the next field is accessed, and the + object has moved to the end of a new list, the traversal will not + complete properly on the list it should have, since the object will + be on the end of the new list and there's not a way to tell it's on a + new list and restart the list traversal. I think that this can be + solved by pre-fetching the "next" field (with proper barriers) before + checking the key." + +2) Insert algo +-------------- + +We need to make sure a reader cannot read the new 'obj->obj_next' value +and previous value of 'obj->key'. 
Or else, an item could be deleted +from a chain, and inserted into another chain. If new chain was empty +before the move, 'next' pointer is NULL, and lockless reader can +not detect it missed following items in original chain. + +:: + + /* + * Please note that new inserts are done at the head of list, + * not in the middle or end. + */ + obj = kmem_cache_alloc(...); + lock_chain(); // typically a spin_lock() + obj->key = key; + /* + * we need to make sure obj->key is updated before obj->next + * or obj->refcnt + */ + smp_wmb(); + atomic_set(&obj->refcnt, 1); + hlist_add_head_rcu(&obj->obj_node, list); + unlock_chain(); // typically a spin_unlock() + + +3) Remove algo +-------------- +Nothing special here, we can use a standard RCU hlist deletion. +But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused +very very fast (before the end of RCU grace period) + +:: + + if (put_last_reference_on(obj) { + lock_chain(); // typically a spin_lock() + hlist_del_init_rcu(&obj->obj_node); + unlock_chain(); // typically a spin_unlock() + kmem_cache_free(cachep, obj); + } + + + +-------------------------------------------------------------------------- + +With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup() +and extra smp_wmb() in insert function. + +For example, if we choose to store the slot number as the 'nulls' +end-of-list marker for each slot of the hash table, we can detect +a race (some writer did a delete and/or a move of an object +to another chain) checking the final 'nulls' value if +the lookup met the end of chain. If final 'nulls' value +is not the slot number, then we must restart the lookup at +the beginning. If the object was moved to the same chain, +then the reader doesn't care : It might eventually +scan the list again without harm. + + +1) lookup algo +-------------- + +:: + + head = &table[slot]; + rcu_read_lock(); + begin: + hlist_nulls_for_each_entry_rcu(obj, node, head, member) { + if (obj->key == key) { + if (!try_get_ref(obj)) // might fail for free objects + goto begin; + if (obj->key != key) { // not the object we expected + put_ref(obj); + goto begin; + } + goto out; + } + /* + * if the nulls value we got at the end of this lookup is + * not the expected one, we must restart lookup. + * We probably met an item that was moved to another chain. + */ + if (get_nulls_value(node) != slot) + goto begin; + obj = NULL; + + out: + rcu_read_unlock(); + +2) Insert function +------------------ + +:: + + /* + * Please note that new inserts are done at the head of list, + * not in the middle or end. + */ + obj = kmem_cache_alloc(cachep); + lock_chain(); // typically a spin_lock() + obj->key = key; + /* + * changes to obj->key must be visible before refcnt one + */ + smp_wmb(); + atomic_set(&obj->refcnt, 1); + /* + * insert obj in RCU way (readers might be traversing chain) + */ + hlist_nulls_add_head_rcu(&obj->obj_node, list); + unlock_chain(); // typically a spin_unlock() diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt deleted file mode 100644 index 23f115dc87cf..000000000000 --- a/Documentation/RCU/rculist_nulls.txt +++ /dev/null @@ -1,172 +0,0 @@ -Using hlist_nulls to protect read-mostly linked lists and -objects using SLAB_TYPESAFE_BY_RCU allocations. 
- -Please read the basics in Documentation/RCU/listRCU.rst - -Using special makers (called 'nulls') is a convenient way -to solve following problem : - -A typical RCU linked list managing objects which are -allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can -use following algos : - -1) Lookup algo --------------- -rcu_read_lock() -begin: -obj = lockless_lookup(key); -if (obj) { - if (!try_get_ref(obj)) // might fail for free objects - goto begin; - /* - * Because a writer could delete object, and a writer could - * reuse these object before the RCU grace period, we - * must check key after getting the reference on object - */ - if (obj->key != key) { // not the object we expected - put_ref(obj); - goto begin; - } -} -rcu_read_unlock(); - -Beware that lockless_lookup(key) cannot use traditional hlist_for_each_entry_rcu() -but a version with an additional memory barrier (smp_rmb()) - -lockless_lookup(key) -{ - struct hlist_node *node, *next; - for (pos = rcu_dereference((head)->first); - pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) && - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); - pos = rcu_dereference(next)) - if (obj->key == key) - return obj; - return NULL; - -And note the traditional hlist_for_each_entry_rcu() misses this smp_rmb() : - - struct hlist_node *node; - for (pos = rcu_dereference((head)->first); - pos && ({ prefetch(pos->next); 1; }) && - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); - pos = rcu_dereference(pos->next)) - if (obj->key == key) - return obj; - return NULL; -} - -Quoting Corey Minyard : - -"If the object is moved from one list to another list in-between the - time the hash is calculated and the next field is accessed, and the - object has moved to the end of a new list, the traversal will not - complete properly on the list it should have, since the object will - be on the end of the new list and there's not a way to tell it's on a - new list and restart the list traversal. I think that this can be - solved by pre-fetching the "next" field (with proper barriers) before - checking the key." - -2) Insert algo : ----------------- - -We need to make sure a reader cannot read the new 'obj->obj_next' value -and previous value of 'obj->key'. Or else, an item could be deleted -from a chain, and inserted into another chain. If new chain was empty -before the move, 'next' pointer is NULL, and lockless reader can -not detect it missed following items in original chain. - -/* - * Please note that new inserts are done at the head of list, - * not in the middle or end. - */ -obj = kmem_cache_alloc(...); -lock_chain(); // typically a spin_lock() -obj->key = key; -/* - * we need to make sure obj->key is updated before obj->next - * or obj->refcnt - */ -smp_wmb(); -atomic_set(&obj->refcnt, 1); -hlist_add_head_rcu(&obj->obj_node, list); -unlock_chain(); // typically a spin_unlock() - - -3) Remove algo --------------- -Nothing special here, we can use a standard RCU hlist deletion. -But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused -very very fast (before the end of RCU grace period) - -if (put_last_reference_on(obj) { - lock_chain(); // typically a spin_lock() - hlist_del_init_rcu(&obj->obj_node); - unlock_chain(); // typically a spin_unlock() - kmem_cache_free(cachep, obj); -} - - - --------------------------------------------------------------------------- -With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup() -and extra smp_wmb() in insert function. 
- -For example, if we choose to store the slot number as the 'nulls' -end-of-list marker for each slot of the hash table, we can detect -a race (some writer did a delete and/or a move of an object -to another chain) checking the final 'nulls' value if -the lookup met the end of chain. If final 'nulls' value -is not the slot number, then we must restart the lookup at -the beginning. If the object was moved to the same chain, -then the reader doesn't care : It might eventually -scan the list again without harm. - - -1) lookup algo - - head = &table[slot]; - rcu_read_lock(); -begin: - hlist_nulls_for_each_entry_rcu(obj, node, head, member) { - if (obj->key == key) { - if (!try_get_ref(obj)) // might fail for free objects - goto begin; - if (obj->key != key) { // not the object we expected - put_ref(obj); - goto begin; - } - goto out; - } -/* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. - */ - if (get_nulls_value(node) != slot) - goto begin; - obj = NULL; - -out: - rcu_read_unlock(); - -2) Insert function : --------------------- - -/* - * Please note that new inserts are done at the head of list, - * not in the middle or end. - */ -obj = kmem_cache_alloc(cachep); -lock_chain(); // typically a spin_lock() -obj->key = key; -/* - * changes to obj->key must be visible before refcnt one - */ -smp_wmb(); -atomic_set(&obj->refcnt, 1); -/* - * insert obj in RCU way (readers might be traversing chain) - */ -hlist_nulls_add_head_rcu(&obj->obj_node, list); -unlock_chain(); // typically a spin_unlock() diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 9670b54b484a..ff3e94779e73 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -162,7 +162,7 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] * [1] Documentation/core-api/atomic_ops.rst around line 114 - * [2] Documentation/RCU/rculist_nulls.txt around line 146 + * [2] Documentation/RCU/rculist_nulls.rst around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ diff --git a/net/core/sock.c b/net/core/sock.c index d832c650287c..6921a85a1177 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1973,7 +1973,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* * Before updating sk_refcnt, we must commit prior changes to memory - * (Documentation/RCU/rculist_nulls.txt for details) + * (Documentation/RCU/rculist_nulls.rst for details) */ smp_wmb(); refcount_set(&newsk->sk_refcnt, 2); @@ -3035,7 +3035,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk_rx_queue_clear(sk); /* * Before updating sk_refcnt, we must commit prior changes to memory - * (Documentation/RCU/rculist_nulls.txt for details) + * (Documentation/RCU/rculist_nulls.rst for details) */ smp_wmb(); refcount_set(&sk->sk_refcnt, 1); -- cgit v1.2.3-59-g8ed1b From 88748e330040ecf4681a2c8f344fd386862bf913 Mon Sep 17 00:00:00 2001 From: Madhuparna Bhowmik Date: Mon, 4 May 2020 08:05:05 -0400 Subject: trace: events: rcu: Change description of rcu_dyntick trace event The different strings used for describing the polarity are Start, End and StillNonIdle. Since StillIdle is not used in any trace point for rcu_dyntick, it can be removed and StillNonIdle can be added in the description. 
Because StillNonIdle is used in a few tracepoints for rcu_dyntick. Similarly, USER, IDLE and IRQ are used for describing context in the rcu_dyntick tracepoints. Since, "KERNEL" is not used for any of the rcu_dyntick tracepoints, remove it from the description. Signed-off-by: Madhuparna Bhowmik Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/trace/events/rcu.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index f9a7811148e2..af274d1532bf 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -435,11 +435,12 @@ TRACE_EVENT_RCU(rcu_fqs, #endif /* #if defined(CONFIG_TREE_RCU) */ /* - * Tracepoint for dyntick-idle entry/exit events. These take a string - * as argument: "Start" for entering dyntick-idle mode, "Startirq" for - * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it - * to irq/NMI, "--=" for events moving towards idle, and "++=" for events - * moving away from idle. + * Tracepoint for dyntick-idle entry/exit events. These take 2 strings + * as argument: + * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not + * being in dyntick-idle mode. + * context: "USER" or "IDLE" or "IRQ". + * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context. * * These events also take a pair of numbers, which indicate the nesting * depth before and after the event of interest, and a third number that is -- cgit v1.2.3-59-g8ed1b From 24692fa22c30cb8fcfcabdc07a3c82964475b639 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:46:49 +0200 Subject: rcu: Fix some kernel-doc warnings The current code provokes some kernel-doc warnings: ./kernel/rcu/tree.c:2915: warning: Function parameter or member 'count' not described in 'kfree_rcu_cpu' ./include/linux/rculist.h:517: warning: bad line: [@right ][node2 ... ] ./include/linux/rculist.h:2: WARNING: Unexpected indentation. This commit therefore moves the comment for "count" to the kernel-doc markup and adds a missing "*" on one kernel-doc continuation line. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Paul E. McKenney --- include/linux/rculist.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index df587d181844..7eed65b5f713 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -512,7 +512,7 @@ static inline void hlist_replace_rcu(struct hlist_node *old, * @right: The hlist head on the right * * The lists start out as [@left ][node1 ... ] and - [@right ][node2 ... ] + * [@right ][node2 ... ] * The lists end up as [@left ][node2 ... ] * [@right ][node1 ... ] */ -- cgit v1.2.3-59-g8ed1b From c408b215f58f7156bb6bafb64c0263ee907033df Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:55 +0200 Subject: rcu: Rename *_kfree_callback/*_kfree_rcu_offset/kfree_call_* The following changes are introduced: 1. Rename rcu_invoke_kfree_callback() to rcu_invoke_kvfree_callback(), as well as the associated trace events, so the rcu_kfree_callback(), becomes rcu_kvfree_callback(). The reason is to be aligned with kvfree() notation. 2. Rename __is_kfree_rcu_offset to __is_kvfree_rcu_offset. All RCU paths use kvfree() now instead of kfree(), thus rename it. 3. Rename kfree_call_rcu() to the kvfree_call_rcu(). The reason is, it is capable of freeing vmalloc() memory now. 
Do the same with __kfree_rcu() macro, it becomes __kvfree_rcu(), the goal is the same. Reviewed-by: Joel Fernandes (Google) Co-developed-by: Joel Fernandes (Google) Signed-off-by: Joel Fernandes (Google) Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 14 +++++++------- include/linux/rcutiny.h | 2 +- include/linux/rcutree.h | 2 +- include/trace/events/rcu.h | 8 ++++---- kernel/rcu/tiny.c | 4 ++-- kernel/rcu/tree.c | 16 ++++++++-------- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 659cbfa7581a..b344fc800a9b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -828,17 +828,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* * Does the specified offset indicate that the corresponding rcu_head - * structure can be handled by kfree_rcu()? + * structure can be handled by kvfree_rcu()? */ -#define __is_kfree_rcu_offset(offset) ((offset) < 4096) +#define __is_kvfree_rcu_offset(offset) ((offset) < 4096) /* * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. */ -#define __kfree_rcu(head, offset) \ +#define __kvfree_rcu(head, offset) \ do { \ - BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ - kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ + BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \ + kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ } while (0) /** @@ -857,7 +857,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will - * be generated in __kfree_rcu(). If this error is triggered, you can + * be generated in __kvfree_rcu(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * @@ -872,7 +872,7 @@ do { \ typeof (ptr) ___p = (ptr); \ \ if (___p) \ - __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ + __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ } while (0) /* diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8512caeb7682..fb2eb39c484f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -34,7 +34,7 @@ static inline void synchronize_rcu_expedited(void) synchronize_rcu(); } -static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d5cc9d675987..d2f4064ebd1d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(int cpu) } void synchronize_rcu_expedited(void); -void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); +void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier(void); bool rcu_eqs_special_set(int cpu); diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index f9a7811148e2..0ee93d0b1daa 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -506,13 +506,13 @@ TRACE_EVENT_RCU(rcu_callback, /* * Tracepoint for the registration of a single RCU callback of the special - * kfree() form. 
The first argument is the RCU type, the second argument + * kvfree() form. The first argument is the RCU type, the second argument * is a pointer to the RCU callback, the third argument is the offset * of the callback within the enclosing RCU-protected data structure, * the fourth argument is the number of lazy callbacks queued, and the * fifth argument is the total number of callbacks queued. */ -TRACE_EVENT_RCU(rcu_kfree_callback, +TRACE_EVENT_RCU(rcu_kvfree_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, long qlen), @@ -596,12 +596,12 @@ TRACE_EVENT_RCU(rcu_invoke_callback, /* * Tracepoint for the invocation of a single RCU callback of the special - * kfree() form. The first argument is the RCU flavor, the second + * kvfree() form. The first argument is the RCU flavor, the second * argument is a pointer to the RCU callback, and the third argument * is the offset of the callback within the enclosing RCU-protected * data structure. */ -TRACE_EVENT_RCU(rcu_invoke_kfree_callback, +TRACE_EVENT_RCU(rcu_invoke_kvfree_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 4b99f7b88bee..aa897c3f2e92 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -85,8 +85,8 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head) unsigned long offset = (unsigned long)head->func; rcu_lock_acquire(&rcu_callback_map); - if (__is_kfree_rcu_offset(offset)) { - trace_rcu_invoke_kfree_callback("", head, offset); + if (__is_kvfree_rcu_offset(offset)) { + trace_rcu_invoke_kvfree_callback("", head, offset); kvfree((void *)head - offset); rcu_lock_release(&rcu_callback_map); return true; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 67c4b984c499..f22c47e72287 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2905,8 +2905,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) return; // Enqueued onto ->nocb_bypass, so just leave. // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. rcu_segcblist_enqueue(&rdp->cblist, head); - if (__is_kfree_rcu_offset((unsigned long)func)) - trace_rcu_kfree_callback(rcu_state.name, head, + if (__is_kvfree_rcu_offset((unsigned long)func)) + trace_rcu_kvfree_callback(rcu_state.name, head, (unsigned long)func, rcu_segcblist_n_cbs(&rdp->cblist)); else @@ -3146,7 +3146,7 @@ static void kfree_rcu_work(struct work_struct *work) bkvhead[i]->records); } else { // vmalloc() / vfree(). for (j = 0; j < bkvhead[i]->nr_records; j++) { - trace_rcu_invoke_kfree_callback( + trace_rcu_invoke_kvfree_callback( rcu_state.name, bkvhead[i]->records[j], 0); @@ -3179,9 +3179,9 @@ static void kfree_rcu_work(struct work_struct *work) next = head->next; debug_rcu_head_unqueue((struct rcu_head *)ptr); rcu_lock_acquire(&rcu_callback_map); - trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset); + trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); - if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset))) + if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) kvfree(ptr); rcu_lock_release(&rcu_callback_map); @@ -3344,12 +3344,12 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) * one, that is used only when the main path can not be maintained temporary, * due to memory pressure. * - * Each kfree_call_rcu() request is added to a batch. The batch will be drained + * Each kvfree_call_rcu() request is added to a batch. The batch will be drained * every KFREE_DRAIN_JIFFIES number of jiffies. 
All the objects in the batch will * be free'd in workqueue context. This allows us to: batch requests together to * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load. */ -void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { unsigned long flags; struct kfree_rcu_cpu *krcp; @@ -3388,7 +3388,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) unlock_return: krc_this_cpu_unlock(krcp, flags); } -EXPORT_SYMBOL_GPL(kfree_call_rcu); +EXPORT_SYMBOL_GPL(kvfree_call_rcu); static unsigned long kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -- cgit v1.2.3-59-g8ed1b From ce4dce123fdcb5f209752d13f9f06926be65fc78 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:57 +0200 Subject: rcu: Introduce 2 arg kvfree_rcu() interface kvmalloc() can allocate two types of objects: SLAB backed and vmalloc backed. How it behaves depends on requested object's size and memory pressure. Add a kvfree_rcu() interface that can free memory allocated via kvmalloc(). It is a simple alias to kfree_rcu() which can now handle either type of object. struct test_kvfree_rcu { struct rcu_head rcu; unsigned char array[100]; }; struct test_kvfree_rcu *p; p = kvmalloc(10 * PAGE_SIZE); if (p) kvfree_rcu(p, rcu); Signed-off-by: Uladzislau Rezki (Sony) Co-developed-by: Joel Fernandes (Google) Reviewed-by: Joel Fernandes (Google) Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b344fc800a9b..51b26ab02878 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -875,6 +875,15 @@ do { \ __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ } while (0) +/** + * kvfree_rcu() - kvfree an object after a grace period. + * @ptr: pointer to kvfree + * @rhf: the name of the struct rcu_head within the type of @ptr. + * + * Same as kfree_rcu(), just simple alias. + */ +#define kvfree_rcu(ptr, rhf) kfree_rcu(ptr, rhf) + /* * Place this after a lock-acquisition primitive to guarantee that * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies -- cgit v1.2.3-59-g8ed1b From 3042f83f19bec2e0cd356f72b39e4d816e8cd5ff Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:58 +0200 Subject: rcu: Support reclaim for head-less object Update the kvfree_call_rcu() function with head-less support. This allows RCU to reclaim objects without an embedded rcu_head. tree-RCU: We introduce two chains of arrays to store SLAB-backed and vmalloc pointers, each. Storage in either of these arrays does not require embedding an rcu_head within the object. Maintaining the arrays may become impossible due to high memory pressure. For such cases there is an emergency path. Objects with rcu_head inside are just queued on a backup rcu_head list. Later on that list is drained. As for the head-less variant, as the current context can sleep, the following emergency measures are applied: a) Synchronously wait until a grace period has elapsed. b) Call kvfree(). tiny-RCU: For double argument calls, there are no new changes in behavior. For single argument call, kvfree() is directly inlined on the current stack after a synchronize_rcu() call. Note that for tiny-RCU, any call to synchronize_rcu() is actually a quiescent state, therefore it does nothing. 
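For illustration, a minimal caller-side sketch of the head-less path described above. The structure, function, and buffer size are hypothetical and not part of the patch; the point is that the object embeds no rcu_head and is handed to kvfree_call_rcu() with a NULL head, exactly as the per-CPU bulk/emergency paths expect:

	struct blob {
		unsigned char data[256];	/* no rcu_head needed for head-less reclaim */
	};

	static void release_blob(struct blob *b)	/* b was allocated with kvmalloc() */
	{
		/*
		 * The head-less path may fall back to synchronize_rcu()
		 * followed by kvfree() under memory pressure, so the
		 * caller must be in a context that is allowed to sleep.
		 */
		might_sleep();
		kvfree_call_rcu(NULL, (rcu_callback_t) b);
	}

The single-argument kvfree_rcu() wrapper added later in this series expands to exactly this kvfree_call_rcu(NULL, ...) form.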
Reviewed-by: Joel Fernandes (Google) Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Joel Fernandes (Google) Co-developed-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/linux/rcutiny.h | 18 +++++++++++++++++- kernel/rcu/tree.c | 45 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index fb2eb39c484f..5cc9637cac16 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -34,9 +34,25 @@ static inline void synchronize_rcu_expedited(void) synchronize_rcu(); } +/* + * Add one more declaration of kvfree() here. It is + * not so straight forward to just include + * where it is defined due to getting many compile + * errors caused by that include. + */ +extern void kvfree(const void *addr); + static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { - call_rcu(head, func); + if (head) { + call_rcu(head, func); + return; + } + + // kvfree_rcu(one_arg) call. + might_sleep(); + synchronize_rcu(); + kvfree((void *) func); } void rcu_qs(void); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f22c47e72287..01f29e4500ba 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3314,6 +3314,13 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) if (IS_ENABLED(CONFIG_PREEMPT_RT)) return false; + /* + * NOTE: For one argument of kvfree_rcu() we can + * drop the lock and get the page in sleepable + * context. That would allow to maintain an array + * for the CONFIG_PREEMPT_RT as well if no cached + * pages are available. + */ bnode = (struct kvfree_rcu_bulk_data *) __get_free_page(GFP_NOWAIT | __GFP_NOWARN); } @@ -3353,16 +3360,33 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { unsigned long flags; struct kfree_rcu_cpu *krcp; + bool success; void *ptr; + if (head) { + ptr = (void *) head - (unsigned long) func; + } else { + /* + * Please note there is a limitation for the head-less + * variant, that is why there is a clear rule for such + * objects: it can be used from might_sleep() context + * only. For other places please embed an rcu_head to + * your data. + */ + might_sleep(); + ptr = (unsigned long *) func; + } + krcp = krc_this_cpu_lock(&flags); - ptr = (void *)head - (unsigned long)func; // Queue the object but don't yet schedule the batch. if (debug_rcu_head_queue(ptr)) { // Probable double kfree_rcu(), just leak. WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n", __func__, head); + + // Mark as success and leave. + success = true; goto unlock_return; } @@ -3370,10 +3394,16 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) * Under high memory pressure GFP_NOWAIT can fail, * in that case the emergency path is maintained. */ - if (unlikely(!kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr))) { + success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr); + if (!success) { + if (head == NULL) + // Inline if kvfree_rcu(one_arg) call. + goto unlock_return; + head->func = func; head->next = krcp->head; krcp->head = head; + success = true; } WRITE_ONCE(krcp->count, krcp->count + 1); @@ -3387,6 +3417,17 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) unlock_return: krc_this_cpu_unlock(krcp, flags); + + /* + * Inline kvfree() after synchronize_rcu(). We can do + * it from might_sleep() context only, so the current + * CPU can pass the QS state. 
+ */ + if (!success) { + debug_rcu_head_unqueue((struct rcu_head *) ptr); + synchronize_rcu(); + kvfree(ptr); + } } EXPORT_SYMBOL_GPL(kvfree_call_rcu); -- cgit v1.2.3-59-g8ed1b From 1835f475e3518ade61e25a57572c78b953778656 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:59 +0200 Subject: rcu: Introduce single argument kvfree_rcu() interface Make kvfree_rcu() capable of freeing objects that will not embed an rcu_head within it. This saves storage overhead in such objects. Reclaiming headless objects this way requires only a single argument (pointer to the object). After this patch, there are two ways to use kvfree_rcu(): a) kvfree_rcu(ptr, rhf); struct X { struct rcu_head rhf; unsigned char data[100]; }; void *ptr = kvmalloc(sizeof(struct X), GFP_KERNEL); if (ptr) kvfree_rcu(ptr, rhf); b) kvfree_rcu(ptr); void *ptr = kvmalloc(some_bytes, GFP_KERNEL); if (ptr) kvfree_rcu(ptr); Note that the headless usage (example b) can only be used in a code that can sleep. This is enforced by the CONFIG_DEBUG_ATOMIC_SLEEP option. Co-developed-by: Joel Fernandes (Google) Reviewed-by: Joel Fernandes (Google) Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 51b26ab02878..d15d46db61f7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -877,12 +877,42 @@ do { \ /** * kvfree_rcu() - kvfree an object after a grace period. - * @ptr: pointer to kvfree - * @rhf: the name of the struct rcu_head within the type of @ptr. * - * Same as kfree_rcu(), just simple alias. + * This macro consists of one or two arguments and it is + * based on whether an object is head-less or not. If it + * has a head then a semantic stays the same as it used + * to be before: + * + * kvfree_rcu(ptr, rhf); + * + * where @ptr is a pointer to kvfree(), @rhf is the name + * of the rcu_head structure within the type of @ptr. + * + * When it comes to head-less variant, only one argument + * is passed and that is just a pointer which has to be + * freed after a grace period. Therefore the semantic is + * + * kvfree_rcu(ptr); + * + * where @ptr is a pointer to kvfree(). + * + * Please note, head-less way of freeing is permitted to + * use from a context that has to follow might_sleep() + * annotation. Otherwise, please switch and embed the + * rcu_head structure within the type of @ptr. */ -#define kvfree_rcu(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ + kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) + +#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME +#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu_arg_1(ptr) \ +do { \ + typeof(ptr) ___p = (ptr); \ + \ + if (___p) \ + kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \ +} while (0) /* * Place this after a lock-acquisition primitive to guarantee that -- cgit v1.2.3-59-g8ed1b From c7dcf8106f7570b133b05ff68fd4100064965d9d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 12 Jun 2020 13:11:29 -0700 Subject: rcu-tasks: Fix synchronize_rcu_tasks_trace() header comment The synchronize_rcu_tasks_trace() header comment incorrectly claims that any number of things delimit RCU Tasks Trace read-side critical sections, when in fact only rcu_read_lock_trace() and rcu_read_unlock_trace() do so. 
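For reference, the pairing that actually delimits these critical sections looks as follows; the reader and updater functions are hypothetical sketches, not code from this patch:

	#include <linux/rcupdate_trace.h>

	static void trace_reader(void)
	{
		rcu_read_lock_trace();
		/* ... access data protected by RCU Tasks Trace ... */
		rcu_read_unlock_trace();
	}

	static void trace_updater(void)
	{
		/* Unpublish the old data, then wait for all trace readers. */
		synchronize_rcu_tasks_trace();
		/* ... now safe to free the old data ... */
	}

Only this lock/unlock pair marks RCU Tasks Trace readers, which is what the corrected header comment below states.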
This commit therefore fixes this comment, and, while in the area, fixes a typo in the rcu_read_lock_trace() header comment. Reported-by: Alexei Starovoitov Signed-off-by: Paul E. McKenney --- include/linux/rcupdate_trace.h | 4 ++-- kernel/rcu/tasks.h | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index 4c25a41f8b27..d9015aac78c6 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -36,8 +36,8 @@ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting); /** * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section * - * When synchronize_rcu_trace() is invoked by one task, then that task - * is guaranteed to block until all other tasks exit their read-side + * When synchronize_rcu_tasks_trace() is invoked by one task, then that + * task is guaranteed to block until all other tasks exit their read-side * critical sections. Similarly, if call_rcu_trace() is invoked on one * task while other tasks are within RCU read-side critical sections, * invocation of the corresponding RCU callback is deferred until after diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index ce23f6cc5043..a77298c1d126 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1118,11 +1118,10 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period * * Control will return to the caller some time after a trace rcu-tasks - * grace period has elapsed, in other words after all currently - * executing rcu-tasks read-side critical sections have elapsed. These - * read-side critical sections are delimited by calls to schedule(), - * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory, - * anyway) cond_resched(). + * grace period has elapsed, in other words after all currently executing + * rcu-tasks read-side critical sections have elapsed. These read-side + * critical sections are delimited by calls to rcu_read_lock_trace() + * and rcu_read_unlock_trace(). * * This is a very specialized primitive, intended only for a few uses in * tracing and other situations requiring manipulation of function preambles -- cgit v1.2.3-59-g8ed1b From 4a5f133c15b77c4018e8d7996541868ac94afb4f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 24 Apr 2020 11:21:40 -0700 Subject: rcutorture: Add races with task-exit processing Several variants of Linux-kernel RCU interact with task-exit processing, including preemptible RCU, Tasks RCU, and Tasks Trace RCU. This commit therefore adds testing of this interaction to rcutorture by adding rcutorture.read_exit_burst and rcutorture.read_exit_delay kernel-boot parameters. These kernel parameters control the frequency and spacing of special read-then-exit kthreads that are spawned. [ paulmck: Apply feedback from Dan Carpenter's static checker. ] [ paulmck: Reduce latency to avoid false-positive shutdown hangs. ] Signed-off-by: Paul E. 
McKenney --- Documentation/admin-guide/kernel-parameters.txt | 14 +++ include/linux/torture.h | 5 ++ kernel/rcu/rcutorture.c | 112 +++++++++++++++++++++++- 3 files changed, 128 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index fb95fad81c79..a0dcc925c8a2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4258,6 +4258,20 @@ Set time (jiffies) between CPU-hotplug operations, or zero to disable CPU-hotplug testing. + rcutorture.read_exit= [KNL] + Set the number of read-then-exit kthreads used + to test the interaction of RCU updaters and + task-exit processing. + + rcutorture.read_exit_burst= [KNL] + The number of times in a given read-then-exit + episode that a set of read-then-exit kthreads + is spawned. + + rcutorture.read_exit_delay= [KNL] + The delay, in seconds, between successive + read-then-exit testing episodes. + rcutorture.shuffle_interval= [KNL] Set task-shuffle interval (s). Shuffling tasks allows some CPUs to go into dyntick-idle mode diff --git a/include/linux/torture.h b/include/linux/torture.h index 629b66e6c161..7f65bd1dd307 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -55,6 +55,11 @@ struct torture_random_state { #define DEFINE_TORTURE_RANDOM_PERCPU(name) \ DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); +static inline void torture_random_init(struct torture_random_state *trsp) +{ + trsp->trs_state = 0; + trsp->trs_count = 0; +} /* Task shuffler, which causes CPUs to occasionally go idle. */ void torture_shuffle_task_register(struct task_struct *tp); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index efb792e13fca..2621a339c8a4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -109,6 +109,10 @@ torture_param(int, object_debug, 0, torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable"); +torture_param(int, read_exit_delay, 13, + "Delay between read-then-exit episodes (s)"); +torture_param(int, read_exit_burst, 16, + "# of read-then-exit bursts per episode, zero to disable"); torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); @@ -146,6 +150,7 @@ static struct task_struct *stall_task; static struct task_struct *fwd_prog_task; static struct task_struct **barrier_cbs_tasks; static struct task_struct *barrier_task; +static struct task_struct *read_exit_task; #define RCU_TORTURE_PIPE_LEN 10 @@ -177,6 +182,7 @@ static long n_rcu_torture_boosts; static atomic_long_t n_rcu_torture_timers; static long n_barrier_attempts; static long n_barrier_successes; /* did rcu_barrier test succeed? 
*/ +static unsigned long n_read_exits; static struct list_head rcu_torture_removed; static unsigned long shutdown_jiffies; @@ -1539,10 +1545,11 @@ rcu_torture_stats_print(void) n_rcu_torture_boosts, atomic_long_read(&n_rcu_torture_timers)); torture_onoff_stats(); - pr_cont("barrier: %ld/%ld:%ld\n", + pr_cont("barrier: %ld/%ld:%ld ", data_race(n_barrier_successes), data_race(n_barrier_attempts), data_race(n_rcu_torture_barrier_error)); + pr_cont("read-exits: %ld\n", data_race(n_read_exits)); pr_alert("%s%s ", torture_type, TORTURE_FLAG); if (atomic_read(&n_rcu_torture_mberror) || @@ -1634,7 +1641,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " "stall_cpu_block=%d " "n_barrier_cbs=%d " - "onoff_interval=%d onoff_holdoff=%d\n", + "onoff_interval=%d onoff_holdoff=%d " + "read_exit_delay=%d read_exit_burst=%d\n", torture_type, tag, nrealreaders, nfakewriters, stat_interval, verbose, test_no_idle_hz, shuffle_interval, stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, @@ -1643,7 +1651,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, stall_cpu_block, n_barrier_cbs, - onoff_interval, onoff_holdoff); + onoff_interval, onoff_holdoff, + read_exit_delay, read_exit_burst); } static int rcutorture_booster_cleanup(unsigned int cpu) @@ -2338,6 +2347,99 @@ static bool rcu_torture_can_boost(void) return true; } +static bool read_exit_child_stop; +static bool read_exit_child_stopped; +static wait_queue_head_t read_exit_wq; + +// Child kthread which just does an rcutorture reader and exits. +static int rcu_torture_read_exit_child(void *trsp_in) +{ + struct torture_random_state *trsp = trsp_in; + + set_user_nice(current, MAX_NICE); + // Minimize time between reading and exiting. + while (!kthread_should_stop()) + schedule_timeout_uninterruptible(1); + (void)rcu_torture_one_read(trsp); + return 0; +} + +// Parent kthread which creates and destroys read-exit child kthreads. +static int rcu_torture_read_exit(void *unused) +{ + int count = 0; + bool errexit = false; + int i; + struct task_struct *tsp; + DEFINE_TORTURE_RANDOM(trs); + + // Allocate and initialize. + set_user_nice(current, MAX_NICE); + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); + + // Each pass through this loop does one read-exit episode. + do { + if (++count > read_exit_burst) { + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); + rcu_barrier(); // Wait for task_struct free, avoid OOM. + for (i = 0; i < read_exit_delay; i++) { + schedule_timeout_uninterruptible(HZ); + if (READ_ONCE(read_exit_child_stop)) + break; + } + if (!READ_ONCE(read_exit_child_stop)) + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); + count = 0; + } + if (READ_ONCE(read_exit_child_stop)) + break; + // Spawn child. + tsp = kthread_run(rcu_torture_read_exit_child, + &trs, "%s", + "rcu_torture_read_exit_child"); + if (IS_ERR(tsp)) { + VERBOSE_TOROUT_ERRSTRING("out of memory"); + errexit = true; + tsp = NULL; + break; + } + cond_resched(); + kthread_stop(tsp); + n_read_exits ++; + stutter_wait("rcu_torture_read_exit"); + } while (!errexit && !READ_ONCE(read_exit_child_stop)); + + // Clean up and exit. + smp_store_release(&read_exit_child_stopped, true); // After reaping. + smp_mb(); // Store before wakeup. 
+ wake_up(&read_exit_wq); + while (!torture_must_stop()) + schedule_timeout_uninterruptible(1); + torture_kthread_stopping("rcu_torture_read_exit"); + return 0; +} + +static int rcu_torture_read_exit_init(void) +{ + if (read_exit_burst <= 0) + return -EINVAL; + init_waitqueue_head(&read_exit_wq); + read_exit_child_stop = false; + read_exit_child_stopped = false; + return torture_create_kthread(rcu_torture_read_exit, NULL, + read_exit_task); +} + +static void rcu_torture_read_exit_cleanup(void) +{ + if (!read_exit_task) + return; + WRITE_ONCE(read_exit_child_stop, true); + smp_mb(); // Above write before wait. + wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); + torture_stop_kthread(rcutorture_read_exit, read_exit_task); +} + static enum cpuhp_state rcutor_hp; static void @@ -2359,6 +2461,7 @@ rcu_torture_cleanup(void) } show_rcu_gp_kthreads(); + rcu_torture_read_exit_cleanup(); rcu_torture_barrier_cleanup(); torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); torture_stop_kthread(rcu_torture_stall, stall_task); @@ -2680,6 +2783,9 @@ rcu_torture_init(void) if (firsterr) goto unwind; firsterr = rcu_torture_barrier_init(); + if (firsterr) + goto unwind; + firsterr = rcu_torture_read_exit_init(); if (firsterr) goto unwind; if (object_debug) -- cgit v1.2.3-59-g8ed1b From c93773c1a3fedf6c3f6fa12833e2b74a9897c3e3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 12 Feb 2020 13:29:15 -0800 Subject: rculist: Add ASSERT_EXCLUSIVE_ACCESS() to __list_splice_init_rcu() After the sync() in __list_splice_init_rcu(), there should be no readers traversing the old list. This commit therefore enlists the help of KCSAN to verify this condition via a pair of calls to ASSERT_EXCLUSIVE_ACCESS(). Signed-off-by: Paul E. McKenney Cc: Marco Elver --- include/linux/rculist.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index df587d181844..2ebd112f86f7 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -248,6 +248,8 @@ static inline void __list_splice_init_rcu(struct list_head *list, */ sync(); + ASSERT_EXCLUSIVE_ACCESS(*first); + ASSERT_EXCLUSIVE_ACCESS(*last); /* * Readers are finished with the source list, so perform splice. -- cgit v1.2.3-59-g8ed1b From 0fb8125635e8eb5483fb095f98dcf0651206a7b8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 9 Jun 2020 22:04:44 +0200 Subject: video: fbdev: amba-clcd: Retire elder CLCD driver All the functionality in this driver has been reimplemented in the new DRM driver in drivers/gpu/drm/pl111/* and all the boards using it have been migrated to use the DRM driver with all configuration coming from the device tree. I started the work to migrate the CLCD driver to DRM in april 2017 and it took a little more than 3 years to do this properly without leaving any platforms behind. 
Reviewed-by: Eric Anholt Signed-off-by: Linus Walleij Cc: Russell King Link: https://patchwork.freedesktop.org/patch/msgid/20200609200446.153209-2-linus.walleij@linaro.org --- MAINTAINERS | 5 - drivers/video/fbdev/Kconfig | 20 - drivers/video/fbdev/Makefile | 1 - drivers/video/fbdev/amba-clcd.c | 986 ---------------------------------------- include/linux/amba/clcd.h | 290 ------------ 5 files changed, 1302 deletions(-) delete mode 100644 drivers/video/fbdev/amba-clcd.c delete mode 100644 include/linux/amba/clcd.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index dad5a62d21a7..40474982a21d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1448,11 +1448,6 @@ S: Odd Fixes F: drivers/amba/ F: include/linux/amba/bus.h -ARM PRIMECELL CLCD PL110 DRIVER -M: Russell King -S: Odd Fixes -F: drivers/video/fbdev/amba-clcd.* - ARM PRIMECELL KMI PL050 DRIVER M: Russell King S: Odd Fixes diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 0f559aeaf469..b33bb6318c2b 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -272,26 +272,6 @@ config FB_PM2_FIFO_DISCONNECT help Support the Permedia2 FIFO disconnect feature. -config FB_ARMCLCD - tristate "ARM PrimeCell PL110 support" - depends on ARM || ARM64 || COMPILE_TEST - depends on FB && ARM_AMBA && HAS_IOMEM - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - select FB_MODE_HELPERS if OF - select VIDEOMODE_HELPERS if OF - select BACKLIGHT_CLASS_DEVICE if OF - help - This framebuffer device driver is for the ARM PrimeCell PL110 - Colour LCD controller. ARM PrimeCells provide the building - blocks for System on a Chip devices. - - If you want to compile this as a module (=code which can be - inserted into and removed from the running kernel), say M - here and read . The module - will be called amba-clcd. - config FB_ACORN bool "Acorn VIDC support" depends on (FB = y) && ARM && ARCH_ACORN diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index aa6352798cf4..76a43ec8f24c 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -75,7 +75,6 @@ obj-$(CONFIG_FB_HIT) += hitfb.o obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o obj-$(CONFIG_FB_PVR2) += pvr2fb.o obj-$(CONFIG_FB_VOODOO1) += sstfb.o -obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o obj-$(CONFIG_FB_68328) += 68328fb.o obj-$(CONFIG_FB_GBE) += gbefb.o diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c deleted file mode 100644 index b7682de412d8..000000000000 --- a/drivers/video/fbdev/amba-clcd.c +++ /dev/null @@ -1,986 +0,0 @@ -/* - * linux/drivers/video/amba-clcd.c - * - * Copyright (C) 2001 ARM Limited, by David A Rusling - * Updated to 2.5, Deep Blue Solutions Ltd. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. - * - * ARM PrimeCell PL110 Color LCD Controller - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include