From 35025735a79eaa894c43837b94fd33c9d6b122df Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Mar 2022 15:41:18 +0000 Subject: KVM: x86/xen: Support direct injection of event channel events This adds a KVM_XEN_HVM_EVTCHN_SEND ioctl which allows direct injection of events given an explicit { vcpu, port, priority } in precisely the same form that those fields are given in the IRQ routing table. Userspace is currently able to inject 2-level events purely by setting the bits in the shared_info and vcpu_info, but FIFO event channels are harder to deal with; we will need the kernel to take sole ownership of delivery when we support those. A patch advertising this feature with a new bit in the KVM_CAP_XEN_HVM ioctl will be added in a subsequent patch. Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-9-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- include/uapi/linux/kvm.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 91a6fe4e02c0..49cd2e9e0f6a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1699,6 +1699,9 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) #define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn) + #define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2) #define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2) -- cgit v1.2.3-59-g8ed1b From 2fd6df2f2b47d4301b1ee0fe9d627d1c061a5988 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Thu, 3 Mar 2022 15:41:19 +0000 Subject: KVM: x86/xen: intercept EVTCHNOP_send from guests Userspace registers a sending @port to either deliver to an @eventfd or directly back to a local event channel port. After binding events the guest or host may wish to bind those events to a particular vcpu. This is usually done for unbound and interdomain events. Update requests are handled via the KVM_XEN_EVTCHN_UPDATE flag. Unregistered ports are handled by the emulator.
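For illustration only (this sketch is not part of either patch: the vm_fd, port numbers and vCPU ID are hypothetical, and EVTCHNSTAT_interdomain comes from the Xen public headers rather than <linux/kvm.h>), a VMM might register such a loopback port through the kvm_xen_hvm_attr layout added below:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Intercept guest EVTCHNOP_send on port 3 and raise local port 4 on vCPU 0. */
	static int register_evtchn_loopback(int vm_fd)
	{
		struct kvm_xen_hvm_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = KVM_XEN_ATTR_TYPE_EVTCHN;
		attr.u.evtchn.send_port = 3;			/* port the guest sends on */
		attr.u.evtchn.type = EVTCHNSTAT_interdomain;	/* from the Xen public headers */
		attr.u.evtchn.flags = 0;			/* 0 = assign; UPDATE/DEASSIGN/RESET also exist */
		attr.u.evtchn.deliver.port.port = 4;		/* local port to raise on delivery */
		attr.u.evtchn.deliver.port.vcpu = 0;
		attr.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
	}

Direct injection from userspace itself then goes through the KVM_XEN_HVM_EVTCHN_SEND ioctl added in the previous patch, using the same { vcpu, port, priority } triple.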
Co-developed-by: Ankur Arora Co-developed-By: David Woodhouse Signed-off-by: Joao Martins Signed-off-by: Ankur Arora Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-10-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/xen.c | 295 ++++++++++++++++++++++++++++++++++++++-- include/uapi/linux/kvm.h | 28 ++++ 3 files changed, 309 insertions(+), 15 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7808491ebba8..b20f7d99d702 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1024,6 +1024,7 @@ struct kvm_xen { bool long_mode; u8 upcall_vector; struct gfn_to_pfn_cache shinfo_cache; + struct idr evtchn_ports; }; enum kvm_irqchip_mode { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index f0f0011c4617..3d95167028ba 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -11,6 +11,7 @@ #include "lapic.h" #include "hyperv.h" +#include #include #include @@ -21,6 +22,9 @@ #include "trace.h" +static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data); +static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r); + DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) @@ -365,36 +369,44 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) { int r = -ENOENT; - mutex_lock(&kvm->lock); switch (data->type) { case KVM_XEN_ATTR_TYPE_LONG_MODE: if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { r = -EINVAL; } else { + mutex_lock(&kvm->lock); kvm->arch.xen.long_mode = !!data->u.long_mode; + mutex_unlock(&kvm->lock); r = 0; } break; case KVM_XEN_ATTR_TYPE_SHARED_INFO: + mutex_lock(&kvm->lock); r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); + mutex_unlock(&kvm->lock); break; case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: if (data->u.vector && data->u.vector < 0x10) r = -EINVAL; else { + mutex_lock(&kvm->lock); kvm->arch.xen.upcall_vector = data->u.vector; + mutex_unlock(&kvm->lock); r = 0; } break; + case KVM_XEN_ATTR_TYPE_EVTCHN: + r = kvm_xen_setattr_evtchn(kvm, data); + break; + default: break; } - mutex_unlock(&kvm->lock); return r; } @@ -770,18 +782,6 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) return 0; } -void kvm_xen_init_vm(struct kvm *kvm) -{ -} - -void kvm_xen_destroy_vm(struct kvm *kvm) -{ - kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache); - - if (kvm->arch.xen_hvm_config.msr) - static_branch_slow_dec_deferred(&kvm_xen_enabled); -} - static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) { kvm_rax_write(vcpu, result); @@ -801,7 +801,8 @@ static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu) int kvm_xen_hypercall(struct kvm_vcpu *vcpu) { bool longmode; - u64 input, params[6]; + u64 input, params[6], r = -ENOSYS; + bool handled = false; input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX); @@ -832,6 +833,19 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) trace_kvm_xen_hypercall(input, params[0], params[1], params[2], params[3], params[4], params[5]); + switch (input) { + case __HYPERVISOR_event_channel_op: + if (params[0] == EVTCHNOP_send) + handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r); + break; + + default: + break; + } + + if (handled) + return kvm_xen_hypercall_set_result(vcpu, r); + vcpu->run->exit_reason = KVM_EXIT_XEN; vcpu->run->xen.type = 
KVM_EXIT_XEN_HCALL; vcpu->run->xen.u.hcall.longmode = longmode; @@ -1118,6 +1132,234 @@ int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn * return ret; } +/* + * Support for *outbound* event channel events via the EVTCHNOP_send hypercall. + */ +struct evtchnfd { + u32 send_port; + u32 type; + union { + struct kvm_xen_evtchn port; + struct { + u32 port; /* zero */ + struct eventfd_ctx *ctx; + } eventfd; + } deliver; +}; + +/* + * Update target vCPU or priority for a registered sending channel. + */ +static int kvm_xen_eventfd_update(struct kvm *kvm, + struct kvm_xen_hvm_attr *data) +{ + u32 port = data->u.evtchn.send_port; + struct evtchnfd *evtchnfd; + + if (!port || port >= max_evtchn_port(kvm)) + return -EINVAL; + + mutex_lock(&kvm->lock); + evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); + mutex_unlock(&kvm->lock); + + if (!evtchnfd) + return -ENOENT; + + /* For an UPDATE, nothing may change except the priority/vcpu */ + if (evtchnfd->type != data->u.evtchn.type) + return -EINVAL; + + /* + * Port cannot change, and if it's zero that was an eventfd + * which can't be changed either. + */ + if (!evtchnfd->deliver.port.port || + evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port) + return -EINVAL; + + /* We only support 2 level event channels for now */ + if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) + return -EINVAL; + + mutex_lock(&kvm->lock); + evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; + if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) { + evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; + evtchnfd->deliver.port.vcpu_idx = -1; + } + mutex_unlock(&kvm->lock); + return 0; +} + +/* + * Configure the target (eventfd or local port delivery) for sending on + * a given event channel. 
+ */ +static int kvm_xen_eventfd_assign(struct kvm *kvm, + struct kvm_xen_hvm_attr *data) +{ + u32 port = data->u.evtchn.send_port; + struct eventfd_ctx *eventfd = NULL; + struct evtchnfd *evtchnfd = NULL; + int ret = -EINVAL; + + if (!port || port >= max_evtchn_port(kvm)) + return -EINVAL; + + evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL); + if (!evtchnfd) + return -ENOMEM; + + switch(data->u.evtchn.type) { + case EVTCHNSTAT_ipi: + /* IPI must map back to the same port# */ + if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port) + goto out; /* -EINVAL */ + break; + + case EVTCHNSTAT_interdomain: + if (data->u.evtchn.deliver.port.port) { + if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm)) + goto out; /* -EINVAL */ + } else { + eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd); + if (IS_ERR(eventfd)) { + ret = PTR_ERR(eventfd); + goto out; + } + } + break; + + case EVTCHNSTAT_virq: + case EVTCHNSTAT_closed: + case EVTCHNSTAT_unbound: + case EVTCHNSTAT_pirq: + default: /* Unknown event channel type */ + goto out; /* -EINVAL */ + } + + evtchnfd->send_port = data->u.evtchn.send_port; + evtchnfd->type = data->u.evtchn.type; + if (eventfd) { + evtchnfd->deliver.eventfd.ctx = eventfd; + } else { + /* We only support 2 level event channels for now */ + if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) + goto out; /* -EINVAL; */ + + evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port; + evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; + evtchnfd->deliver.port.vcpu_idx = -1; + evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; + } + + mutex_lock(&kvm->lock); + ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1, + GFP_KERNEL); + mutex_unlock(&kvm->lock); + if (ret >= 0) + return 0; + + if (ret == -ENOSPC) + ret = -EEXIST; +out: + if (eventfd) + eventfd_ctx_put(eventfd); + kfree(evtchnfd); + return ret; +} + +static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port) +{ + struct evtchnfd *evtchnfd; + + mutex_lock(&kvm->lock); + evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port); + mutex_unlock(&kvm->lock); + + if (!evtchnfd) + return -ENOENT; + + if (kvm) + synchronize_srcu(&kvm->srcu); + if (!evtchnfd->deliver.port.port) + eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); + kfree(evtchnfd); + return 0; +} + +static int kvm_xen_eventfd_reset(struct kvm *kvm) +{ + struct evtchnfd *evtchnfd; + int i; + + mutex_lock(&kvm->lock); + idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { + idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port); + synchronize_srcu(&kvm->srcu); + if (!evtchnfd->deliver.port.port) + eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); + kfree(evtchnfd); + } + mutex_unlock(&kvm->lock); + + return 0; +} + +static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + u32 port = data->u.evtchn.send_port; + + if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET) + return kvm_xen_eventfd_reset(kvm); + + if (!port || port >= max_evtchn_port(kvm)) + return -EINVAL; + + if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN) + return kvm_xen_eventfd_deassign(kvm, port); + if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE) + return kvm_xen_eventfd_update(kvm, data); + if (data->u.evtchn.flags) + return -EINVAL; + + return kvm_xen_eventfd_assign(kvm, data); +} + +static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r) +{ + struct evtchnfd *evtchnfd; + struct evtchn_send send; + 
gpa_t gpa; + int idx; + + idx = srcu_read_lock(&vcpu->kvm->srcu); + gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + + if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) { + *r = -EFAULT; + return true; + } + + /* The evtchn_ports idr is protected by vcpu->kvm->srcu */ + evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port); + if (!evtchnfd) + return false; + + if (evtchnfd->deliver.port.port) { + int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm); + if (ret < 0 && ret != -ENOTCONN) + return false; + } else { + eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1); + } + + *r = 0; + return true; +} + void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) { kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, @@ -1127,3 +1369,26 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache); } + +void kvm_xen_init_vm(struct kvm *kvm) +{ + idr_init(&kvm->arch.xen.evtchn_ports); +} + +void kvm_xen_destroy_vm(struct kvm *kvm) +{ + struct evtchnfd *evtchnfd; + int i; + + kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache); + + idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { + if (!evtchnfd->deliver.port.port) + eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); + kfree(evtchnfd); + } + idr_destroy(&kvm->arch.xen.evtchn_ports); + + if (kvm->arch.xen_hvm_config.msr) + static_branch_slow_dec_deferred(&kvm_xen_enabled); +} diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 49cd2e9e0f6a..623ed2cb228f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1686,6 +1686,32 @@ struct kvm_xen_hvm_attr { struct { __u64 gfn; } shared_info; + struct { + __u32 send_port; + __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */ + __u32 flags; +#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0) +#define KVM_XEN_EVTCHN_UPDATE (1 << 1) +#define KVM_XEN_EVTCHN_RESET (1 << 2) + /* + * Events sent by the guest are either looped back to + * the guest itself (potentially on a different port#) + * or signalled via an eventfd. + */ + union { + struct { + __u32 port; + __u32 vcpu; + __u32 priority; + } port; + struct { + __u32 port; /* Zero for eventfd */ + __s32 fd; + } eventfd; + __u32 padding[4]; + } deliver; + } evtchn; + __u64 pad[8]; } u; }; @@ -1694,6 +1720,8 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3 /* Per-vCPU Xen attributes */ #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) -- cgit v1.2.3-59-g8ed1b From 942c2490c23f2800ad8143f5eb84a79b859aa743 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Mar 2022 15:41:21 +0000 Subject: KVM: x86/xen: Add KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID In order to intercept hypercalls such as VCPUOP_set_singleshot_timer, we need to be aware of the Xen CPU numbering. This looks a lot like the Hyper-V handling of vpidx, for obvious reasons. 
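As a hedged sketch of the userspace side (the vcpu_fd and the ID value are illustrative, not part of this patch), the VMM writes the new vCPU attribute once, before the guest starts issuing vcpu_op hypercalls:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Tell KVM the Xen/ACPI vCPU ID so VCPUOP_* hypercalls can be matched to it. */
	static int set_xen_vcpu_id(int vcpu_fd, __u32 xen_vcpu_id)
	{
		struct kvm_xen_vcpu_attr va = {
			.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID,
			.u.vcpu_id = xen_vcpu_id,
		};

		return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &va);
	}

If userspace never sets the attribute, kvm_xen_init_vcpu() below defaults the Xen vCPU ID to the KVM vcpu_idx.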
Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-12-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/x86.c | 1 + arch/x86/kvm/xen.c | 19 +++++++++++++++++++ arch/x86/kvm/xen.h | 5 +++++ include/uapi/linux/kvm.h | 3 +++ 5 files changed, 29 insertions(+) (limited to 'include/uapi/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b20f7d99d702..9e3408542b41 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -613,6 +613,7 @@ struct kvm_vcpu_xen { u64 runstate_entry_time; u64 runstate_times[4]; unsigned long evtchn_pending_sel; + u32 vcpu_id; /* The Xen / ACPI vCPU ID */ }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e6bc0b654bbb..3d7b65f33bd8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11262,6 +11262,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; + kvm_xen_init_vcpu(vcpu); kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_set_tsc_khz(vcpu, max_tsc_khz); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 0dae583c4a1d..244cf3cd858a 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -603,6 +603,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) r = 0; break; + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID: + if (data->u.vcpu_id >= KVM_MAX_VCPUS) + r = -EINVAL; + else { + vcpu->arch.xen.vcpu_id = data->u.vcpu_id; + r = 0; + } + break; + default: break; } @@ -678,6 +687,11 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) r = -EINVAL; break; + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID: + data->u.vcpu_id = vcpu->arch.xen.vcpu_id; + r = 0; + break; + default: break; } @@ -1377,6 +1391,11 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r) return true; } +void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu) +{ + vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx; +} + void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) { kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 852286de574e..54d587aae85b 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -25,6 +25,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); void kvm_xen_init_vm(struct kvm *kvm); void kvm_xen_destroy_vm(struct kvm *kvm); +void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu); void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu); int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm); @@ -75,6 +76,10 @@ static inline void kvm_xen_destroy_vm(struct kvm *kvm) { } +static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu) +{ +} + static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) { } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 623ed2cb228f..4b65e9f0a4d9 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1747,6 +1747,7 @@ struct kvm_xen_vcpu_attr { __u64 time_blocked; __u64 time_offline; } runstate; + __u32 vcpu_id; } u; }; @@ -1757,6 +1758,8 @@ struct kvm_xen_vcpu_attr { #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ 
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- cgit v1.2.3-59-g8ed1b From 536395260582be7443b0b35b0bbb89ffe3947f62 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Thu, 3 Mar 2022 15:41:22 +0000 Subject: KVM: x86/xen: handle PV timers oneshot mode If the guest has offloaded the timer virq, handle the following hypercalls for programming the timer: VCPUOP_set_singleshot_timer VCPUOP_stop_singleshot_timer set_timer_op(timestamp_ns) The event channel corresponding to the timer virq is then used to inject events once timer deadlines are met. For now we back the PV timer with hrtimer. [ dwmw2: Add save/restore, 32-bit compat mode, immediate delivery, don't check timer in kvm_vcpu_has_event() ] Signed-off-by: Joao Martins Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-13-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 4 + arch/x86/kvm/irq.c | 10 +- arch/x86/kvm/xen.c | 211 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 28 ++++++ include/uapi/linux/kvm.h | 6 ++ 5 files changed, 257 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9e3408542b41..8193d5285544 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -614,6 +614,10 @@ struct kvm_vcpu_xen { u64 runstate_times[4]; unsigned long evtchn_pending_sel; u32 vcpu_id; /* The Xen / ACPI vCPU ID */ + u32 timer_virq; + u64 timer_expires; /* In guest epoch */ + atomic_t timer_pending; + struct hrtimer timer; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 172b05343cfd..f371f1292ca3 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -22,10 +22,14 @@ */ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { + int r = 0; + if (lapic_in_kernel(vcpu)) - return apic_has_pending_timer(vcpu); + r = apic_has_pending_timer(vcpu); + if (kvm_xen_timer_enabled(vcpu)) + r += kvm_xen_has_pending_timer(vcpu); - return 0; + return r; } EXPORT_SYMBOL(kvm_cpu_has_pending_timer); @@ -143,6 +147,8 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) { if (lapic_in_kernel(vcpu)) kvm_inject_apic_timer_irqs(vcpu); + if (kvm_xen_timer_enabled(vcpu)) + kvm_xen_inject_timer_irqs(vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 244cf3cd858a..0e1595dc8c5f 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -23,6 +23,7 @@ #include "trace.h" +static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm); static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data); static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r); @@ -108,6 +109,66 @@ out: return ret; } +void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu) +{ + if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) { + struct kvm_xen_evtchn e; + + e.vcpu_id = vcpu->vcpu_id; + e.vcpu_idx = vcpu->vcpu_idx; + e.port = vcpu->arch.xen.timer_virq; + e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; + + kvm_xen_set_evtchn(&e, vcpu->kvm); + + vcpu->arch.xen.timer_expires = 0; + atomic_set(&vcpu->arch.xen.timer_pending, 0); + } +} + +static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu, + arch.xen.timer); + if (atomic_read(&vcpu->arch.xen.timer_pending)) + return 
HRTIMER_NORESTART; + + atomic_inc(&vcpu->arch.xen.timer_pending); + kvm_make_request(KVM_REQ_UNBLOCK, vcpu); + kvm_vcpu_kick(vcpu); + + return HRTIMER_NORESTART; +} + +static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns) +{ + atomic_set(&vcpu->arch.xen.timer_pending, 0); + vcpu->arch.xen.timer_expires = guest_abs; + + if (delta_ns <= 0) { + xen_timer_callback(&vcpu->arch.xen.timer); + } else { + ktime_t ktime_now = ktime_get(); + hrtimer_start(&vcpu->arch.xen.timer, + ktime_add_ns(ktime_now, delta_ns), + HRTIMER_MODE_ABS_HARD); + } +} + +static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu) +{ + hrtimer_cancel(&vcpu->arch.xen.timer); + vcpu->arch.xen.timer_expires = 0; + atomic_set(&vcpu->arch.xen.timer_pending, 0); +} + +static void kvm_xen_init_timer(struct kvm_vcpu *vcpu) +{ + hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_HARD); + vcpu->arch.xen.timer.function = xen_timer_callback; +} + static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) { struct kvm_vcpu_xen *vx = &v->arch.xen; @@ -612,6 +673,28 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) } break; + case KVM_XEN_VCPU_ATTR_TYPE_TIMER: + if (data->u.timer.port) { + if (data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) { + r = -EINVAL; + break; + } + vcpu->arch.xen.timer_virq = data->u.timer.port; + kvm_xen_init_timer(vcpu); + + /* Restart the timer if it's set */ + if (data->u.timer.expires_ns) + kvm_xen_start_timer(vcpu, data->u.timer.expires_ns, + data->u.timer.expires_ns - + get_kvmclock_ns(vcpu->kvm)); + } else if (kvm_xen_timer_enabled(vcpu)) { + kvm_xen_stop_timer(vcpu); + vcpu->arch.xen.timer_virq = 0; + } + + r = 0; + break; + default: break; } @@ -692,6 +775,13 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) r = 0; break; + case KVM_XEN_VCPU_ATTR_TYPE_TIMER: + data->u.timer.port = vcpu->arch.xen.timer_virq; + data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; + data->u.timer.expires_ns = vcpu->arch.xen.timer_expires; + r = 0; + break; + default: break; } @@ -827,6 +917,112 @@ static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, int cmd, u64 param, u6 return false; } +struct compat_vcpu_set_singleshot_timer { + uint64_t timeout_abs_ns; + uint32_t flags; +} __attribute__((packed)); + +static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd, + int vcpu_id, u64 param, u64 *r) +{ + struct vcpu_set_singleshot_timer oneshot; + s64 delta; + gpa_t gpa; + int idx; + + if (!kvm_xen_timer_enabled(vcpu)) + return false; + + switch (cmd) { + case VCPUOP_set_singleshot_timer: + if (vcpu->arch.xen.vcpu_id != vcpu_id) { + *r = -EINVAL; + return true; + } + idx = srcu_read_lock(&vcpu->kvm->srcu); + gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + + /* + * The only difference for 32-bit compat is the 4 bytes of + * padding after the interesting part of the structure. So + * for a faithful emulation of Xen we have to *try* to copy + * the padding and return -EFAULT if we can't. Otherwise we + * might as well just have copied the 12-byte 32-bit struct. 
+ */ + BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) != + offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns)); + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) != + sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns)); + BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) != + offsetof(struct vcpu_set_singleshot_timer, flags)); + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) != + sizeof_field(struct vcpu_set_singleshot_timer, flags)); + + if (!gpa || + kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) : + sizeof(struct compat_vcpu_set_singleshot_timer))) { + *r = -EFAULT; + return true; + } + + delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm); + if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) { + *r = -ETIME; + return true; + } + + kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta); + *r = 0; + return true; + + case VCPUOP_stop_singleshot_timer: + if (vcpu->arch.xen.vcpu_id != vcpu_id) { + *r = -EINVAL; + return true; + } + kvm_xen_stop_timer(vcpu); + *r = 0; + return true; + } + + return false; +} + +static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout, + u64 *r) +{ + if (!kvm_xen_timer_enabled(vcpu)) + return false; + + if (timeout) { + uint64_t guest_now = get_kvmclock_ns(vcpu->kvm); + int64_t delta = timeout - guest_now; + + /* Xen has a 'Linux workaround' in do_set_timer_op() which + * checks for negative absolute timeout values (caused by + * integer overflow), and for values about 13 days in the + * future (2^50ns) which would be caused by jiffies + * overflow. For those cases, it sets the timeout 100ms in + * the future (not *too* soon, since if a guest really did + * set a long timeout on purpose we don't want to keep + * churning CPU time by waking it up). + */ + if (unlikely((int64_t)timeout < 0 || + (delta > 0 && (uint32_t) (delta >> 50) != 0))) { + delta = 100 * NSEC_PER_MSEC; + timeout = guest_now + delta; + } + + kvm_xen_start_timer(vcpu, timeout, delta); + } else { + kvm_xen_stop_timer(vcpu); + } + + *r = 0; + return true; +} + int kvm_xen_hypercall(struct kvm_vcpu *vcpu) { bool longmode; @@ -870,6 +1066,18 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) case __HYPERVISOR_sched_op: handled = kvm_xen_hcall_sched_op(vcpu, params[0], params[1], &r); break; + case __HYPERVISOR_vcpu_op: + handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1], + params[2], &r); + break; + case __HYPERVISOR_set_timer_op: { + u64 timeout = params[0]; + /* In 32-bit mode, the 64-bit timeout is in two 32-bit params. 
*/ + if (!longmode) + timeout |= params[1] << 32; + handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r); + break; + } default: break; } @@ -1398,6 +1606,9 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu) void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) { + if (kvm_xen_timer_enabled(vcpu)) + kvm_xen_stop_timer(vcpu); + kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.runstate_cache); kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 54d587aae85b..ee5c4ae0755c 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -62,6 +62,20 @@ static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu) vcpu->arch.xen.evtchn_pending_sel; } +static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu) +{ + return !!vcpu->arch.xen.timer_virq; +} + +static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu) +{ + if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu)) + return atomic_read(&vcpu->arch.xen.timer_pending); + + return 0; +} + +void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu); #else static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) { @@ -107,6 +121,20 @@ static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu) { return false; } + +static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return 0; +} + +static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu) +{ +} + +static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu) +{ + return false; +} #endif int kvm_xen_hypercall(struct kvm_vcpu *vcpu); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4b65e9f0a4d9..cb223e425223 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1748,6 +1748,11 @@ struct kvm_xen_vcpu_attr { __u64 time_offline; } runstate; __u32 vcpu_id; + struct { + __u32 port; + __u32 priority; + __u64 expires_ns; + } timer; } u; }; @@ -1760,6 +1765,7 @@ struct kvm_xen_vcpu_attr { #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6 +#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- cgit v1.2.3-59-g8ed1b From 28d1629f751c4a5f9437fbaa0ee4ed81d1a8e587 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Mar 2022 15:41:23 +0000 Subject: KVM: x86/xen: Kernel acceleration for XENVER_version Turns out this is a fast path for PV guests because they use it to trigger the event channel upcall. So letting it bounce all the way up to userspace is not great. 
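A minimal sketch of opting in from a VMM (the vm_fd and the Xen 4.16 version code are illustrative assumptions; leaving xen_version at zero keeps the exit-to-userspace behaviour):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Let the kernel answer XENVER_version directly, here pretending to be Xen 4.16. */
	static int set_xen_version(int vm_fd)
	{
		struct kvm_xen_hvm_attr va = {
			.type = KVM_XEN_ATTR_TYPE_XEN_VERSION,
			.u.xen_version = (4 << 16) | 16,	/* XEN_MAJOR << 16 | XEN_MINOR */
		};

		return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &va);
	}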
Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-14-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/xen.c | 19 +++++++++++++++++++ include/uapi/linux/kvm.h | 3 ++- 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8193d5285544..f3fba9d8ddc6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1026,6 +1026,7 @@ struct msr_bitmap_range { /* Xen emulation context */ struct kvm_xen { + u32 xen_version; bool long_mode; u8 upcall_vector; struct gfn_to_pfn_cache shinfo_cache; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 0e1595dc8c5f..98438f27f6b3 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -465,6 +466,13 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) r = kvm_xen_setattr_evtchn(kvm, data); break; + case KVM_XEN_ATTR_TYPE_XEN_VERSION: + mutex_lock(&kvm->lock); + kvm->arch.xen.xen_version = data->u.xen_version; + mutex_unlock(&kvm->lock); + r = 0; + break; + default: break; } @@ -497,6 +505,11 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) r = 0; break; + case KVM_XEN_ATTR_TYPE_XEN_VERSION: + data->u.xen_version = kvm->arch.xen.xen_version; + r = 0; + break; + default: break; } @@ -1059,6 +1072,12 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) params[3], params[4], params[5]); switch (input) { + case __HYPERVISOR_xen_version: + if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) { + r = vcpu->kvm->arch.xen.xen_version; + handled = true; + } + break; case __HYPERVISOR_event_channel_op: if (params[0] == EVTCHNOP_send) handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index cb223e425223..4dda3896ed71 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1711,7 +1711,7 @@ struct kvm_xen_hvm_attr { __u32 padding[4]; } deliver; } evtchn; - + __u32 xen_version; __u64 pad[8]; } u; }; @@ -1722,6 +1722,7 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ #define KVM_XEN_ATTR_TYPE_EVTCHN 0x3 +#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4 /* Per-vCPU Xen attributes */ #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) -- cgit v1.2.3-59-g8ed1b From fde0451be8fb3208d4d146b8602d99ee8139e515 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Mar 2022 15:41:24 +0000 Subject: KVM: x86/xen: Support per-vCPU event channel upcall via local APIC Windows uses a per-vCPU vector, and it's delivered via the local APIC basically like an MSI (with associated EOI) unlike the traditional guest-wide vector which is just magically asserted by Xen (and in the KVM case by kvm_xen_has_interrupt() / kvm_cpu_get_extint()). Now that the kernel is able to raise event channel events for itself, being able to do so for Windows guests is also going to be useful. 
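For illustration (the vcpu_fd is hypothetical; a Windows guest would choose the vector itself via HVMOP_set_evtchn_upcall_vector and the VMM would forward it here), configuring the per-vCPU vector is a single attribute write:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Set the per-vCPU upcall vector; vectors 0x01-0x0f are rejected, 0 clears it. */
	static int set_upcall_vector(int vcpu_fd, __u8 vector)
	{
		struct kvm_xen_vcpu_attr va = {
			.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR,
			.u.vector = vector,	/* e.g. 0x40 */
		};

		return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &va);
	}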
Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-15-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/xen.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 ++ 3 files changed, 43 insertions(+) (limited to 'include/uapi/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f3fba9d8ddc6..998caf7a3ce9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -606,6 +606,7 @@ struct kvm_vcpu_hv { struct kvm_vcpu_xen { u64 hypercall_rip; u32 current_runstate; + u8 upcall_vector; struct gfn_to_pfn_cache vcpu_info_cache; struct gfn_to_pfn_cache vcpu_time_info_cache; struct gfn_to_pfn_cache runstate_cache; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 98438f27f6b3..5afaf7b59944 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -314,6 +314,22 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); } +static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) +{ + struct kvm_lapic_irq irq = { }; + int r; + + irq.dest_id = v->vcpu_id; + irq.vector = v->arch.xen.upcall_vector; + irq.dest_mode = APIC_DEST_PHYSICAL; + irq.shorthand = APIC_DEST_NOSHORT; + irq.delivery_mode = APIC_DM_FIXED; + irq.level = 1; + + /* The fast version will always work for physical unicast */ + WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL)); +} + /* * On event channel delivery, the vcpu_info may not have been accessible. * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which @@ -374,6 +390,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v) } read_unlock_irqrestore(&gpc->lock, flags); + /* For the per-vCPU lapic vector, deliver it as MSI. */ + if (v->arch.xen.upcall_vector) + kvm_xen_inject_vcpu_vector(v); + mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); } @@ -708,6 +728,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) r = 0; break; + case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR: + if (data->u.vector && data->u.vector < 0x10) + r = -EINVAL; + else { + vcpu->arch.xen.upcall_vector = data->u.vector; + r = 0; + } + break; + default: break; } @@ -795,6 +824,11 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) r = 0; break; + case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR: + data->u.vector = vcpu->arch.xen.upcall_vector; + r = 0; + break; + default: break; } @@ -1228,6 +1262,12 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm) kick_vcpu = true; } } + + /* For the per-vCPU lapic vector, deliver it as MSI. 
*/ + if (kick_vcpu && vcpu->arch.xen.upcall_vector) { + kvm_xen_inject_vcpu_vector(vcpu); + kick_vcpu = false; + } } out_rcu: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4dda3896ed71..a9ba690c4f37 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1754,6 +1754,7 @@ struct kvm_xen_vcpu_attr { __u32 priority; __u64 expires_ns; } timer; + __u8 vector; } u; }; @@ -1767,6 +1768,7 @@ struct kvm_xen_vcpu_attr { /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6 #define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7 +#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- cgit v1.2.3-59-g8ed1b From 661a20fab7d156cf6b9a407c946a1e558a633151 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Mar 2022 15:41:25 +0000 Subject: KVM: x86/xen: Advertise and document KVM_XEN_HVM_CONFIG_EVTCHN_SEND MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At the end of the patch series adding this batch of event channel acceleration features, finally add the feature bit which advertises them and document it all. For SCHEDOP_poll we need to wake a polling vCPU when a given port is triggered, even when it's masked — and we want to implement that in the kernel, for efficiency. So we want the kernel to know that it has sole ownership of event channel delivery. Thus, we allow userspace to make the 'promise' by setting the corresponding feature bit in its KVM_XEN_HVM_CONFIG call. As we implement SCHEDOP_poll bypass later, we will do so only if that promise has been made by userspace. Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-16-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 129 +++++++++++++++++++++++++++++++++++++---- arch/x86/kvm/x86.c | 3 +- arch/x86/kvm/xen.c | 6 +- include/uapi/linux/kvm.h | 1 + 4 files changed, 127 insertions(+), 12 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 700df350332b..fdfd9e1bc7f7 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -982,12 +982,22 @@ memory. __u8 pad2[30]; }; -If the KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag is returned from the -KVM_CAP_XEN_HVM check, it may be set in the flags field of this ioctl. -This requests KVM to generate the contents of the hypercall page -automatically; hypercalls will be intercepted and passed to userspace -through KVM_EXIT_XEN. In this case, all of the blob size and address -fields must be zero. +If certain flags are returned from the KVM_CAP_XEN_HVM check, they may +be set in the flags field of this ioctl: + +The KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag requests KVM to generate +the contents of the hypercall page automatically; hypercalls will be +intercepted and passed to userspace through KVM_EXIT_XEN. In this +case, all of the blob size and address fields must be zero. + +The KVM_XEN_HVM_CONFIG_EVTCHN_SEND flag indicates to KVM that userspace +will always use the KVM_XEN_HVM_EVTCHN_SEND ioctl to deliver event +channel interrupts rather than manipulating the guest's shared_info +structures directly. This, in turn, may allow KVM to enable features +such as intercepting the SCHEDOP_poll hypercall to accelerate PV +spinlock operation for the guest.
Userspace may still use the ioctl +to deliver events if it was advertised, even if userspace does not +send this indication that it will always do so. No other flags are currently valid in the struct kvm_xen_hvm_config. @@ -5216,7 +5226,25 @@ have deterministic behavior. struct { __u64 gfn; } shared_info; - __u64 pad[4]; + struct { + __u32 send_port; + __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */ + __u32 flags; + union { + struct { + __u32 port; + __u32 vcpu; + __u32 priority; + } port; + struct { + __u32 port; /* Zero for eventfd */ + __s32 fd; + } eventfd; + __u32 padding[4]; + } deliver; + } evtchn; + __u32 xen_version; + __u64 pad[8]; } u; }; @@ -5247,6 +5275,30 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO KVM_XEN_ATTR_TYPE_UPCALL_VECTOR Sets the exception vector used to deliver Xen event channel upcalls. + This is the HVM-wide vector injected directly by the hypervisor + (not through the local APIC), typically configured by a guest via + HVM_PARAM_CALLBACK_IRQ. + +KVM_XEN_ATTR_TYPE_EVTCHN + This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates + support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures + an outbound port number for interception of EVTCHNOP_send requests + from the guest. A given sending port number may be directed back + to a specified vCPU (by APIC ID) / port / priority on the guest, + or to trigger events on an eventfd. The vCPU and priority can be + changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, + but other fields cannot change for a given sending port. A port + mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags + field. + +KVM_XEN_ATTR_TYPE_XEN_VERSION + This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates + support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures + the 32-bit version code returned to the guest when it invokes the + XENVER_version call; typically (XEN_MAJOR << 16 | XEN_MINOR). PV + Xen guests will often use this as a dummy hypercall to trigger + event channel delivery, so responding within the kernel without + exiting to userspace is beneficial. 4.127 KVM_XEN_HVM_GET_ATTR -------------------------- @@ -5258,7 +5310,8 @@ KVM_XEN_ATTR_TYPE_UPCALL_VECTOR :Returns: 0 on success, < 0 on error Allows Xen VM attributes to be read. For the structure and types, -see KVM_XEN_HVM_SET_ATTR above. +see KVM_XEN_HVM_SET_ATTR above. The KVM_XEN_ATTR_TYPE_EVTCHN +attribute cannot be read. 4.128 KVM_XEN_VCPU_SET_ATTR --------------------------- @@ -5285,6 +5338,13 @@ see KVM_XEN_HVM_SET_ATTR above. __u64 time_blocked; __u64 time_offline; } runstate; + __u32 vcpu_id; + struct { + __u32 port; + __u32 priority; + __u64 expires_ns; + } timer; + __u8 vector; } u; }; @@ -5326,6 +5386,27 @@ KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST or RUNSTATE_offline) to set the current accounted state as of the adjusted state_entry_time. +KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID + This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates + support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the Xen + vCPU ID of the given vCPU, to allow timer-related VCPU operations to + be intercepted by KVM. + +KVM_XEN_VCPU_ATTR_TYPE_TIMER + This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates + support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the + event channel port/priority for the VIRQ_TIMER of the vCPU, as well + as allowing a pending timer to be saved/restored.
+ +KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR + This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates + support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the + per-vCPU local APIC upcall vector, configured by a Xen guest with + the HVMOP_set_evtchn_upcall_vector hypercall. This is typically + used by Windows guests, and is distinct from the HVM-wide upcall + vector configured with HVM_PARAM_CALLBACK_IRQ. + + 4.129 KVM_XEN_VCPU_GET_ATTR --------------------------- @@ -5645,6 +5726,25 @@ enabled with ``arch_prctl()``, but this may change in the future. The offsets of the state save areas in struct kvm_xsave follow the contents of CPUID leaf 0xD on the host. +4.135 KVM_XEN_HVM_EVTCHN_SEND +----------------------------- + +:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND +:Architectures: x86 +:Type: vm ioctl +:Parameters: struct kvm_irq_routing_xen_evtchn +:Returns: 0 on success, < 0 on error + + +:: + + struct kvm_irq_routing_xen_evtchn { + __u32 port; + __u32 vcpu; + __u32 priority; + }; + +This ioctl injects an event channel interrupt directly to the guest vCPU. 5. The kvm_run structure ======================== @@ -7620,8 +7720,9 @@ PVHVM guests. Valid flags are:: #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) - #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 2) - #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 3) + #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) + #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) + #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG ioctl is available, for the guest to set its hypercall page. @@ -7645,6 +7746,14 @@ The KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL flag indicates that IRQ routing entries of the type KVM_IRQ_ROUTING_XEN_EVTCHN are supported, with the priority field set to indicate 2 level event channel delivery. +The KVM_XEN_HVM_CONFIG_EVTCHN_SEND flag indicates that KVM supports +injecting event channel events directly into the guest with the +KVM_XEN_HVM_EVTCHN_SEND ioctl. It also indicates support for the +KVM_XEN_ATTR_TYPE_EVTCHN/XEN_VERSION HVM attributes and the +KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID/TIMER/UPCALL_VECTOR vCPU attributes +related to event channel delivery, timers, and the XENVER_version +interception.
+ 8.31 KVM_CAP_PPC_MULTITCE ------------------------- diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d7b65f33bd8..1a7d1b5f7232 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4276,7 +4276,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | KVM_XEN_HVM_CONFIG_SHARED_INFO | - KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL; + KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | + KVM_XEN_HVM_CONFIG_EVTCHN_SEND; if (sched_info_on()) r |= KVM_XEN_HVM_CONFIG_RUNSTATE; break; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 5afaf7b59944..53232c9ff89c 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -909,7 +909,11 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) { - if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) + /* Only some feature flags need to be *enabled* by userspace */ + u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | + KVM_XEN_HVM_CONFIG_EVTCHN_SEND; + + if (xhc->flags & ~permitted_flags) return -EINVAL; /* diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a9ba690c4f37..ee5cc9e2a837 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1232,6 +1232,7 @@ struct kvm_x86_mce { #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) +#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) struct kvm_xen_hvm_config { __u32 flags; -- cgit v1.2.3-59-g8ed1b From ffbb61d09fc56c85e28b110494f3788d0ed4d1f8 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 25 Feb 2022 14:53:02 +0000 Subject: KVM: x86: Accept KVM_[GS]ET_TSC_KHZ as a VM ioctl. This sets the default TSC frequency for subsequently created vCPUs. Signed-off-by: David Woodhouse Message-Id: <20220225145304.36166-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 11 +++++++---- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 26 +++++++++++++++++++++++++- include/uapi/linux/kvm.h | 4 +++- 4 files changed, 37 insertions(+), 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index fdfd9e1bc7f7..b102ba7cf903 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1897,22 +1897,25 @@ the future. 4.55 KVM_SET_TSC_KHZ -------------------- -:Capability: KVM_CAP_TSC_CONTROL +:Capability: KVM_CAP_TSC_CONTROL / KVM_CAP_VM_TSC_CONTROL :Architectures: x86 -:Type: vcpu ioctl +:Type: vcpu ioctl / vm ioctl :Parameters: virtual tsc_khz :Returns: 0 on success, -1 on error Specifies the tsc frequency for the virtual machine. The unit of the frequency is KHz. +If the KVM_CAP_VM_TSC_CONTROL capability is advertised, this can also +be used as a vm ioctl to set the initial tsc frequency of subsequently +created vCPUs. 
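A hedged usage sketch (the vm_fd and the 2.5 GHz value are illustrative): the VM-scoped variant passes the frequency as the ioctl argument, exactly like the vCPU variant, and must run before the vCPUs it should affect are created:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Set the default guest TSC frequency, in kHz, inherited by future vCPUs. */
	static int set_vm_tsc_khz(int vm_fd, unsigned long khz)
	{
		return ioctl(vm_fd, KVM_SET_TSC_KHZ, khz);	/* e.g. 2500000 */
	}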
4.56 KVM_GET_TSC_KHZ -------------------- -:Capability: KVM_CAP_GET_TSC_KHZ +:Capability: KVM_CAP_GET_TSC_KHZ / KVM_CAP_VM_TSC_CONTROL :Architectures: x86 -:Type: vcpu ioctl +:Type: vcpu ioctl / vm ioctl :Parameters: none :Returns: virtual tsc-khz on success, negative value on error diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5370744b789c..fac990cc189d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1126,6 +1126,8 @@ struct kvm_arch { u64 cur_tsc_generation; int nr_vcpus_matched_tsc; + u32 default_tsc_khz; + seqcount_raw_spinlock_t pvclock_sc; bool use_master_clock; u64 master_kernel_ns; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1a7d1b5f7232..c069b97a8523 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4324,6 +4324,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = boot_cpu_has(X86_FEATURE_XSAVE); break; case KVM_CAP_TSC_CONTROL: + case KVM_CAP_VM_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_X2APIC_API: @@ -6522,6 +6523,28 @@ set_pit2_out: case KVM_GET_CLOCK: r = kvm_vm_ioctl_get_clock(kvm, argp); break; + case KVM_SET_TSC_KHZ: { + u32 user_tsc_khz; + + r = -EINVAL; + user_tsc_khz = (u32)arg; + + if (kvm_has_tsc_control && + user_tsc_khz >= kvm_max_guest_tsc_khz) + goto out; + + if (user_tsc_khz == 0) + user_tsc_khz = tsc_khz; + + WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); + r = 0; + + goto out; + } + case KVM_GET_TSC_KHZ: { + r = READ_ONCE(kvm->arch.default_tsc_khz); + goto out; + } case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (!kvm_x86_ops.mem_enc_ioctl) @@ -11266,7 +11289,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) kvm_xen_init_vcpu(vcpu); kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); - kvm_set_tsc_khz(vcpu, max_tsc_khz); + kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); kvm_vcpu_reset(vcpu, false); kvm_init_mmu(vcpu); vcpu_put(vcpu); @@ -11714,6 +11737,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) pvclock_update_vm_gtod_copy(kvm); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); + kvm->arch.default_tsc_khz = max_tsc_khz; kvm->arch.guest_can_read_msr_platform_info = true; kvm->arch.enable_pmu = enable_pmu; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ee5cc9e2a837..8616af85dc5d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_MEM_OP_EXTENSION 211 #define KVM_CAP_PMU_CAPABILITY 212 #define KVM_CAP_DISABLE_QUIRKS2 213 +#define KVM_CAP_VM_TSC_CONTROL 214 #ifdef KVM_CAP_IRQ_ROUTING @@ -1471,7 +1472,8 @@ struct kvm_s390_ucas_mapping { #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) /* Available with KVM_CAP_PPC_GET_PVINFO */ #define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo) -/* Available with KVM_CAP_TSC_CONTROL */ +/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with +* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */ #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) /* Available with KVM_CAP_PCI_2_3 */ -- cgit v1.2.3-59-g8ed1b From 66df0fdb5981052f3ad97c9879eda93712bdefc2 Mon Sep 17 00:00:00 2001 From: Haiyue Wang Date: Sun, 3 Apr 2022 19:53:26 +0800 Subject: bpf: Correct the comment for BTF kind bitfield The commit 8fd886911a6a ("bpf: Add BTF_KIND_FLOAT to uapi") extended the BTF kind bitfield from 4 to 5 bits; correct the comment accordingly.
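To make the corrected layout concrete, a hedged reader-side sketch (mirroring the BTF_INFO_KIND() helper defined in the same UAPI header):

	#include <linux/btf.h>

	/* Extract the kind from btf_type.info: bits 24-28, i.e. the 5-bit field. */
	static unsigned char btf_type_kind(const struct btf_type *t)
	{
		return (t->info >> 24) & 0x1f;
	}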
Signed-off-by: Haiyue Wang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220403115327.205964-1-haiyue.wang@intel.com --- include/uapi/linux/btf.h | 4 ++-- tools/include/uapi/linux/btf.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index b0d8fea1951d..a9162a6c0284 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -33,8 +33,8 @@ struct btf_type { /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members) * bits 16-23: unused - * bits 24-27: kind (e.g. int, ptr, array...etc) - * bits 28-30: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused * bit 31: kind_flag, currently used by * struct, union and fwd */ diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h index b0d8fea1951d..a9162a6c0284 100644 --- a/tools/include/uapi/linux/btf.h +++ b/tools/include/uapi/linux/btf.h @@ -33,8 +33,8 @@ struct btf_type { /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members) * bits 16-23: unused - * bits 24-27: kind (e.g. int, ptr, array...etc) - * bits 28-30: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused * bit 31: kind_flag, currently used by * struct, union and fwd */ -- cgit v1.2.3-59-g8ed1b From 1ee375d77bb944321c969b456aa73994566cecf6 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 4 Apr 2022 10:54:47 -0700 Subject: net, uapi: remove inclusion of arpa/inet.h In include/uapi/linux/tipc_config.h, there's a comment that it includes arpa/inet.h for ntohs; but ntohs is not defined in any UAPI header. For now, reuse the definitions from include/linux/byteorder/generic.h, since the various conversion functions do exist in UAPI headers: include/uapi/linux/byteorder/big_endian.h include/uapi/linux/byteorder/little_endian.h We would like to get to the point where we can build UAPI header tests with -nostdinc, meaning that kernel UAPI headers should not have a circular dependency on libc headers. Link: https://android-review.googlesource.com/c/platform/bionic/+/2048127 Suggested-by: Jakub Kicinski Signed-off-by: Nick Desaulniers Signed-off-by: David S. Miller --- include/uapi/linux/tipc_config.h | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h index 4dfc05651c98..c00adf2fe868 100644 --- a/include/uapi/linux/tipc_config.h +++ b/include/uapi/linux/tipc_config.h @@ -43,10 +43,6 @@ #include #include -#ifndef __KERNEL__ -#include <arpa/inet.h> /* for ntohs etc.
*/ -#endif - /* * Configuration * @@ -269,33 +265,33 @@ static inline int TLV_OK(const void *tlv, __u16 space) */ return (space >= TLV_SPACE(0)) && - (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space); + (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_len) <= space); } static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type) { return TLV_OK(tlv, space) && - (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type); + (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_type) == exp_type); } static inline int TLV_GET_LEN(struct tlv_desc *tlv) { - return ntohs(tlv->tlv_len); + return __be16_to_cpu(tlv->tlv_len); } static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len) { - tlv->tlv_len = htons(len); + tlv->tlv_len = __cpu_to_be16(len); } static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type) { - return (ntohs(tlv->tlv_type) == type); + return (__be16_to_cpu(tlv->tlv_type) == type); } static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type) { - tlv->tlv_type = htons(type); + tlv->tlv_type = __cpu_to_be16(type); } static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) @@ -305,8 +301,8 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) tlv_len = TLV_LENGTH(len); tlv_ptr = (struct tlv_desc *)tlv; - tlv_ptr->tlv_type = htons(type); - tlv_ptr->tlv_len = htons(tlv_len); + tlv_ptr->tlv_type = __cpu_to_be16(type); + tlv_ptr->tlv_len = __cpu_to_be16(tlv_len); if (len && data) { memcpy(TLV_DATA(tlv_ptr), data, len); memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len); } @@ -348,7 +344,7 @@ static inline void *TLV_LIST_DATA(struct tlv_list_desc *list) static inline void TLV_LIST_STEP(struct tlv_list_desc *list) { - __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len)); + __u16 tlv_space = TLV_ALIGN(__be16_to_cpu(list->tlv_ptr->tlv_len)); list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space); list->tlv_space -= tlv_space; } @@ -404,9 +400,9 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags, msg_len = TCM_LENGTH(data_len); tcm_hdr = (struct tipc_cfg_msg_hdr *)msg; - tcm_hdr->tcm_len = htonl(msg_len); - tcm_hdr->tcm_type = htons(cmd); - tcm_hdr->tcm_flags = htons(flags); + tcm_hdr->tcm_len = __cpu_to_be32(msg_len); + tcm_hdr->tcm_type = __cpu_to_be16(cmd); + tcm_hdr->tcm_flags = __cpu_to_be16(flags); if (data_len && data) { memcpy(TCM_DATA(msg), data, data_len); memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len); -- cgit v1.2.3-59-g8ed1b From fce96cf0443083e37455eff8f78fd240c621dae3 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Mon, 7 Mar 2022 15:33:53 -0600 Subject: virt: Add SEV-SNP guest driver The SEV-SNP specification provides the guest a mechanism to communicate with the PSP without risk from a malicious hypervisor who wishes to read, alter, drop or replay the messages sent. The driver uses snp_issue_guest_request() to issue GHCB SNP_GUEST_REQUEST or SNP_EXT_GUEST_REQUEST NAE events to submit the request to the PSP. The PSP requires that all communication should be encrypted using a key specified through a struct snp_guest_platform_data descriptor. Userspace can use the SNP_GET_REPORT ioctl() to query the guest attestation report. See the SEV-SNP spec section Guest Messages for more details. [ bp: Remove the "what" from the commit message, massage.
] Signed-off-by: Brijesh Singh
Signed-off-by: Borislav Petkov
Link: https://lore.kernel.org/r/20220307213356.2797205-44-brijesh.singh@amd.com
---
 Documentation/virt/coco/sevguest.rst  |  86 +++++
 Documentation/virt/index.rst          |   1 +
 drivers/virt/Kconfig                  |   3 +
 drivers/virt/Makefile                 |   1 +
 drivers/virt/coco/sevguest/Kconfig    |  14 +
 drivers/virt/coco/sevguest/Makefile   |   2 +
 drivers/virt/coco/sevguest/sevguest.c | 607 ++++++++++++++++++++++++++++++++++
 drivers/virt/coco/sevguest/sevguest.h |  98 ++++++
 include/uapi/linux/sev-guest.h        |  50 +++
 9 files changed, 862 insertions(+)
 create mode 100644 Documentation/virt/coco/sevguest.rst
 create mode 100644 drivers/virt/coco/sevguest/Kconfig
 create mode 100644 drivers/virt/coco/sevguest/Makefile
 create mode 100644 drivers/virt/coco/sevguest/sevguest.c
 create mode 100644 drivers/virt/coco/sevguest/sevguest.h
 create mode 100644 include/uapi/linux/sev-guest.h

(limited to 'include/uapi/linux')

diff --git a/Documentation/virt/coco/sevguest.rst b/Documentation/virt/coco/sevguest.rst
new file mode 100644
index 000000000000..3da782e867a3
--- /dev/null
+++ b/Documentation/virt/coco/sevguest.rst
@@ -0,0 +1,86 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================================
+The Definitive SEV Guest API Documentation
+===================================================================
+
+1. General description
+======================
+
+The SEV API is a set of ioctls that are used by the guest or hypervisor
+to get or set a certain aspect of the SEV virtual machine. The ioctls belong
+to the following classes:
+
+ - Hypervisor ioctls: These query and set global attributes which affect the
+   whole SEV firmware. These ioctls are used by platform provisioning tools.
+
+ - Guest ioctls: These query and set attributes of the SEV virtual machine.
+
+2. API description
+==================
+
+This section describes the ioctls that are used for querying the SEV guest report
+from the SEV firmware. For each ioctl, the following information is provided
+along with a description:
+
+  Technology:
+      which SEV technology provides this ioctl. SEV, SEV-ES, SEV-SNP or all.
+
+  Type:
+      hypervisor or guest. The ioctl can be used inside the guest or the
+      hypervisor.
+
+  Parameters:
+      what parameters are accepted by the ioctl.
+
+  Returns:
+      the return value. General error numbers (-ENOMEM, -EINVAL)
+      are not detailed, but errors with specific meanings are.
+
+The guest ioctl should be issued on a file descriptor of the /dev/sev-guest device.
+The ioctl accepts struct snp_guest_request_ioctl. The input and output structures are
+specified through the req_data and resp_data fields respectively. If the ioctl fails
+to execute due to a firmware error, then the fw_err field will hold the firmware
+error code; otherwise fw_err will be set to 0x00000000000000ff.
+
+The firmware checks that the message sequence counter is one greater than
+the guest's message sequence counter. If the guest driver fails to increment the
+message counter (e.g. counter overflow), then -EIO will be returned.
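For illustration, a minimal guest userspace sketch of this calling convention might
look as follows (assuming only the /dev/sev-guest node and the UAPI types added by
this series; the request wrapper structure itself is shown immediately below, and
error handling is abbreviated)::

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/sev-guest.h>

	int request_report(void)
	{
		struct snp_report_req req = {};
		struct snp_report_resp resp = {};
		struct snp_guest_request_ioctl guest_req = {
			.msg_version = 1,
			.req_data = (__u64)(unsigned long)&req,
			.resp_data = (__u64)(unsigned long)&resp,
		};
		int fd, rc;

		/* user_data is echoed back inside the signed report, e.g. a nonce */
		memset(req.user_data, 0x42, sizeof(req.user_data));

		fd = open("/dev/sev-guest", O_RDWR);
		if (fd < 0)
			return -1;

		rc = ioctl(fd, SNP_GET_REPORT, &guest_req);
		if (rc)
			fprintf(stderr, "SNP_GET_REPORT failed, fw_err %#llx\n",
				(unsigned long long)guest_req.fw_err);
		/* on success, resp.data holds the attestation report */

		close(fd);
		return rc;
	}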
+ +:: + + struct snp_guest_request_ioctl { + /* Message version number */ + __u32 msg_version; + + /* Request and response structure address */ + __u64 req_data; + __u64 resp_data; + + /* firmware error code on failure (see psp-sev.h) */ + __u64 fw_err; + }; + +2.1 SNP_GET_REPORT +------------------ + +:Technology: sev-snp +:Type: guest ioctl +:Parameters (in): struct snp_report_req +:Returns (out): struct snp_report_resp on success, -negative on error + +The SNP_GET_REPORT ioctl can be used to query the attestation report from the +SEV-SNP firmware. The ioctl uses the SNP_GUEST_REQUEST (MSG_REPORT_REQ) command +provided by the SEV-SNP firmware to query the attestation report. + +On success, the snp_report_resp.data will contains the report. The report +contain the format described in the SEV-SNP specification. See the SEV-SNP +specification for further details. + + +Reference +--------- + +SEV-SNP and GHCB specification: developer.amd.com/sev + +The driver is based on SEV-SNP firmware spec 0.9 and GHCB spec version 2.0. diff --git a/Documentation/virt/index.rst b/Documentation/virt/index.rst index edea7fea95a8..40ad0d20032e 100644 --- a/Documentation/virt/index.rst +++ b/Documentation/virt/index.rst @@ -13,6 +13,7 @@ Linux Virtualization Support guest-halt-polling ne_overview acrn/index + coco/sevguest .. only:: html and subproject diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index 121b9293c737..7d3273cfab27 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -47,4 +47,7 @@ source "drivers/virt/vboxguest/Kconfig" source "drivers/virt/nitro_enclaves/Kconfig" source "drivers/virt/acrn/Kconfig" + +source "drivers/virt/coco/sevguest/Kconfig" + endif diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile index 108d0ffcc9aa..7b87a7ba1972 100644 --- a/drivers/virt/Makefile +++ b/drivers/virt/Makefile @@ -9,3 +9,4 @@ obj-y += vboxguest/ obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/ obj-$(CONFIG_ACRN_HSM) += acrn/ +obj-$(CONFIG_SEV_GUEST) += coco/sevguest/ diff --git a/drivers/virt/coco/sevguest/Kconfig b/drivers/virt/coco/sevguest/Kconfig new file mode 100644 index 000000000000..74ca1fe09437 --- /dev/null +++ b/drivers/virt/coco/sevguest/Kconfig @@ -0,0 +1,14 @@ +config SEV_GUEST + tristate "AMD SEV Guest driver" + default m + depends on AMD_MEM_ENCRYPT + select CRYPTO_AEAD2 + select CRYPTO_GCM + help + SEV-SNP firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. + + If you choose 'M' here, this module will be called sevguest. diff --git a/drivers/virt/coco/sevguest/Makefile b/drivers/virt/coco/sevguest/Makefile new file mode 100644 index 000000000000..b1ffb2b4177b --- /dev/null +++ b/drivers/virt/coco/sevguest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SEV_GUEST) += sevguest.o diff --git a/drivers/virt/coco/sevguest/sevguest.c b/drivers/virt/coco/sevguest/sevguest.c new file mode 100644 index 000000000000..beda93cdeb4f --- /dev/null +++ b/drivers/virt/coco/sevguest/sevguest.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Secure Encrypted Virtualization Nested Paging (SEV-SNP) guest request interface + * + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Author: Brijesh Singh + */ + +#define pr_fmt(fmt) "SNP: GUEST: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "sevguest.h" + +#define DEVICE_NAME "sev-guest" +#define AAD_LEN 48 +#define MSG_HDR_VER 1 + +struct snp_guest_crypto { + struct crypto_aead *tfm; + u8 *iv, *authtag; + int iv_len, a_len; +}; + +struct snp_guest_dev { + struct device *dev; + struct miscdevice misc; + + struct snp_guest_crypto *crypto; + struct snp_guest_msg *request, *response; + struct snp_secrets_page_layout *layout; + struct snp_req_data input; + u32 *os_area_msg_seqno; + u8 *vmpck; +}; + +static u32 vmpck_id; +module_param(vmpck_id, uint, 0444); +MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP."); + +/* Mutex to serialize the shared buffer access and command handling. */ +static DEFINE_MUTEX(snp_cmd_mutex); + +static bool is_vmpck_empty(struct snp_guest_dev *snp_dev) +{ + char zero_key[VMPCK_KEY_LEN] = {0}; + + if (snp_dev->vmpck) + return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN); + + return true; +} + +static void snp_disable_vmpck(struct snp_guest_dev *snp_dev) +{ + memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN); + snp_dev->vmpck = NULL; +} + +static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev) +{ + u64 count; + + lockdep_assert_held(&snp_cmd_mutex); + + /* Read the current message sequence counter from secrets pages */ + count = *snp_dev->os_area_msg_seqno; + + return count + 1; +} + +/* Return a non-zero on success */ +static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev) +{ + u64 count = __snp_get_msg_seqno(snp_dev); + + /* + * The message sequence counter for the SNP guest request is a 64-bit + * value but the version 2 of GHCB specification defines a 32-bit storage + * for it. If the counter exceeds the 32-bit value then return zero. + * The caller should check the return value, but if the caller happens to + * not check the value and use it, then the firmware treats zero as an + * invalid number and will fail the message request. + */ + if (count >= UINT_MAX) { + dev_err(snp_dev->dev, "request message sequence counter overflow\n"); + return 0; + } + + return count; +} + +static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev) +{ + /* + * The counter is also incremented by the PSP, so increment it by 2 + * and save in secrets page. 
+ */ + *snp_dev->os_area_msg_seqno += 2; +} + +static inline struct snp_guest_dev *to_snp_dev(struct file *file) +{ + struct miscdevice *dev = file->private_data; + + return container_of(dev, struct snp_guest_dev, misc); +} + +static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen) +{ + struct snp_guest_crypto *crypto; + + crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT); + if (!crypto) + return NULL; + + crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + if (IS_ERR(crypto->tfm)) + goto e_free; + + if (crypto_aead_setkey(crypto->tfm, key, keylen)) + goto e_free_crypto; + + crypto->iv_len = crypto_aead_ivsize(crypto->tfm); + crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT); + if (!crypto->iv) + goto e_free_crypto; + + if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) { + if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) { + dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN); + goto e_free_iv; + } + } + + crypto->a_len = crypto_aead_authsize(crypto->tfm); + crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT); + if (!crypto->authtag) + goto e_free_auth; + + return crypto; + +e_free_auth: + kfree(crypto->authtag); +e_free_iv: + kfree(crypto->iv); +e_free_crypto: + crypto_free_aead(crypto->tfm); +e_free: + kfree(crypto); + + return NULL; +} + +static void deinit_crypto(struct snp_guest_crypto *crypto) +{ + crypto_free_aead(crypto->tfm); + kfree(crypto->iv); + kfree(crypto->authtag); + kfree(crypto); +} + +static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg, + u8 *src_buf, u8 *dst_buf, size_t len, bool enc) +{ + struct snp_guest_msg_hdr *hdr = &msg->hdr; + struct scatterlist src[3], dst[3]; + DECLARE_CRYPTO_WAIT(wait); + struct aead_request *req; + int ret; + + req = aead_request_alloc(crypto->tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + /* + * AEAD memory operations: + * +------ AAD -------+------- DATA -----+---- AUTHTAG----+ + * | msg header | plaintext | hdr->authtag | + * | bytes 30h - 5Fh | or | | + * | | cipher | | + * +------------------+------------------+----------------+ + */ + sg_init_table(src, 3); + sg_set_buf(&src[0], &hdr->algo, AAD_LEN); + sg_set_buf(&src[1], src_buf, hdr->msg_sz); + sg_set_buf(&src[2], hdr->authtag, crypto->a_len); + + sg_init_table(dst, 3); + sg_set_buf(&dst[0], &hdr->algo, AAD_LEN); + sg_set_buf(&dst[1], dst_buf, hdr->msg_sz); + sg_set_buf(&dst[2], hdr->authtag, crypto->a_len); + + aead_request_set_ad(req, AAD_LEN); + aead_request_set_tfm(req, crypto->tfm); + aead_request_set_callback(req, 0, crypto_req_done, &wait); + + aead_request_set_crypt(req, src, dst, len, crypto->iv); + ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait); + + aead_request_free(req); + return ret; +} + +static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, + void *plaintext, size_t len) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_guest_msg_hdr *hdr = &msg->hdr; + + memset(crypto->iv, 0, crypto->iv_len); + memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); + + return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true); +} + +static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, + void *plaintext, size_t len) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_guest_msg_hdr *hdr = &msg->hdr; + + /* Build IV with response buffer sequence number */ + memset(crypto->iv, 0, crypto->iv_len); + memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); + + return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false); +} + +static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_guest_msg *resp = snp_dev->response; + struct snp_guest_msg *req = snp_dev->request; + struct snp_guest_msg_hdr *req_hdr = &req->hdr; + struct snp_guest_msg_hdr *resp_hdr = &resp->hdr; + + dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n", + resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz); + + /* Verify that the sequence counter is incremented by 1 */ + if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1))) + return -EBADMSG; + + /* Verify response message type and version number. */ + if (resp_hdr->msg_type != (req_hdr->msg_type + 1) || + resp_hdr->msg_version != req_hdr->msg_version) + return -EBADMSG; + + /* + * If the message size is greater than our buffer length then return + * an error. 
+ */ + if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz)) + return -EBADMSG; + + /* Decrypt the payload */ + return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len); +} + +static bool enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type, + void *payload, size_t sz) +{ + struct snp_guest_msg *req = snp_dev->request; + struct snp_guest_msg_hdr *hdr = &req->hdr; + + memset(req, 0, sizeof(*req)); + + hdr->algo = SNP_AEAD_AES_256_GCM; + hdr->hdr_version = MSG_HDR_VER; + hdr->hdr_sz = sizeof(*hdr); + hdr->msg_type = type; + hdr->msg_version = version; + hdr->msg_seqno = seqno; + hdr->msg_vmpck = vmpck_id; + hdr->msg_sz = sz; + + /* Verify the sequence number is non-zero */ + if (!hdr->msg_seqno) + return -ENOSR; + + dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n", + hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz); + + return __enc_payload(snp_dev, req, payload, sz); +} + +static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver, + u8 type, void *req_buf, size_t req_sz, void *resp_buf, + u32 resp_sz, __u64 *fw_err) +{ + unsigned long err; + u64 seqno; + int rc; + + /* Get message sequence and verify that its a non-zero */ + seqno = snp_get_msg_seqno(snp_dev); + if (!seqno) + return -EIO; + + memset(snp_dev->response, 0, sizeof(struct snp_guest_msg)); + + /* Encrypt the userspace provided payload */ + rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz); + if (rc) + return rc; + + /* Call firmware to process the request */ + rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); + if (fw_err) + *fw_err = err; + + if (rc) + return rc; + + /* + * The verify_and_dec_payload() will fail only if the hypervisor is + * actively modifying the message header or corrupting the encrypted payload. + * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that + * the key cannot be used for any communication. The key is disabled to ensure + * that AES-GCM does not use the same IV while encrypting the request payload. + */ + rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz); + if (rc) { + dev_alert(snp_dev->dev, + "Detected unexpected decode failure, disabling the vmpck_id %d\n", + vmpck_id); + snp_disable_vmpck(snp_dev); + return rc; + } + + /* Increment to new message sequence after payload decryption was successful. */ + snp_inc_msg_seqno(snp_dev); + + return 0; +} + +static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_report_resp *resp; + struct snp_report_req req; + int rc, resp_len; + + lockdep_assert_held(&snp_cmd_mutex); + + if (!arg->req_data || !arg->resp_data) + return -EINVAL; + + if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + return -EFAULT; + + /* + * The intermediate response buffer is used while decrypting the + * response payload. Make sure that it has enough space to cover the + * authtag. 
+ */ + resp_len = sizeof(resp->data) + crypto->a_len; + resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!resp) + return -ENOMEM; + + rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version, + SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data, + resp_len, &arg->fw_err); + if (rc) + goto e_free; + + if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) + rc = -EFAULT; + +e_free: + kfree(resp); + return rc; +} + +static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + struct snp_guest_dev *snp_dev = to_snp_dev(file); + void __user *argp = (void __user *)arg; + struct snp_guest_request_ioctl input; + int ret = -ENOTTY; + + if (copy_from_user(&input, argp, sizeof(input))) + return -EFAULT; + + input.fw_err = 0xff; + + /* Message version must be non-zero */ + if (!input.msg_version) + return -EINVAL; + + mutex_lock(&snp_cmd_mutex); + + /* Check if the VMPCK is not empty */ + if (is_vmpck_empty(snp_dev)) { + dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); + mutex_unlock(&snp_cmd_mutex); + return -ENOTTY; + } + + switch (ioctl) { + case SNP_GET_REPORT: + ret = get_report(snp_dev, &input); + break; + default: + break; + } + + mutex_unlock(&snp_cmd_mutex); + + if (input.fw_err && copy_to_user(argp, &input, sizeof(input))) + return -EFAULT; + + return ret; +} + +static void free_shared_pages(void *buf, size_t sz) +{ + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + int ret; + + if (!buf) + return; + + ret = set_memory_encrypted((unsigned long)buf, npages); + if (ret) { + WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); + return; + } + + __free_pages(virt_to_page(buf), get_order(sz)); +} + +static void *alloc_shared_pages(size_t sz) +{ + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + struct page *page; + int ret; + + page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz)); + if (IS_ERR(page)) + return NULL; + + ret = set_memory_decrypted((unsigned long)page_address(page), npages); + if (ret) { + pr_err("failed to mark page shared, ret=%d\n", ret); + __free_pages(page, get_order(sz)); + return NULL; + } + + return page_address(page); +} + +static const struct file_operations snp_guest_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = snp_guest_ioctl, +}; + +static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno) +{ + u8 *key = NULL; + + switch (id) { + case 0: + *seqno = &layout->os_area.msg_seqno_0; + key = layout->vmpck0; + break; + case 1: + *seqno = &layout->os_area.msg_seqno_1; + key = layout->vmpck1; + break; + case 2: + *seqno = &layout->os_area.msg_seqno_2; + key = layout->vmpck2; + break; + case 3: + *seqno = &layout->os_area.msg_seqno_3; + key = layout->vmpck3; + break; + default: + break; + } + + return key; +} + +static int __init snp_guest_probe(struct platform_device *pdev) +{ + struct snp_secrets_page_layout *layout; + struct snp_guest_platform_data *data; + struct device *dev = &pdev->dev; + struct snp_guest_dev *snp_dev; + struct miscdevice *misc; + int ret; + + if (!dev->platform_data) + return -ENODEV; + + data = (struct snp_guest_platform_data *)dev->platform_data; + layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); + if (!layout) + return -ENODEV; + + ret = -ENOMEM; + snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); + if (!snp_dev) + goto e_unmap; + + ret = -EINVAL; + snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno); + if (!snp_dev->vmpck) { + dev_err(dev, 
"invalid vmpck id %d\n", vmpck_id); + goto e_unmap; + } + + /* Verify that VMPCK is not zero. */ + if (is_vmpck_empty(snp_dev)) { + dev_err(dev, "vmpck id %d is null\n", vmpck_id); + goto e_unmap; + } + + platform_set_drvdata(pdev, snp_dev); + snp_dev->dev = dev; + snp_dev->layout = layout; + + /* Allocate the shared page used for the request and response message. */ + snp_dev->request = alloc_shared_pages(sizeof(struct snp_guest_msg)); + if (!snp_dev->request) + goto e_unmap; + + snp_dev->response = alloc_shared_pages(sizeof(struct snp_guest_msg)); + if (!snp_dev->response) + goto e_free_request; + + ret = -EIO; + snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN); + if (!snp_dev->crypto) + goto e_free_response; + + misc = &snp_dev->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = DEVICE_NAME; + misc->fops = &snp_guest_fops; + + /* initial the input address for guest request */ + snp_dev->input.req_gpa = __pa(snp_dev->request); + snp_dev->input.resp_gpa = __pa(snp_dev->response); + + ret = misc_register(misc); + if (ret) + goto e_free_response; + + dev_info(dev, "Initialized SNP guest driver (using vmpck_id %d)\n", vmpck_id); + return 0; + +e_free_response: + free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); +e_free_request: + free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); +e_unmap: + iounmap(layout); + return ret; +} + +static int __exit snp_guest_remove(struct platform_device *pdev) +{ + struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); + + free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); + free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); + deinit_crypto(snp_dev->crypto); + misc_deregister(&snp_dev->misc); + + return 0; +} + +static struct platform_driver snp_guest_driver = { + .remove = __exit_p(snp_guest_remove), + .driver = { + .name = "snp-guest", + }, +}; + +module_platform_driver_probe(snp_guest_driver, snp_guest_probe); + +MODULE_AUTHOR("Brijesh Singh "); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("AMD SNP Guest Driver"); diff --git a/drivers/virt/coco/sevguest/sevguest.h b/drivers/virt/coco/sevguest/sevguest.h new file mode 100644 index 000000000000..d39bdd013765 --- /dev/null +++ b/drivers/virt/coco/sevguest/sevguest.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Author: Brijesh Singh + * + * SEV-SNP API spec is available at https://developer.amd.com/sev + */ + +#ifndef __VIRT_SEVGUEST_H__ +#define __VIRT_SEVGUEST_H__ + +#include + +#define MAX_AUTHTAG_LEN 32 + +/* See SNP spec SNP_GUEST_REQUEST section for the structure */ +enum msg_type { + SNP_MSG_TYPE_INVALID = 0, + SNP_MSG_CPUID_REQ, + SNP_MSG_CPUID_RSP, + SNP_MSG_KEY_REQ, + SNP_MSG_KEY_RSP, + SNP_MSG_REPORT_REQ, + SNP_MSG_REPORT_RSP, + SNP_MSG_EXPORT_REQ, + SNP_MSG_EXPORT_RSP, + SNP_MSG_IMPORT_REQ, + SNP_MSG_IMPORT_RSP, + SNP_MSG_ABSORB_REQ, + SNP_MSG_ABSORB_RSP, + SNP_MSG_VMRK_REQ, + SNP_MSG_VMRK_RSP, + + SNP_MSG_TYPE_MAX +}; + +enum aead_algo { + SNP_AEAD_INVALID, + SNP_AEAD_AES_256_GCM, +}; + +struct snp_guest_msg_hdr { + u8 authtag[MAX_AUTHTAG_LEN]; + u64 msg_seqno; + u8 rsvd1[8]; + u8 algo; + u8 hdr_version; + u16 hdr_sz; + u8 msg_type; + u8 msg_version; + u16 msg_sz; + u32 rsvd2; + u8 msg_vmpck; + u8 rsvd3[35]; +} __packed; + +struct snp_guest_msg { + struct snp_guest_msg_hdr hdr; + u8 payload[4000]; +} __packed; + +/* + * The secrets page contains 96-bytes of reserved field that can be used by + * the guest OS. The guest OS uses the area to save the message sequence + * number for each VMPCK. + * + * See the GHCB spec section Secret page layout for the format for this area. + */ +struct secrets_os_area { + u32 msg_seqno_0; + u32 msg_seqno_1; + u32 msg_seqno_2; + u32 msg_seqno_3; + u64 ap_jump_table_pa; + u8 rsvd[40]; + u8 guest_usage[32]; +} __packed; + +#define VMPCK_KEY_LEN 32 + +/* See the SNP spec version 0.9 for secrets page format */ +struct snp_secrets_page_layout { + u32 version; + u32 imien : 1, + rsvd1 : 31; + u32 fms; + u32 rsvd2; + u8 gosvw[16]; + u8 vmpck0[VMPCK_KEY_LEN]; + u8 vmpck1[VMPCK_KEY_LEN]; + u8 vmpck2[VMPCK_KEY_LEN]; + u8 vmpck3[VMPCK_KEY_LEN]; + struct secrets_os_area os_area; + u8 rsvd3[3840]; +} __packed; + +#endif /* __VIRT_SEVGUEST_H__ */ diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h new file mode 100644 index 000000000000..38f11d723c68 --- /dev/null +++ b/include/uapi/linux/sev-guest.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Userspace interface for AMD SEV and SNP guest driver. + * + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ *
+ * Author: Brijesh Singh
+ *
+ * SEV API specification is available at: https://developer.amd.com/sev/
+ */
+
+#ifndef __UAPI_LINUX_SEV_GUEST_H_
+#define __UAPI_LINUX_SEV_GUEST_H_
+
+#include <linux/types.h>
+
+struct snp_report_req {
+	/* user data that should be included in the report */
+	__u8 user_data[64];
+
+	/* The vmpl level to be included in the report */
+	__u32 vmpl;
+
+	/* Must be zero filled */
+	__u8 rsvd[28];
+};
+
+struct snp_report_resp {
+	/* response data, see SEV-SNP spec for the format */
+	__u8 data[4000];
+};
+
+struct snp_guest_request_ioctl {
+	/* message version number (must be non-zero) */
+	__u8 msg_version;
+
+	/* Request and response structure address */
+	__u64 req_data;
+	__u64 resp_data;
+
+	/* firmware error code on failure (see psp-sev.h) */
+	__u64 fw_err;
+};
+
+#define SNP_GUEST_REQ_IOC_TYPE	'S'
+
+/* Get SNP attestation report */
+#define SNP_GET_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x0, struct snp_guest_request_ioctl)
+
+#endif /* __UAPI_LINUX_SEV_GUEST_H_ */
-- cgit v1.2.3-59-g8ed1b

From 68de0b2f938642079c0c853b219bdb88c4dc4d13 Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Thu, 24 Feb 2022 10:56:23 -0600
Subject: virt: sevguest: Add support to derive key

The SNP_GET_DERIVED_KEY ioctl interface can be used by the SNP guest to ask
the firmware to provide a key derived from a root key. The derived key may be
used by the guest for any purpose it chooses, such as a sealing key or
communicating with external entities.

See the SEV-SNP firmware spec for more information.

[ bp: No need to memset "req" - it will get overwritten. ]

Signed-off-by: Brijesh Singh
Signed-off-by: Borislav Petkov
Reviewed-by: Liam Merwick
Link: https://lore.kernel.org/r/20220307213356.2797205-45-brijesh.singh@amd.com
---
 Documentation/virt/coco/sevguest.rst  | 17 +++++++++++++
 drivers/virt/coco/sevguest/sevguest.c | 45 +++++++++++++++++++++++++++++++++++
 include/uapi/linux/sev-guest.h        | 17 +++++++++++++
 3 files changed, 79 insertions(+)

(limited to 'include/uapi/linux')

diff --git a/Documentation/virt/coco/sevguest.rst b/Documentation/virt/coco/sevguest.rst
index 3da782e867a3..4135c1431241 100644
--- a/Documentation/virt/coco/sevguest.rst
+++ b/Documentation/virt/coco/sevguest.rst
@@ -77,6 +77,23 @@ On success, the snp_report_resp.data will contains the report. The report
 contain the format described in the SEV-SNP specification. See the SEV-SNP
 specification for further details.

+2.2 SNP_GET_DERIVED_KEY
+-----------------------
+:Technology: sev-snp
+:Type: guest ioctl
+:Parameters (in): struct snp_derived_key_req
+:Returns (out): struct snp_derived_key_resp on success, -negative on error
+
+The SNP_GET_DERIVED_KEY ioctl can be used to get a key derived from a root key.
+The derived key can be used by the guest for any purpose, such as sealing keys
+or communicating with external entities.
+
+The ioctl uses the SNP_GUEST_REQUEST (MSG_KEY_REQ) command provided by the
+SEV-SNP firmware to derive the key. See the SEV-SNP specification for further details
+on the various fields passed in the key derivation request.
+
+On success, the snp_derived_key_resp.data contains the derived key value. See
+the SEV-SNP specification for further details.
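Building on the scaffolding of the earlier report sketch (again an illustrative
example rather than part of the patch; the field encodings follow the SEV-SNP
spec, and fd is an open /dev/sev-guest descriptor), a derived-key request might
be issued as::

	int request_derived_key(int fd)
	{
		struct snp_derived_key_req req = {
			.root_key_select = 0,	/* per spec, bit 0 clear selects VCEK */
			.guest_field_select = 0, /* mix no optional fields into the key */
		};
		struct snp_derived_key_resp resp = {};
		struct snp_guest_request_ioctl guest_req = {
			.msg_version = 1,
			.req_data = (__u64)(unsigned long)&req,
			.resp_data = (__u64)(unsigned long)&resp,
		};

		/* on success, resp.data holds the 64-byte derived key */
		return ioctl(fd, SNP_GET_DERIVED_KEY, &guest_req);
	}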
Reference --------- diff --git a/drivers/virt/coco/sevguest/sevguest.c b/drivers/virt/coco/sevguest/sevguest.c index beda93cdeb4f..393777b72e5e 100644 --- a/drivers/virt/coco/sevguest/sevguest.c +++ b/drivers/virt/coco/sevguest/sevguest.c @@ -391,6 +391,48 @@ e_free: return rc; } +static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) +{ + struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_derived_key_resp resp = {0}; + struct snp_derived_key_req req; + int rc, resp_len; + /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ + u8 buf[64 + 16]; + + lockdep_assert_held(&snp_cmd_mutex); + + if (!arg->req_data || !arg->resp_data) + return -EINVAL; + + /* + * The intermediate response buffer is used while decrypting the + * response payload. Make sure that it has enough space to cover the + * authtag. + */ + resp_len = sizeof(resp.data) + crypto->a_len; + if (sizeof(buf) < resp_len) + return -ENOMEM; + + if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + return -EFAULT; + + rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version, + SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len, + &arg->fw_err); + if (rc) + return rc; + + memcpy(resp.data, buf, sizeof(resp.data)); + if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp))) + rc = -EFAULT; + + /* The response buffer contains the sensitive data, explicitly clear it. */ + memzero_explicit(buf, sizeof(buf)); + memzero_explicit(&resp, sizeof(resp)); + return rc; +} + static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { struct snp_guest_dev *snp_dev = to_snp_dev(file); @@ -420,6 +462,9 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long case SNP_GET_REPORT: ret = get_report(snp_dev, &input); break; + case SNP_GET_DERIVED_KEY: + ret = get_derived_key(snp_dev, &input); + break; default: break; } diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h index 38f11d723c68..598367f12064 100644 --- a/include/uapi/linux/sev-guest.h +++ b/include/uapi/linux/sev-guest.h @@ -30,6 +30,20 @@ struct snp_report_resp { __u8 data[4000]; }; +struct snp_derived_key_req { + __u32 root_key_select; + __u32 rsvd; + __u64 guest_field_select; + __u32 vmpl; + __u32 guest_svn; + __u64 tcb_version; +}; + +struct snp_derived_key_resp { + /* response data, see SEV-SNP spec for the format */ + __u8 data[64]; +}; + struct snp_guest_request_ioctl { /* message version number (must be non-zero) */ __u8 msg_version; @@ -47,4 +61,7 @@ struct snp_guest_request_ioctl { /* Get SNP attestation report */ #define SNP_GET_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x0, struct snp_guest_request_ioctl) +/* Get a derived key from the root */ +#define SNP_GET_DERIVED_KEY _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x1, struct snp_guest_request_ioctl) + #endif /* __UAPI_LINUX_SEV_GUEST_H_ */ -- cgit v1.2.3-59-g8ed1b From d80b494f712317493d464a55652698c4d1b7bb0f Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Mon, 7 Mar 2022 15:33:55 -0600 Subject: virt: sevguest: Add support to get extended report Version 2 of GHCB specification defines Non-Automatic-Exit (NAE) to get extended guest report which is similar to the SNP_GET_REPORT ioctl. The main difference is related to the additional data that will be returned. That additional data returned is a certificate blob that can be used by the SNP guest user. The certificate blob layout is defined in the GHCB specification. 
The driver simply treats the blob as opaque data and copies it to userspace.

[ bp: Massage commit message, cast 1st arg of access_ok() ]

Signed-off-by: Brijesh Singh
Signed-off-by: Borislav Petkov
Link: https://lore.kernel.org/r/20220307213356.2797205-46-brijesh.singh@amd.com
---
 Documentation/virt/coco/sevguest.rst  | 23 +++++++++
 drivers/virt/coco/sevguest/sevguest.c | 92 ++++++++++++++++++++++++++++++++++-
 include/uapi/linux/sev-guest.h        | 13 +++++
 3 files changed, 126 insertions(+), 2 deletions(-)

(limited to 'include/uapi/linux')

diff --git a/Documentation/virt/coco/sevguest.rst b/Documentation/virt/coco/sevguest.rst
index 4135c1431241..625de22658ec 100644
--- a/Documentation/virt/coco/sevguest.rst
+++ b/Documentation/virt/coco/sevguest.rst
@@ -95,6 +95,29 @@ on the various fields passed in the key derivation request.
 
 On success, the snp_derived_key_resp.data contains the derived key value. See
 the SEV-SNP specification for further details.
+
+2.3 SNP_GET_EXT_REPORT
+----------------------
+:Technology: sev-snp
+:Type: guest ioctl
+:Parameters (in/out): struct snp_ext_report_req
+:Returns (out): struct snp_report_resp on success, -negative on error
+
+The SNP_GET_EXT_REPORT ioctl is similar to SNP_GET_REPORT. The difference is
+related to the additional certificate data that is returned with the report.
+The certificate data returned is provided by the hypervisor through
+SNP_SET_EXT_CONFIG.
+
+The ioctl uses the SNP_GUEST_REQUEST (MSG_REPORT_REQ) command provided by the SEV-SNP
+firmware to get the attestation report.
+
+On success, the snp_report_resp.data will contain the attestation report
+and snp_ext_report_req.certs_address will contain the certificate blob. If the
+length of the blob is smaller than expected, then snp_ext_report_req.certs_len will
+be updated with the expected value.
+
+See the GHCB specification for further details on how to parse the certificate blob.
+
 Reference
 ---------

diff --git a/drivers/virt/coco/sevguest/sevguest.c b/drivers/virt/coco/sevguest/sevguest.c
index 393777b72e5e..15afb6ce8d19 100644
--- a/drivers/virt/coco/sevguest/sevguest.c
+++ b/drivers/virt/coco/sevguest/sevguest.c
@@ -43,6 +43,7 @@ struct snp_guest_dev {
 	struct device *dev;
 	struct miscdevice misc;
 
+	void *certs_data;
 	struct snp_guest_crypto *crypto;
 	struct snp_guest_msg *request, *response;
 	struct snp_secrets_page_layout *layout;
@@ -433,6 +434,82 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+{
+	struct snp_guest_crypto *crypto = snp_dev->crypto;
+	struct snp_ext_report_req req;
+	struct snp_report_resp *resp;
+	int ret, npages = 0, resp_len;
+
+	lockdep_assert_held(&snp_cmd_mutex);
+
+	if (!arg->req_data || !arg->resp_data)
+		return -EINVAL;
+
+	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+		return -EFAULT;
+
+	/* userspace does not want certificate data */
+	if (!req.certs_len || !req.certs_address)
+		goto cmd;
+
+	if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
+	    !IS_ALIGNED(req.certs_len, PAGE_SIZE))
+		return -EINVAL;
+
+	if (!access_ok((const void __user *)req.certs_address, req.certs_len))
+		return -EFAULT;
+
+	/*
+	 * Initialize the intermediate buffer with all zeros. This buffer
+	 * is used in the guest request message to get the certs blob from
+	 * the host. If the host does not supply any certs in it, then copy
+	 * zeros to indicate that certificate data was not provided.
+ */ + memset(snp_dev->certs_data, 0, req.certs_len); + npages = req.certs_len >> PAGE_SHIFT; +cmd: + /* + * The intermediate response buffer is used while decrypting the + * response payload. Make sure that it has enough space to cover the + * authtag. + */ + resp_len = sizeof(resp->data) + crypto->a_len; + resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!resp) + return -ENOMEM; + + snp_dev->input.data_npages = npages; + ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version, + SNP_MSG_REPORT_REQ, &req.data, + sizeof(req.data), resp->data, resp_len, &arg->fw_err); + + /* If certs length is invalid then copy the returned length */ + if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) { + req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT; + + if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req))) + ret = -EFAULT; + } + + if (ret) + goto e_free; + + if (npages && + copy_to_user((void __user *)req.certs_address, snp_dev->certs_data, + req.certs_len)) { + ret = -EFAULT; + goto e_free; + } + + if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) + ret = -EFAULT; + +e_free: + kfree(resp); + return ret; +} + static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { struct snp_guest_dev *snp_dev = to_snp_dev(file); @@ -465,6 +542,9 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long case SNP_GET_DERIVED_KEY: ret = get_derived_key(snp_dev, &input); break; + case SNP_GET_EXT_REPORT: + ret = get_ext_report(snp_dev, &input); + break; default: break; } @@ -595,10 +675,14 @@ static int __init snp_guest_probe(struct platform_device *pdev) if (!snp_dev->response) goto e_free_request; + snp_dev->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE); + if (!snp_dev->certs_data) + goto e_free_response; + ret = -EIO; snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN); if (!snp_dev->crypto) - goto e_free_response; + goto e_free_cert_data; misc = &snp_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; @@ -608,14 +692,17 @@ static int __init snp_guest_probe(struct platform_device *pdev) /* initial the input address for guest request */ snp_dev->input.req_gpa = __pa(snp_dev->request); snp_dev->input.resp_gpa = __pa(snp_dev->response); + snp_dev->input.data_gpa = __pa(snp_dev->certs_data); ret = misc_register(misc); if (ret) - goto e_free_response; + goto e_free_cert_data; dev_info(dev, "Initialized SNP guest driver (using vmpck_id %d)\n", vmpck_id); return 0; +e_free_cert_data: + free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); e_free_response: free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); e_free_request: @@ -629,6 +716,7 @@ static int __exit snp_guest_remove(struct platform_device *pdev) { struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); + free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); deinit_crypto(snp_dev->crypto); diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h index 598367f12064..256aaeff7e65 100644 --- a/include/uapi/linux/sev-guest.h +++ b/include/uapi/linux/sev-guest.h @@ -56,6 +56,16 @@ struct snp_guest_request_ioctl { __u64 fw_err; }; +struct snp_ext_report_req { + struct snp_report_req data; + + /* where to copy the certificate blob */ + __u64 certs_address; + + /* length of the certificate blob */ + __u32 certs_len; +}; + #define 
SNP_GUEST_REQ_IOC_TYPE 'S' /* Get SNP attestation report */ @@ -64,4 +74,7 @@ struct snp_guest_request_ioctl { /* Get a derived key from the root */ #define SNP_GET_DERIVED_KEY _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x1, struct snp_guest_request_ioctl) +/* Get SNP extended report as defined in the GHCB specification version 2. */ +#define SNP_GET_EXT_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x2, struct snp_guest_request_ioctl) + #endif /* __UAPI_LINUX_SEV_GUEST_H_ */ -- cgit v1.2.3-59-g8ed1b From 794c24e9921f32ded4422833a990ccf11dc3c00e Mon Sep 17 00:00:00 2001 From: Jeffrey Ji Date: Wed, 6 Apr 2022 17:26:00 +0000 Subject: net-core: rx_otherhost_dropped to core_stats Increment rx_otherhost_dropped counter when packet dropped due to mismatched dest MAC addr. An example when this drop can occur is when manually crafting raw packets that will be consumed by a user space application via a tap device. For testing purposes local traffic was generated using trafgen for the client and netcat to start a server Tested: Created 2 netns, sent 1 packet using trafgen from 1 to the other with "{eth(daddr=$INCORRECT_MAC...}", verified that iproute2 showed the counter was incremented. (Also had to modify iproute2 to show the stat, additional patch for that coming next.) Signed-off-by: Jeffrey Ji Reviewed-by: Brian Vazquez Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20220406172600.1141083-1-jeffreyjilinux@gmail.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 2 ++ include/uapi/linux/if_link.h | 5 +++++ net/core/dev.c | 1 + net/ipv4/ip_input.c | 1 + net/ipv6/ip6_input.c | 1 + 5 files changed, 10 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7e7b2a72e473..28ea4f8269d4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -203,6 +203,7 @@ struct net_device_core_stats { local_t rx_dropped; local_t tx_dropped; local_t rx_nohandler; + local_t rx_otherhost_dropped; } __aligned(4 * sizeof(local_t)); #include @@ -3837,6 +3838,7 @@ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ DEV_CORE_STATS_INC(rx_dropped) DEV_CORE_STATS_INC(tx_dropped) DEV_CORE_STATS_INC(rx_nohandler) +DEV_CORE_STATS_INC(rx_otherhost_dropped) static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cc284c048e69..d1e600816b82 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,8 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; }; /* Subset of link stats useful for in-HW collection. 
Meaning of the fields is as diff --git a/net/core/dev.c b/net/core/dev.c index f00d29856b43..e027410e861b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10358,6 +10358,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, storage->rx_dropped += local_read(&core_stats->rx_dropped); storage->tx_dropped += local_read(&core_stats->tx_dropped); storage->rx_nohandler += local_read(&core_stats->rx_nohandler); + storage->rx_otherhost_dropped += local_read(&core_stats->rx_otherhost_dropped); } } return storage; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 95f7bb052784..b1165f717cd1 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -451,6 +451,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) * that it receives, do not try to analyse it. */ if (skb->pkt_type == PACKET_OTHERHOST) { + dev_core_stats_rx_otherhost_dropped_inc(skb->dev); drop_reason = SKB_DROP_REASON_OTHERHOST; goto drop; } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 5b5ea35635f9..b4880c7c84eb 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -150,6 +150,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, struct inet6_dev *idev; if (skb->pkt_type == PACKET_OTHERHOST) { + dev_core_stats_rx_otherhost_dropped_inc(skb->dev); kfree_skb(skb); return NULL; } -- cgit v1.2.3-59-g8ed1b From 2d7991fe867974a8e5065ee9691451a406b9320d Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 11 Mar 2022 16:23:22 -0700 Subject: dmaengine: idxd: update IAA definitions for user header Add additional structure definitions for Intel In-memory Analytics Accelerator (IAA/IAX). See specification (1) for more details. 1: https://cdrdv2.intel.com/v1/dl/getContent/721858 Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/164704100212.1373038.18362680016033557757.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- include/uapi/linux/idxd.h | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index a8f0ff75c430..bce7c43657d5 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -53,6 +53,11 @@ enum idxd_scmd_stat { /* IAX */ #define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000 +#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000 +#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000 +#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000 +#define IDXD_OP_FLAG_SRC2_STS 0x100000 +#define IDXD_OP_FLAG_CRC_RFC3720 0x200000 /* Opcode */ enum dsa_opcode { @@ -81,6 +86,18 @@ enum iax_opcode { IAX_OPCODE_MEMMOVE, IAX_OPCODE_DECOMPRESS = 0x42, IAX_OPCODE_COMPRESS, + IAX_OPCODE_CRC64, + IAX_OPCODE_ZERO_DECOMP_32 = 0x48, + IAX_OPCODE_ZERO_DECOMP_16, + IAX_OPCODE_DECOMP_32 = 0x4c, + IAX_OPCODE_DECOMP_16, + IAX_OPCODE_SCAN = 0x50, + IAX_OPCODE_SET_MEMBER, + IAX_OPCODE_EXTRACT, + IAX_OPCODE_SELECT, + IAX_OPCODE_RLE_BURST, + IAX_OPCDE_FIND_UNIQUE, + IAX_OPCODE_EXPAND, }; /* Completion record status */ @@ -120,6 +137,7 @@ enum iax_completion_status { IAX_COMP_NONE = 0, IAX_COMP_SUCCESS, IAX_COMP_PAGE_FAULT_IR = 0x04, + IAX_COMP_ANALYTICS_ERROR = 0x0a, IAX_COMP_OUTBUF_OVERFLOW, IAX_COMP_BAD_OPCODE = 0x10, IAX_COMP_INVALID_FLAGS, @@ -140,7 +158,10 @@ enum iax_completion_status { IAX_COMP_WATCHDOG, IAX_COMP_INVALID_COMP_FLAG = 0x30, IAX_COMP_INVALID_FILTER_FLAG, - IAX_COMP_INVALID_NUM_ELEMS = 0x33, + IAX_COMP_INVALID_INPUT_SIZE, + IAX_COMP_INVALID_NUM_ELEMS, + IAX_COMP_INVALID_SRC1_WIDTH, + 
IAX_COMP_INVALID_INVERT_OUT, }; #define DSA_COMP_STATUS_MASK 0x7f @@ -319,8 +340,12 @@ struct iax_completion_record { uint32_t output_size; uint8_t output_bits; uint8_t rsvd3; - uint16_t rsvd4; - uint64_t rsvd5[4]; + uint16_t xor_csum; + uint32_t crc; + uint32_t min; + uint32_t max; + uint32_t sum; + uint64_t rsvd4[2]; } __attribute__((packed)); struct iax_raw_completion_record { -- cgit v1.2.3-59-g8ed1b From 545528d788556c724eeb5400757f828ef27782a8 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Wed, 13 Apr 2022 13:51:54 +0300 Subject: net: netlink: add NLM_F_BULK delete request modifier Add a new delete request modifier called NLM_F_BULK which, when supported, would cause the request to delete multiple objects. The flag is a convenient way to signal that a multiple delete operation is requested which can be gradually added to different delete requests. In order to make sure older kernels will error out if the operation is not supported instead of doing something unintended we have to break a required condition when implementing support for this flag, f.e. for neighbors we will omit the mandatory mac address attribute. Initially it will be used to add flush with filtering support for bridge fdbs, but it also opens the door to add similar support to others. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/netlink.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 4c0cde075c27..855dffb4c1c3 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -72,6 +72,7 @@ struct nlmsghdr { /* Modifiers to DELETE request */ #define NLM_F_NONREC 0x100 /* Do not delete recursively */ +#define NLM_F_BULK 0x200 /* Delete multiple objects */ /* Flags for ACK message */ #define NLM_F_CAPPED 0x100 /* request was capped */ -- cgit v1.2.3-59-g8ed1b From ea2c0f9e3fc2f94f090d693b7235c02af1289629 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Wed, 13 Apr 2022 13:52:00 +0300 Subject: net: rtnetlink: add ndm flags and state mask attributes Add ndm flags/state masks which will be used for bulk delete filtering. All of these are used by the bridge and vxlan drivers. Also minimal attr policy validation is added, it is up to ndo_fdb_del_bulk implementers to further validate them. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/neighbour.h | 2 ++ net/core/rtnetlink.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index db05fb55055e..39c565e460c7 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -32,6 +32,8 @@ enum { NDA_NH_ID, NDA_FDB_EXT_ATTRS, NDA_FLAGS_EXT, + NDA_NDM_STATE_MASK, + NDA_NDM_FLAGS_MASK, __NDA_MAX }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 520d50fcaaea..ab7fb9a16da9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4172,6 +4172,8 @@ EXPORT_SYMBOL(ndo_dflt_fdb_del); static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = { [NDA_VLAN] = { .type = NLA_U16 }, [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), + [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, + [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, }; static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, -- cgit v1.2.3-59-g8ed1b From c24a950ec7d60c4da91dc3f273295c7f438b531e Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 7 Apr 2022 14:02:33 -0700 Subject: KVM, SEV: Add KVM_EXIT_SHUTDOWN metadata for SEV-ES If an SEV-ES guest requests termination, exit to userspace with KVM_EXIT_SYSTEM_EVENT and a dedicated SEV_TERM type instead of -EINVAL so that userspace can take appropriate action. See AMD's GHCB spec section '4.1.13 Termination Request' for more details. Suggested-by: Sean Christopherson Suggested-by: Paolo Bonzini Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Peter Gonda Reported-by: kernel test robot Message-Id: <20220407210233.782250-1-pgonda@google.com> [Add documentatino. - Paolo] Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 12 +++++++++++- arch/x86/kvm/svm/sev.c | 9 +++++++-- include/uapi/linux/kvm.h | 5 ++++- 3 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index e7a0dfdc0178..72183ae628f7 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6088,8 +6088,12 @@ should put the acknowledged interrupt vector into the 'epr' field. #define KVM_SYSTEM_EVENT_SHUTDOWN 1 #define KVM_SYSTEM_EVENT_RESET 2 #define KVM_SYSTEM_EVENT_CRASH 3 + #define KVM_SYSTEM_EVENT_SEV_TERM 4 + #define KVM_SYSTEM_EVENT_NDATA_VALID (1u << 31) __u32 type; + __u32 ndata; __u64 flags; + __u64 data[16]; } system_event; If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered @@ -6099,7 +6103,7 @@ HVC instruction based PSCI call from the vcpu. The 'type' field describes the system-level event type. The 'flags' field describes architecture specific flags for the system-level event. -Valid values for 'type' are: +Valid values for bits 30:0 of 'type' are: - KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the VM. Userspace is not obliged to honour this, and if it does honour @@ -6112,12 +6116,18 @@ Valid values for 'type' are: has requested a crash condition maintenance. Userspace can choose to ignore the request, or to gather VM memory core dump and/or reset/shutdown of the VM. + - KVM_SYSTEM_EVENT_SEV_TERM -- an AMD SEV guest requested termination. + The guest physical address of the guest's GHCB is stored in `data[0]`. Valid flags are: - KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (arm64 only) -- the guest issued a SYSTEM_RESET2 call according to v1.1 of the PSCI specification. 
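For illustration, a VMM-side sketch of decoding these events might look as follows
(a hedged example, not from the patch; it uses only the UAPI constants added here
and the `ndata`/`data[]` convention described in the paragraph that follows)::

	#include <linux/kvm.h>
	#include <stdio.h>

	/* Called when KVM_RUN returns with exit_reason == KVM_EXIT_SYSTEM_EVENT. */
	static void handle_system_event(struct kvm_run *run)
	{
		__u32 type = run->system_event.type & ~KVM_SYSTEM_EVENT_NDATA_VALID;

		switch (type) {
		case KVM_SYSTEM_EVENT_SEV_TERM:
			if ((run->system_event.type & KVM_SYSTEM_EVENT_NDATA_VALID) &&
			    run->system_event.ndata >= 1)
				fprintf(stderr, "SEV-ES termination, GHCB GPA %#llx\n",
					(unsigned long long)run->system_event.data[0]);
			/* tear down the VM */
			break;
		case KVM_SYSTEM_EVENT_SHUTDOWN:
		case KVM_SYSTEM_EVENT_RESET:
		case KVM_SYSTEM_EVENT_CRASH:
			/* existing handling */
			break;
		}
	}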
+Extra data for this event is stored in the `data[]` array, up to index
+`ndata-1` inclusive, if bit 31 is set in `type`. The data depends on the
+`type` field. There is no extra data if bit 31 is clear or `ndata` is zero.
+
 ::

	/* KVM_EXIT_IOAPIC_EOI */

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 537aaddc852f..a93f0d01bb90 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2738,8 +2738,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
 			reason_set, reason_code);
 
-		ret = -EINVAL;
-		break;
+		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM |
+					       KVM_SYSTEM_EVENT_NDATA_VALID;
+		vcpu->run->system_event.ndata = 1;
+		vcpu->run->system_event.data[0] = control->ghcb_gpa;
+
+		return 0;
 	}
 	default:
 		/* Error, keep GHCB MSR value as-is */

diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 8616af85dc5d..dd1d8167e71f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -444,8 +444,11 @@ struct kvm_run {
 #define KVM_SYSTEM_EVENT_SHUTDOWN       1
 #define KVM_SYSTEM_EVENT_RESET          2
 #define KVM_SYSTEM_EVENT_CRASH          3
+#define KVM_SYSTEM_EVENT_SEV_TERM       4
+#define KVM_SYSTEM_EVENT_NDATA_VALID    (1u << 31)
 			__u32 type;
-			__u64 flags;
+			__u32 ndata;
+			__u64 data[16];
 		} system_event;
 
 		/* KVM_EXIT_S390_STSI */
 		struct {
-- cgit v1.2.3-59-g8ed1b

From 4dc84c06a343fcb95fd5a0acb537aefa4ebdd1b0 Mon Sep 17 00:00:00 2001
From: Jie Wang
Date: Tue, 12 Apr 2022 10:01:19 +0800
Subject: net: ethtool: extend ringparam set/get APIs for tx_push

Currently, tx push is a standard driver feature that controls use of a
fast-path descriptor push. So this patch extends the ringparam APIs and
data structures to support setting/getting tx push via ethtool -G/-g.

Signed-off-by: Jie Wang
Signed-off-by: Guangbin Huang
Signed-off-by: Jakub Kicinski
---
 Documentation/networking/ethtool-netlink.rst | 8 ++++++++
 include/linux/ethtool.h                      | 4 ++++
 include/uapi/linux/ethtool_netlink.h         | 1 +
 net/ethtool/netlink.h                        | 2 +-
 net/ethtool/rings.c                          | 18 ++++++++++++++--
 5 files changed, 30 insertions(+), 3 deletions(-)

(limited to 'include/uapi/linux')

diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 24d9be69065d..dbca3e9ec782 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -862,6 +862,7 @@ Kernel response contents:
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``          u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``      u8      TCP header / data split
   ``ETHTOOL_A_RINGS_CQE_SIZE``            u32     Size of TX/RX CQE
+  ``ETHTOOL_A_RINGS_TX_PUSH``             u8      flag of TX Push mode
  ==================================== ====== ===========================
 
 ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -871,6 +872,12 @@ separate buffers. The device configuration must make it possible to receive
 full memory pages of data, for example because MTU is high enough or through
 HW-GRO.
 
+The ``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable the descriptor fast
+path for sending packets. In the ordinary path, the driver fills descriptors
+in DRAM and notifies the NIC hardware. In the fast path, the driver pushes
+descriptors to the device through MMIO writes, thus reducing latency. However,
+enabling this feature may increase the CPU cost. Drivers may enforce additional
+per-packet eligibility checks (e.g. on packet size).
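A hypothetical driver-side wiring for this attribute could look like the
following sketch (the foo_* names and private state are placeholders; only the
ethtool_ops fields and the ETHTOOL_RING_USE_TX_PUSH bit come from this patch)::

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct foo_priv {
		u8 tx_push;		/* current TX push setting */
	};

	static void foo_get_ringparam(struct net_device *dev,
				      struct ethtool_ringparam *ring,
				      struct kernel_ethtool_ringparam *kernel_ring,
				      struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		kernel_ring->tx_push = priv->tx_push;
	}

	static int foo_set_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kernel_ring,
				     struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		priv->tx_push = kernel_ring->tx_push;
		/* reprogram the TX doorbell/descriptor path here */
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH,
		.get_ringparam		= foo_get_ringparam,
		.set_ringparam		= foo_set_ringparam,
	};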
RINGS_SET ========= @@ -887,6 +894,7 @@ Request contents: ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE + ``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode ==================================== ====== =========================== Kernel checks that requested ring sizes do not exceed limits reported by diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 4af58459a1e7..99dc7bfbcd3c 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -71,11 +71,13 @@ enum { * struct kernel_ethtool_ringparam - RX/TX ring configuration * @rx_buf_len: Current length of buffers on the rx ring. * @tcp_data_split: Scatter packet headers and data to separate buffers + * @tx_push: The flag of tx push mode * @cqe_size: Size of TX/RX completion queue event */ struct kernel_ethtool_ringparam { u32 rx_buf_len; u8 tcp_data_split; + u8 tx_push; u32 cqe_size; }; @@ -83,10 +85,12 @@ struct kernel_ethtool_ringparam { * enum ethtool_supported_ring_param - indicator caps for setting ring params * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size + * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push */ enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), ETHTOOL_RING_USE_CQE_SIZE = BIT(1), + ETHTOOL_RING_USE_TX_PUSH = BIT(2), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 979850221b8d..d2fb4f7be61b 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -338,6 +338,7 @@ enum { ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */ + ETHTOOL_A_RINGS_TX_PUSH, /* u8 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 29d01662a48b..7919ddb2371c 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -363,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; -extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_CQE_SIZE + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1]; extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 9f33c9689b56..9ed60c507d97 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -55,7 +55,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_TX */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */ - nla_total_size(sizeof(u32)); /* _RINGS_CQE_SIZE */ + nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */ + nla_total_size(sizeof(u8))); /* _RINGS_TX_PUSH */ } static int rings_fill_reply(struct sk_buff *skb, @@ -94,7 +95,8 @@ static int 
rings_fill_reply(struct sk_buff *skb, (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, kr->tcp_data_split))) || (kr->cqe_size && - (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size)))) + (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) || + nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push)) return -EMSGSIZE; return 0; @@ -123,6 +125,7 @@ const struct nla_policy ethnl_rings_set_policy[] = { [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1), [ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1), }; int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) @@ -149,6 +152,15 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) if (!ops->get_ringparam || !ops->set_ringparam) goto out_dev; + if (tb[ETHTOOL_A_RINGS_TX_PUSH] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) { + ret = -EOPNOTSUPP; + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_TX_PUSH], + "setting tx push not supported"); + goto out_dev; + } + rtnl_lock(); ret = ethnl_ops_begin(dev); if (ret < 0) @@ -165,6 +177,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod); ethnl_update_u32(&kernel_ringparam.cqe_size, tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod); + ethnl_update_u8(&kernel_ringparam.tx_push, + tb[ETHTOOL_A_RINGS_TX_PUSH], &mod); ret = 0; if (!mod) goto out_ops; -- cgit v1.2.3-59-g8ed1b From f9a2fb73318eb4dbf8cd84866b8b0dd012d8b116 Mon Sep 17 00:00:00 2001 From: Arun Ajith S Date: Fri, 15 Apr 2022 08:34:02 +0000 Subject: net/ipv6: Introduce accept_unsolicited_na knob to implement router-side changes for RFC9131 Add a new neighbour cache entry in STALE state for routers on receiving an unsolicited (gratuitous) neighbour advertisement with target link-layer-address option specified. This is similar to the arp_accept configuration for IPv4. A new sysctl endpoint is created to turn on this behaviour: /proc/sys/net/ipv6/conf/interface/accept_unsolicited_na. Signed-off-by: Arun Ajith S Reviewed-by: David Ahern Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.rst | 27 +++ include/linux/ipv6.h | 1 + include/uapi/linux/ipv6.h | 1 + net/ipv6/addrconf.c | 10 + net/ipv6/ndisc.c | 20 +- tools/testing/selftests/net/Makefile | 1 + .../selftests/net/ndisc_unsolicited_na_test.sh | 255 +++++++++++++++++++++ 7 files changed, 314 insertions(+), 1 deletion(-) create mode 100755 tools/testing/selftests/net/ndisc_unsolicited_na_test.sh (limited to 'include/uapi/linux') diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index b0024aa7b051..433f2e4a5fed 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -2467,6 +2467,33 @@ drop_unsolicited_na - BOOLEAN By default this is turned off. +accept_unsolicited_na - BOOLEAN + Add a new neighbour cache entry in STALE state for routers on receiving an + unsolicited neighbour advertisement with target link-layer address option + specified. This is as per router-side behavior documented in RFC9131. + This has lower precedence than drop_unsolicited_na. 
+
+	==== ====== ====== ==============================================
+	drop accept fwding behaviour
+	---- ------ ------ ----------------------------------------------
+	   1      X      X Drop NA packet and don't pass up the stack
+	   0      0      X Pass NA packet up the stack, don't update NC
+	   0      1      0 Pass NA packet up the stack, don't update NC
+	   0      1      1 Pass NA packet up the stack, and add a STALE
+	                   NC entry
+	==== ====== ====== ==============================================
+
+	This optimizes the return path for the initial off-link
+	communication that is initiated by a directly connected host,
+	by ensuring that the first-hop router which turns on this setting
+	doesn't have to buffer the initial return packets in order to
+	perform neighbour solicitation.
+	The prerequisite is that the host is configured to send
+	unsolicited neighbour advertisements on interface bringup.
+	This setting should be used in conjunction with the ndisc_notify
+	setting on the host to satisfy this prerequisite.
+
+	By default this is turned off.
+
 enhanced_dad - BOOLEAN
 	Include a nonce option in the IPv6 neighbor solicitation messages used for
 	duplicate address detection per RFC7527. A received DAD NS will only signal
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 16870f86c74d..918bfea4ef5f 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -61,6 +61,7 @@ struct ipv6_devconf {
 	__s32		suppress_frag_ndisc;
 	__s32		accept_ra_mtu;
 	__s32		drop_unsolicited_na;
+	__s32		accept_unsolicited_na;
 	struct ipv6_stable_secret {
 		bool initialized;
 		struct in6_addr secret;
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index d4178dace0bf..549ddeaf788b 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -194,6 +194,7 @@ enum {
 	DEVCONF_IOAM6_ID,
 	DEVCONF_IOAM6_ID_WIDE,
 	DEVCONF_NDISC_EVICT_NOCARRIER,
+	DEVCONF_ACCEPT_UNSOLICITED_NA,
 	DEVCONF_MAX
 };

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1afc4c024981..6473dc84b71d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5587,6 +5587,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
 	array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
 	array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
 	array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
+	array[DEVCONF_ACCEPT_UNSOLICITED_NA] = cnf->accept_unsolicited_na;
 }

 static inline size_t inet6_ifla6_size(void)
@@ -7037,6 +7038,15 @@ static const struct ctl_table addrconf_sysctl[] = {
 		.extra1		= (void *)SYSCTL_ZERO,
 		.extra2		= (void *)SYSCTL_ONE,
 	},
+	{
+		.procname	= "accept_unsolicited_na",
+		.data		= &ipv6_devconf.accept_unsolicited_na,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (void *)SYSCTL_ZERO,
+		.extra2		= (void *)SYSCTL_ONE,
+	},
 	{
 		/* sentinel */
 	}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index fcb288b0ae13..254addad0dd3 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -979,6 +979,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	struct inet6_ifaddr *ifp;
 	struct neighbour *neigh;
+	bool create_neigh;

 	if (skb->len < sizeof(struct nd_msg)) {
 		ND_PRINTK(2, warn, "NA: packet too short\n");
@@ -999,6 +1000,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
 	/* For some 802.11 wireless deployments (and possibly other networks),
 	 * there will be a NA proxy and unsolicited packets are attacks
 	 * and thus should not be accepted.
+ * drop_unsolicited_na takes precedence over accept_unsolicited_na */ if (!msg->icmph.icmp6_solicited && idev && idev->cnf.drop_unsolicited_na) @@ -1039,7 +1041,23 @@ static void ndisc_recv_na(struct sk_buff *skb) in6_ifa_put(ifp); return; } - neigh = neigh_lookup(&nd_tbl, &msg->target, dev); + /* RFC 9131 updates original Neighbour Discovery RFC 4861. + * An unsolicited NA can now create a neighbour cache entry + * on routers if it has Target LL Address option. + * + * drop accept fwding behaviour + * ---- ------ ------ ---------------------------------------------- + * 1 X X Drop NA packet and don't pass up the stack + * 0 0 X Pass NA packet up the stack, don't update NC + * 0 1 0 Pass NA packet up the stack, don't update NC + * 0 1 1 Pass NA packet up the stack, and add a STALE + * NC entry + * Note that we don't do a (daddr == all-routers-mcast) check. + */ + create_neigh = !msg->icmph.icmp6_solicited && lladdr && + idev && idev->cnf.forwarding && + idev->cnf.accept_unsolicited_na; + neigh = __neigh_lookup(&nd_tbl, &msg->target, dev, create_neigh); if (neigh) { u8 old_flags = neigh->flags; diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 3fe2515aa616..af7f6e6ff182 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -36,6 +36,7 @@ TEST_PROGS += srv6_end_dt4_l3vpn_test.sh TEST_PROGS += srv6_end_dt6_l3vpn_test.sh TEST_PROGS += vrf_strict_mode_test.sh TEST_PROGS += arp_ndisc_evict_nocarrier.sh +TEST_PROGS += ndisc_unsolicited_na_test.sh TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh TEST_GEN_FILES = socket nettest diff --git a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh new file mode 100755 index 000000000000..f508657ee126 --- /dev/null +++ b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh @@ -0,0 +1,255 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test is for the accept_unsolicited_na feature to +# enable RFC9131 behaviour. The following is the test-matrix. +# drop accept fwding behaviour +# ---- ------ ------ ---------------------------------------------- +# 1 X X Drop NA packet and don't pass up the stack +# 0 0 X Pass NA packet up the stack, don't update NC +# 0 1 0 Pass NA packet up the stack, don't update NC +# 0 1 1 Pass NA packet up the stack, and add a STALE +# NC entry + +ret=0 +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + +PAUSE_ON_FAIL=no +PAUSE=no + +HOST_NS="ns-host" +ROUTER_NS="ns-router" + +HOST_INTF="veth-host" +ROUTER_INTF="veth-router" + +ROUTER_ADDR="2000:20::1" +HOST_ADDR="2000:20::2" +SUBNET_WIDTH=64 +ROUTER_ADDR_WITH_MASK="${ROUTER_ADDR}/${SUBNET_WIDTH}" +HOST_ADDR_WITH_MASK="${HOST_ADDR}/${SUBNET_WIDTH}" + +IP_HOST="ip -6 -netns ${HOST_NS}" +IP_HOST_EXEC="ip netns exec ${HOST_NS}" +IP_ROUTER="ip -6 -netns ${ROUTER_NS}" +IP_ROUTER_EXEC="ip netns exec ${ROUTER_NS}" + +tcpdump_stdout= +tcpdump_stderr= + +log_test() +{ + local rc=$1 + local expected=$2 + local msg="$3" + + if [ ${rc} -eq ${expected} ]; then + printf " TEST: %-60s [ OK ]\n" "${msg}" + nsuccess=$((nsuccess+1)) + else + ret=1 + nfail=$((nfail+1)) + printf " TEST: %-60s [FAIL]\n" "${msg}" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + fi + + if [ "${PAUSE}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi +} + +setup() +{ + set -e + + local drop_unsolicited_na=$1 + local accept_unsolicited_na=$2 + local forwarding=$3 + + # Setup two namespaces and a veth tunnel across them. + # On end of the tunnel is a router and the other end is a host. + ip netns add ${HOST_NS} + ip netns add ${ROUTER_NS} + ${IP_ROUTER} link add ${ROUTER_INTF} type veth \ + peer name ${HOST_INTF} netns ${HOST_NS} + + # Enable IPv6 on both router and host, and configure static addresses. + # The router here is the DUT + # Setup router configuration as specified by the arguments. + # forwarding=0 case is to check that a non-router + # doesn't add neighbour entries. + ROUTER_CONF=net.ipv6.conf.${ROUTER_INTF} + ${IP_ROUTER_EXEC} sysctl -qw \ + ${ROUTER_CONF}.forwarding=${forwarding} + ${IP_ROUTER_EXEC} sysctl -qw \ + ${ROUTER_CONF}.drop_unsolicited_na=${drop_unsolicited_na} + ${IP_ROUTER_EXEC} sysctl -qw \ + ${ROUTER_CONF}.accept_unsolicited_na=${accept_unsolicited_na} + ${IP_ROUTER_EXEC} sysctl -qw ${ROUTER_CONF}.disable_ipv6=0 + ${IP_ROUTER} addr add ${ROUTER_ADDR_WITH_MASK} dev ${ROUTER_INTF} + + # Turn on ndisc_notify on host interface so that + # the host sends unsolicited NAs. + HOST_CONF=net.ipv6.conf.${HOST_INTF} + ${IP_HOST_EXEC} sysctl -qw ${HOST_CONF}.ndisc_notify=1 + ${IP_HOST_EXEC} sysctl -qw ${HOST_CONF}.disable_ipv6=0 + ${IP_HOST} addr add ${HOST_ADDR_WITH_MASK} dev ${HOST_INTF} + + set +e +} + +start_tcpdump() { + set -e + tcpdump_stdout=`mktemp` + tcpdump_stderr=`mktemp` + ${IP_ROUTER_EXEC} timeout 15s \ + tcpdump --immediate-mode -tpni ${ROUTER_INTF} -c 1 \ + "icmp6 && icmp6[0] == 136 && src ${HOST_ADDR}" \ + > ${tcpdump_stdout} 2> /dev/null + set +e +} + +cleanup_tcpdump() +{ + set -e + [[ ! -z ${tcpdump_stdout} ]] && rm -f ${tcpdump_stdout} + [[ ! 
-z ${tcpdump_stderr} ]] && rm -f ${tcpdump_stderr} + tcpdump_stdout= + tcpdump_stderr= + set +e +} + +cleanup() +{ + cleanup_tcpdump + ip netns del ${HOST_NS} + ip netns del ${ROUTER_NS} +} + +link_up() { + set -e + ${IP_ROUTER} link set dev ${ROUTER_INTF} up + ${IP_HOST} link set dev ${HOST_INTF} up + set +e +} + +verify_ndisc() { + local drop_unsolicited_na=$1 + local accept_unsolicited_na=$2 + local forwarding=$3 + + neigh_show_output=$(${IP_ROUTER} neigh show \ + to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale) + if [ ${drop_unsolicited_na} -eq 0 ] && \ + [ ${accept_unsolicited_na} -eq 1 ] && \ + [ ${forwarding} -eq 1 ]; then + # Neighbour entry expected to be present for 011 case + [[ ${neigh_show_output} ]] + else + # Neighbour entry expected to be absent for all other cases + [[ -z ${neigh_show_output} ]] + fi +} + +test_unsolicited_na_common() +{ + # Setup the test bed, but keep links down + setup $1 $2 $3 + + # Bring the link up, wait for the NA, + # and add a delay to ensure neighbour processing is done. + link_up + start_tcpdump + + # Verify the neighbour table + verify_ndisc $1 $2 $3 + +} + +test_unsolicited_na_combination() { + test_unsolicited_na_common $1 $2 $3 + test_msg=("test_unsolicited_na: " + "drop_unsolicited_na=$1 " + "accept_unsolicited_na=$2 " + "forwarding=$3") + log_test $? 0 "${test_msg[*]}" + cleanup +} + +test_unsolicited_na_combinations() { + # Args: drop_unsolicited_na accept_unsolicited_na forwarding + + # Expect entry + test_unsolicited_na_combination 0 1 1 + + # Expect no entry + test_unsolicited_na_combination 0 0 0 + test_unsolicited_na_combination 0 0 1 + test_unsolicited_na_combination 0 1 0 + test_unsolicited_na_combination 1 0 0 + test_unsolicited_na_combination 1 0 1 + test_unsolicited_na_combination 1 1 0 + test_unsolicited_na_combination 1 1 1 +} + +############################################################################### +# usage + +usage() +{ + cat < /dev/null + +test_unsolicited_na_combinations + +printf "\nTests passed: %3d\n" ${nsuccess} +printf "Tests failed: %3d\n" ${nfail} + +exit $ret -- cgit v1.2.3-59-g8ed1b From c246f9b5fd617fe487f8b6f18851703f468501d6 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:25 +0300 Subject: devlink: add support to create line card and expose to user Extend the devlink API so the driver is going to be able to create and destroy linecard instances. There can be multiple line cards per devlink device. Expose this new type of object over devlink netlink API to the userspace, with notifications. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/net/devlink.h | 4 + include/uapi/linux/devlink.h | 7 ++ net/core/devlink.c | 270 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 280 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/net/devlink.h b/include/net/devlink.h index a30180c0988a..7aabdfb3bc6a 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -22,6 +22,7 @@ #include struct devlink; +struct devlink_linecard; struct devlink_port_phys_attrs { u32 port_number; /* Same value as "split group". 
@@ -1536,6 +1537,9 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, int devlink_rate_leaf_create(struct devlink_port *port, void *priv); void devlink_rate_leaf_destroy(struct devlink_port *devlink_port); void devlink_rate_nodes_destroy(struct devlink *devlink); +struct devlink_linecard *devlink_linecard_create(struct devlink *devlink, + unsigned int linecard_index); +void devlink_linecard_destroy(struct devlink_linecard *linecard); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index b897b80770f6..59c33ed2d3e7 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -131,6 +131,11 @@ enum devlink_command { DEVLINK_CMD_RATE_NEW, DEVLINK_CMD_RATE_DEL, + DEVLINK_CMD_LINECARD_GET, /* can dump */ + DEVLINK_CMD_LINECARD_SET, + DEVLINK_CMD_LINECARD_NEW, + DEVLINK_CMD_LINECARD_DEL, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -553,6 +558,8 @@ enum devlink_attr { DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */ + DEVLINK_ATTR_LINECARD_INDEX, /* u32 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index aeca13b6e57b..4cdacd74b82a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -54,6 +54,8 @@ struct devlink { struct list_head trap_list; struct list_head trap_group_list; struct list_head trap_policer_list; + struct list_head linecard_list; + struct mutex linecards_lock; /* protects linecard_list */ const struct devlink_ops *ops; u64 features; struct xarray snapshot_ids; @@ -70,6 +72,13 @@ struct devlink { char priv[] __aligned(NETDEV_ALIGN); }; +struct devlink_linecard { + struct list_head list; + struct devlink *devlink; + unsigned int index; + refcount_t refcount; +}; + /** * struct devlink_resource - devlink resource * @name: name of the resource @@ -397,6 +406,56 @@ devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info) return ERR_PTR(-EINVAL); } +static struct devlink_linecard * +devlink_linecard_get_by_index(struct devlink *devlink, + unsigned int linecard_index) +{ + struct devlink_linecard *devlink_linecard; + + list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) { + if (devlink_linecard->index == linecard_index) + return devlink_linecard; + } + return NULL; +} + +static bool devlink_linecard_index_exists(struct devlink *devlink, + unsigned int linecard_index) +{ + return devlink_linecard_get_by_index(devlink, linecard_index); +} + +static struct devlink_linecard * +devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs) +{ + if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) { + u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]); + struct devlink_linecard *linecard; + + mutex_lock(&devlink->linecards_lock); + linecard = devlink_linecard_get_by_index(devlink, linecard_index); + if (linecard) + refcount_inc(&linecard->refcount); + mutex_unlock(&devlink->linecards_lock); + if (!linecard) + return ERR_PTR(-ENODEV); + return linecard; + } + return ERR_PTR(-EINVAL); +} + +static struct devlink_linecard * +devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info) +{ + return devlink_linecard_get_from_attrs(devlink, info->attrs); +} + +static void devlink_linecard_put(struct devlink_linecard *linecard) +{ + if 
(refcount_dec_and_test(&linecard->refcount)) + kfree(linecard); +} + struct devlink_sb { struct list_head list; unsigned int index; @@ -617,16 +676,18 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id) #define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1) #define DEVLINK_NL_FLAG_NEED_RATE BIT(2) #define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3) +#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4) /* The per devlink instance lock is taken by default in the pre-doit * operation, yet several commands do not require this. The global * devlink lock is taken and protects from disruption by user-calls. */ -#define DEVLINK_NL_FLAG_NO_LOCK BIT(4) +#define DEVLINK_NL_FLAG_NO_LOCK BIT(5) static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { + struct devlink_linecard *linecard; struct devlink_port *devlink_port; struct devlink *devlink; int err; @@ -669,6 +730,13 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, goto unlock; } info->user_ptr[1] = rate_node; + } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) { + linecard = devlink_linecard_get_from_info(devlink, info); + if (IS_ERR(linecard)) { + err = PTR_ERR(linecard); + goto unlock; + } + info->user_ptr[1] = linecard; } return 0; @@ -683,9 +751,14 @@ unlock: static void devlink_nl_post_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { + struct devlink_linecard *linecard; struct devlink *devlink; devlink = info->user_ptr[0]; + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) { + linecard = info->user_ptr[1]; + devlink_linecard_put(linecard); + } if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK) mutex_unlock(&devlink->lock); devlink_put(devlink); @@ -1964,6 +2037,132 @@ static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, return err; } +static int devlink_nl_linecard_fill(struct sk_buff *msg, + struct devlink *devlink, + struct devlink_linecard *linecard, + enum devlink_command cmd, u32 portid, + u32 seq, int flags, + struct netlink_ext_ack *extack) +{ + void *hdr; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static void devlink_linecard_notify(struct devlink_linecard *linecard, + enum devlink_command cmd) +{ + struct devlink *devlink = linecard->devlink; + struct sk_buff *msg; + int err; + + WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW && + cmd != DEVLINK_CMD_LINECARD_DEL); + + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0, + NULL); + if (err) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); +} + +static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_linecard *linecard = info->user_ptr[1]; + struct devlink *devlink = linecard->devlink; + struct sk_buff *msg; + int err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_linecard_fill(msg, devlink, linecard, + DEVLINK_CMD_LINECARD_NEW, + info->snd_portid, 
info->snd_seq, 0, + info->extack); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_linecard *linecard; + struct devlink *devlink; + int start = cb->args[0]; + unsigned long index; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + xa_for_each_marked(&devlinks, index, devlink, DEVLINK_REGISTERED) { + if (!devlink_try_get(devlink)) + continue; + + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + goto retry; + + mutex_lock(&devlink->linecards_lock); + list_for_each_entry(linecard, &devlink->linecard_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_linecard_fill(msg, devlink, linecard, + DEVLINK_CMD_LINECARD_NEW, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI, + cb->extack); + if (err) { + mutex_unlock(&devlink->linecards_lock); + devlink_put(devlink); + goto out; + } + idx++; + } + mutex_unlock(&devlink->linecards_lock); +retry: + devlink_put(devlink); + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, enum devlink_command cmd, u32 portid, @@ -8589,6 +8788,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 }, [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 }, }; static const struct genl_small_ops devlink_nl_ops[] = { @@ -8664,6 +8864,13 @@ static const struct genl_small_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NO_LOCK, }, + { + .cmd = DEVLINK_CMD_LINECARD_GET, + .doit = devlink_nl_cmd_linecard_get_doit, + .dumpit = devlink_nl_cmd_linecard_get_dumpit, + .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, + /* can be retrieved by unprivileged users */ + }, { .cmd = DEVLINK_CMD_SB_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@ -9043,6 +9250,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, write_pnet(&devlink->_net, net); INIT_LIST_HEAD(&devlink->port_list); INIT_LIST_HEAD(&devlink->rate_list); + INIT_LIST_HEAD(&devlink->linecard_list); INIT_LIST_HEAD(&devlink->sb_list); INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); INIT_LIST_HEAD(&devlink->resource_list); @@ -9054,6 +9262,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, INIT_LIST_HEAD(&devlink->trap_policer_list); mutex_init(&devlink->lock); mutex_init(&devlink->reporters_lock); + mutex_init(&devlink->linecards_lock); refcount_set(&devlink->refcount, 1); init_completion(&devlink->comp); @@ -9080,10 +9289,14 @@ static void devlink_notify_register(struct devlink *devlink) struct devlink_param_item *param_item; struct devlink_trap_item *trap_item; struct devlink_port *devlink_port; + struct devlink_linecard *linecard; struct devlink_rate *rate_node; struct devlink_region *region; devlink_notify(devlink, DEVLINK_CMD_NEW); + list_for_each_entry(linecard, &devlink->linecard_list, list) + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + list_for_each_entry(devlink_port, &devlink->port_list, list) devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); @@ -9191,6 +9404,7 @@ void devlink_free(struct devlink *devlink) { ASSERT_DEVLINK_NOT_REGISTERED(devlink); + 
mutex_destroy(&devlink->linecards_lock);
 	mutex_destroy(&devlink->reporters_lock);
 	mutex_destroy(&devlink->lock);
 	WARN_ON(!list_empty(&devlink->trap_policer_list));
@@ -9203,6 +9417,7 @@ void devlink_free(struct devlink *devlink)
 	WARN_ON(!list_empty(&devlink->dpipe_table_list));
 	WARN_ON(!list_empty(&devlink->sb_list));
 	WARN_ON(!list_empty(&devlink->rate_list));
+	WARN_ON(!list_empty(&devlink->linecard_list));
 	WARN_ON(!list_empty(&devlink->port_list));

 	xa_destroy(&devlink->snapshot_ids);
@@ -9747,6 +9962,59 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 	return 0;
 }

+/**
+ * devlink_linecard_create - Create devlink linecard
+ *
+ * @devlink: devlink
+ * @linecard_index: driver-specific numerical identifier of the linecard
+ *
+ * Create devlink linecard instance with provided linecard index.
+ * Caller can use any indexing, even hw-related one.
+ */
+struct devlink_linecard *devlink_linecard_create(struct devlink *devlink,
+						 unsigned int linecard_index)
+{
+	struct devlink_linecard *linecard;
+
+	mutex_lock(&devlink->linecards_lock);
+	if (devlink_linecard_index_exists(devlink, linecard_index)) {
+		mutex_unlock(&devlink->linecards_lock);
+		return ERR_PTR(-EEXIST);
+	}
+
+	linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
+	if (!linecard) {
+		mutex_unlock(&devlink->linecards_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	linecard->devlink = devlink;
+	linecard->index = linecard_index;
+	list_add_tail(&linecard->list, &devlink->linecard_list);
+	refcount_set(&linecard->refcount, 1);
+	mutex_unlock(&devlink->linecards_lock);
+	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+	return linecard;
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_create);
+
+/**
+ * devlink_linecard_destroy - Destroy devlink linecard
+ *
+ * @linecard: devlink linecard
+ */
+void devlink_linecard_destroy(struct devlink_linecard *linecard)
+{
+	struct devlink *devlink = linecard->devlink;
+
+	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
+	mutex_lock(&devlink->linecards_lock);
+	list_del(&linecard->list);
+	mutex_unlock(&devlink->linecards_lock);
+	devlink_linecard_put(linecard);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_destroy);
+
 int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 			u32 size, u16 ingress_pools_count,
 			u16 egress_pools_count, u16 ingress_tc_count,
-- 
cgit v1.2.3-59-g8ed1b


From fcdc8ce23a309c26a67fc613a741d9b21a248311 Mon Sep 17 00:00:00 2001
From: Jiri Pirko
Date: Mon, 18 Apr 2022 09:42:26 +0300
Subject: devlink: implement line card provisioning

In order to be able to configure everything needed on a port/netdevice
of a line card without the line card being present, introduce line card
provisioning. By setting a type, the provisioning process starts and
the driver is expected to create placeholder instances
(ports/netdevices) for that line card type.

Allow the user to query the supported line card types over the line
card get command. Then implement a new netlink SET command to allow the
user to set/unset the card type.

On the driver API side, add provision/unprovision ops and an array of
supported types to be advertised. Upon a provision op call, the driver
should take care of creating the instances for the particular line card
type. Introduce provision_set/clear() functions to be called by the
driver once the provisioning/unprovisioning is done on its side. These
helpers are not required to be called synchronously from the ops, due
to the async nature of provisioning.
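To make the new driver API concrete before the usage example, here is a
minimal, hypothetical sketch of the ops wiring (the foo_* names, the
single-entry type table and the port helpers are invented; a real driver may
instead complete provisioning asynchronously and call the
provision_set()/provision_fail() helpers later from its own context):

/* Hypothetical driver glue for line card provisioning; all foo_*
 * identifiers are illustrative only.
 */
static const char * const foo_lc_types[] = { "16x100G" };

static unsigned int foo_lc_types_count(struct devlink_linecard *linecard,
				       void *priv)
{
	return ARRAY_SIZE(foo_lc_types);
}

static void foo_lc_types_get(struct devlink_linecard *linecard, void *priv,
			     unsigned int index, const char **type,
			     const void **type_priv)
{
	*type = foo_lc_types[index];
	*type_priv = NULL;	/* no per-type private data in this sketch */
}

static int foo_lc_provision(struct devlink_linecard *linecard, void *priv,
			    const char *type, const void *type_priv,
			    struct netlink_ext_ack *extack)
{
	struct foo_linecard *lc = priv;
	int err;

	/* Create placeholder ports/netdevices for this type; a real driver
	 * may defer this work and report the result asynchronously.
	 */
	err = foo_lc_create_ports(lc, type);	/* hypothetical helper */
	if (err) {
		devlink_linecard_provision_fail(linecard);
		return err;
	}
	devlink_linecard_provision_set(linecard, type);
	return 0;
}

static int foo_lc_unprovision(struct devlink_linecard *linecard, void *priv,
			      struct netlink_ext_ack *extack)
{
	foo_lc_destroy_ports(priv);		/* hypothetical helper */
	devlink_linecard_provision_clear(linecard);
	return 0;
}

static const struct devlink_linecard_ops foo_lc_ops = {
	.provision	= foo_lc_provision,
	.unprovision	= foo_lc_unprovision,
	.types_count	= foo_lc_types_count,
	.types_get	= foo_lc_types_get,
};

/* Registration, e.g. during driver init:
 *	lc->linecard = devlink_linecard_create(devlink, lc->slot_index,
 *					       &foo_lc_ops, lc);
 */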
Example: $ devlink port # No ports are listed $ devlink lc pci/0000:01:00.0: lc 1 state unprovisioned supported_types: 16x100G lc 2 state unprovisioned supported_types: 16x100G lc 3 state unprovisioned supported_types: 16x100G lc 4 state unprovisioned supported_types: 16x100G lc 5 state unprovisioned supported_types: 16x100G lc 6 state unprovisioned supported_types: 16x100G lc 7 state unprovisioned supported_types: 16x100G lc 8 state unprovisioned supported_types: 16x100G $ devlink lc set pci/0000:01:00.0 lc 8 type 16x100G $ devlink lc show pci/0000:01:00.0 lc 8 pci/0000:01:00.0: lc 8 state active type 16x100G supported_types: 16x100G $ devlink port pci/0000:01:00.0/0: type notset flavour cpu port 0 splittable false pci/0000:01:00.0/53: type eth netdev enp1s0nl8p1 flavour physical lc 8 port 1 splittable true lanes 4 pci/0000:01:00.0/54: type eth netdev enp1s0nl8p2 flavour physical lc 8 port 2 splittable true lanes 4 pci/0000:01:00.0/55: type eth netdev enp1s0nl8p3 flavour physical lc 8 port 3 splittable true lanes 4 pci/0000:01:00.0/56: type eth netdev enp1s0nl8p4 flavour physical lc 8 port 4 splittable true lanes 4 pci/0000:01:00.0/57: type eth netdev enp1s0nl8p5 flavour physical lc 8 port 5 splittable true lanes 4 pci/0000:01:00.0/58: type eth netdev enp1s0nl8p6 flavour physical lc 8 port 6 splittable true lanes 4 pci/0000:01:00.0/59: type eth netdev enp1s0nl8p7 flavour physical lc 8 port 7 splittable true lanes 4 pci/0000:01:00.0/60: type eth netdev enp1s0nl8p8 flavour physical lc 8 port 8 splittable true lanes 4 pci/0000:01:00.0/61: type eth netdev enp1s0nl8p9 flavour physical lc 8 port 9 splittable true lanes 4 pci/0000:01:00.0/62: type eth netdev enp1s0nl8p10 flavour physical lc 8 port 10 splittable true lanes 4 pci/0000:01:00.0/63: type eth netdev enp1s0nl8p11 flavour physical lc 8 port 11 splittable true lanes 4 pci/0000:01:00.0/64: type eth netdev enp1s0nl8p12 flavour physical lc 8 port 12 splittable true lanes 4 pci/0000:01:00.0/125: type eth netdev enp1s0nl8p13 flavour physical lc 8 port 13 splittable true lanes 4 pci/0000:01:00.0/126: type eth netdev enp1s0nl8p14 flavour physical lc 8 port 14 splittable true lanes 4 pci/0000:01:00.0/127: type eth netdev enp1s0nl8p15 flavour physical lc 8 port 15 splittable true lanes 4 pci/0000:01:00.0/128: type eth netdev enp1s0nl8p16 flavour physical lc 8 port 16 splittable true lanes 4 $ devlink lc set pci/0000:01:00.0 lc 8 notype Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../networking/devlink/devlink-linecard.rst | 121 ++++++++ Documentation/networking/devlink/index.rst | 1 + include/net/devlink.h | 43 ++- include/uapi/linux/devlink.h | 15 + net/core/devlink.c | 322 ++++++++++++++++++++- 5 files changed, 497 insertions(+), 5 deletions(-) create mode 100644 Documentation/networking/devlink/devlink-linecard.rst (limited to 'include/uapi/linux') diff --git a/Documentation/networking/devlink/devlink-linecard.rst b/Documentation/networking/devlink/devlink-linecard.rst new file mode 100644 index 000000000000..63ccd17f40ac --- /dev/null +++ b/Documentation/networking/devlink/devlink-linecard.rst @@ -0,0 +1,121 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +Devlink Line card +================= + +Background +========== + +The ``devlink-linecard`` mechanism is targeted for manipulation of +line cards that serve as a detachable PHY modules for modular switch +system. Following operations are provided: + + * Get a list of supported line card types. 
+ * Provision of a slot with specific line card type. + * Get and monitor of line card state and its change. + +Line card according to the type may contain one or more gearboxes +to mux the lanes with certain speed to multiple ports with lanes +of different speed. Line card ensures N:M mapping between +the switch ASIC modules and physical front panel ports. + +Overview +======== + +Each line card devlink object is created by device driver, +according to the physical line card slots available on the device. + +Similar to splitter cable, where the device might have no way +of detection of the splitter cable geometry, the device +might not have a way to detect line card type. For that devices, +concept of provisioning is introduced. It allows the user to: + + * Provision a line card slot with certain line card type + + - Device driver would instruct the ASIC to prepare all + resources accordingly. The device driver would + create all instances, namely devlink port and netdevices + that reside on the line card, according to the line card type + * Manipulate of line card entities even without line card + being physically connected or powered-up + * Setup splitter cable on line card ports + + - As on the ordinary ports, user may provision a splitter + cable of a certain type, without the need to + be physically connected to the port + * Configure devlink ports and netdevices + +Netdevice carrier is decided as follows: + + * Line card is not inserted or powered-down + + - The carrier is always down + * Line card is inserted and powered up + + - The carrier is decided as for ordinary port netdevice + +Line card state +=============== + +The ``devlink-linecard`` mechanism supports the following line card states: + + * ``unprovisioned``: Line card is not provisioned on the slot. + * ``unprovisioning``: Line card slot is currently being unprovisioned. + * ``provisioning``: Line card slot is currently in a process of being provisioned + with a line card type. + * ``provisioning_failed``: Provisioning was not successful. + * ``provisioned``: Line card slot is provisioned with a type. + +The following diagram provides a general overview of ``devlink-linecard`` +state transitions:: + + +-------------------------+ + | | + +----------------------------------> unprovisioned | + | | | + | +--------|-------^--------+ + | | | + | | | + | +--------v-------|--------+ + | | | + | | provisioning | + | | | + | +------------|------------+ + | | + | +-----------------------------+ + | | | + | +------------v------------+ +------------v------------+ + | | | | | + +----- provisioning_failed | | provisioned | + | | | | | + | +------------^------------+ +------------|------------+ + | | | + | | | + | | +------------v------------+ + | | | | + | | | unprovisioning | + | | | | + | | +------------|------------+ + | | | + | +-----------------------------+ + | | + +-----------------------------------------------+ + + +Example usage +============= + +.. 
code:: shell + + $ devlink lc show [ DEV [ lc LC_INDEX ] ] + $ devlink lc set DEV lc LC_INDEX [ { type LC_TYPE | notype } ] + + # Show current line card configuration and status for all slots: + $ devlink lc + + # Set slot 8 to be provisioned with type "16x100G": + $ devlink lc set pci/0000:01:00.0 lc 8 type 16x100G + + # Set slot 8 to be unprovisioned: + $ devlink lc set pci/0000:01:00.0 lc 8 notype diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst index c17cdb079611..850715512293 100644 --- a/Documentation/networking/devlink/index.rst +++ b/Documentation/networking/devlink/index.rst @@ -39,6 +39,7 @@ general. devlink-resource devlink-reload devlink-trap + devlink-linecard Driver-specific documentation ----------------------------- diff --git a/include/net/devlink.h b/include/net/devlink.h index 7aabdfb3bc6a..3e49d4ff498c 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -149,6 +149,40 @@ struct devlink_port_new_attrs { sfnum_valid:1; }; +/** + * struct devlink_linecard_ops - Linecard operations + * @provision: callback to provision the linecard slot with certain + * type of linecard. As a result of this operation, + * driver is expected to eventually (could be after + * the function call returns) call one of: + * devlink_linecard_provision_set() + * devlink_linecard_provision_fail() + * @unprovision: callback to unprovision the linecard slot. As a result + * of this operation, driver is expected to eventually + * (could be after the function call returns) call + * devlink_linecard_provision_clear() + * devlink_linecard_provision_fail() + * @same_provision: callback to ask the driver if linecard is already + * provisioned in the same way user asks this linecard to be + * provisioned. 
+ * @types_count: callback to get number of supported types + * @types_get: callback to get next type in list + */ +struct devlink_linecard_ops { + int (*provision)(struct devlink_linecard *linecard, void *priv, + const char *type, const void *type_priv, + struct netlink_ext_ack *extack); + int (*unprovision)(struct devlink_linecard *linecard, void *priv, + struct netlink_ext_ack *extack); + bool (*same_provision)(struct devlink_linecard *linecard, void *priv, + const char *type, const void *type_priv); + unsigned int (*types_count)(struct devlink_linecard *linecard, + void *priv); + void (*types_get)(struct devlink_linecard *linecard, + void *priv, unsigned int index, const char **type, + const void **type_priv); +}; + struct devlink_sb_pool_info { enum devlink_sb_pool_type pool_type; u32 size; @@ -1537,9 +1571,14 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, int devlink_rate_leaf_create(struct devlink_port *port, void *priv); void devlink_rate_leaf_destroy(struct devlink_port *devlink_port); void devlink_rate_nodes_destroy(struct devlink *devlink); -struct devlink_linecard *devlink_linecard_create(struct devlink *devlink, - unsigned int linecard_index); +struct devlink_linecard * +devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, + const struct devlink_linecard_ops *ops, void *priv); void devlink_linecard_destroy(struct devlink_linecard *linecard); +void devlink_linecard_provision_set(struct devlink_linecard *linecard, + const char *type); +void devlink_linecard_provision_clear(struct devlink_linecard *linecard); +void devlink_linecard_provision_fail(struct devlink_linecard *linecard); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 59c33ed2d3e7..de91e4a0d476 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -343,6 +343,18 @@ enum devlink_reload_limit { #define DEVLINK_RELOAD_LIMITS_VALID_MASK (_BITUL(__DEVLINK_RELOAD_LIMIT_MAX) - 1) +enum devlink_linecard_state { + DEVLINK_LINECARD_STATE_UNSPEC, + DEVLINK_LINECARD_STATE_UNPROVISIONED, + DEVLINK_LINECARD_STATE_UNPROVISIONING, + DEVLINK_LINECARD_STATE_PROVISIONING, + DEVLINK_LINECARD_STATE_PROVISIONING_FAILED, + DEVLINK_LINECARD_STATE_PROVISIONED, + + __DEVLINK_LINECARD_STATE_MAX, + DEVLINK_LINECARD_STATE_MAX = __DEVLINK_LINECARD_STATE_MAX - 1 +}; + enum devlink_attr { /* don't change the order or add anything between, this is ABI! 
*/ DEVLINK_ATTR_UNSPEC, @@ -559,6 +571,9 @@ enum devlink_attr { DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */ DEVLINK_ATTR_LINECARD_INDEX, /* u32 */ + DEVLINK_ATTR_LINECARD_STATE, /* u8 */ + DEVLINK_ATTR_LINECARD_TYPE, /* string */ + DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 4cdacd74b82a..b7c3a82fbd4b 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -72,11 +72,21 @@ struct devlink { char priv[] __aligned(NETDEV_ALIGN); }; +struct devlink_linecard_ops; +struct devlink_linecard_type; + struct devlink_linecard { struct list_head list; struct devlink *devlink; unsigned int index; refcount_t refcount; + const struct devlink_linecard_ops *ops; + void *priv; + enum devlink_linecard_state state; + struct mutex state_lock; /* Protects state */ + const char *type; + struct devlink_linecard_type *types; + unsigned int types_count; }; /** @@ -452,8 +462,10 @@ devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info) static void devlink_linecard_put(struct devlink_linecard *linecard) { - if (refcount_dec_and_test(&linecard->refcount)) + if (refcount_dec_and_test(&linecard->refcount)) { + mutex_destroy(&linecard->state_lock); kfree(linecard); + } } struct devlink_sb { @@ -2037,6 +2049,11 @@ static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, return err; } +struct devlink_linecard_type { + const char *type; + const void *priv; +}; + static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_linecard *linecard, @@ -2044,7 +2061,10 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, u32 seq, int flags, struct netlink_ext_ack *extack) { + struct devlink_linecard_type *linecard_type; + struct nlattr *attr; void *hdr; + int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) @@ -2054,6 +2074,27 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index)) goto nla_put_failure; + if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state)) + goto nla_put_failure; + if (linecard->type && + nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type)) + goto nla_put_failure; + + if (linecard->types_count) { + attr = nla_nest_start(msg, + DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES); + if (!attr) + goto nla_put_failure; + for (i = 0; i < linecard->types_count; i++) { + linecard_type = &linecard->types[i]; + if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, + linecard_type->type)) { + nla_nest_cancel(msg, attr); + goto nla_put_failure; + } + } + nla_nest_end(msg, attr); + } genlmsg_end(msg, hdr); return 0; @@ -2103,10 +2144,12 @@ static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb, if (!msg) return -ENOMEM; + mutex_lock(&linecard->state_lock); err = devlink_nl_linecard_fill(msg, devlink, linecard, DEVLINK_CMD_LINECARD_NEW, info->snd_portid, info->snd_seq, 0, info->extack); + mutex_unlock(&linecard->state_lock); if (err) { nlmsg_free(msg); return err; @@ -2139,12 +2182,14 @@ static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg, idx++; continue; } + mutex_lock(&linecard->state_lock); err = devlink_nl_linecard_fill(msg, devlink, linecard, DEVLINK_CMD_LINECARD_NEW, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->extack); + mutex_unlock(&linecard->state_lock); if (err) { mutex_unlock(&devlink->linecards_lock); devlink_put(devlink); @@ 
-2163,6 +2208,163 @@ out: return msg->len; } +static struct devlink_linecard_type * +devlink_linecard_type_lookup(struct devlink_linecard *linecard, + const char *type) +{ + struct devlink_linecard_type *linecard_type; + int i; + + for (i = 0; i < linecard->types_count; i++) { + linecard_type = &linecard->types[i]; + if (!strcmp(type, linecard_type->type)) + return linecard_type; + } + return NULL; +} + +static int devlink_linecard_type_set(struct devlink_linecard *linecard, + const char *type, + struct netlink_ext_ack *extack) +{ + const struct devlink_linecard_ops *ops = linecard->ops; + struct devlink_linecard_type *linecard_type; + int err; + + mutex_lock(&linecard->state_lock); + if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) { + NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned"); + err = -EBUSY; + goto out; + } + if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) { + NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned"); + err = -EBUSY; + goto out; + } + + linecard_type = devlink_linecard_type_lookup(linecard, type); + if (!linecard_type) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported line card type provided"); + err = -EINVAL; + goto out; + } + + if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED && + linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) { + NL_SET_ERR_MSG_MOD(extack, "Line card already provisioned"); + err = -EBUSY; + /* Check if the line card is provisioned in the same + * way the user asks. In case it is, make the operation + * to return success. + */ + if (ops->same_provision && + ops->same_provision(linecard, linecard->priv, + linecard_type->type, + linecard_type->priv)) + err = 0; + goto out; + } + + linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING; + linecard->type = linecard_type->type; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); + err = ops->provision(linecard, linecard->priv, linecard_type->type, + linecard_type->priv, extack); + if (err) { + /* Provisioning failed. Assume the linecard is unprovisioned + * for future operations. 
+ */ + mutex_lock(&linecard->state_lock); + linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; + linecard->type = NULL; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); + } + return err; + +out: + mutex_unlock(&linecard->state_lock); + return err; +} + +static int devlink_linecard_type_unset(struct devlink_linecard *linecard, + struct netlink_ext_ack *extack) +{ + int err; + + mutex_lock(&linecard->state_lock); + if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) { + NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned"); + err = -EBUSY; + goto out; + } + if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) { + NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned"); + err = -EBUSY; + goto out; + } + if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) { + linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; + linecard->type = NULL; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + err = 0; + goto out; + } + + if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) { + NL_SET_ERR_MSG_MOD(extack, "Line card is not provisioned"); + err = 0; + goto out; + } + linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); + err = linecard->ops->unprovision(linecard, linecard->priv, + extack); + if (err) { + /* Unprovisioning failed. Assume the linecard is unprovisioned + * for future operations. + */ + mutex_lock(&linecard->state_lock); + linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; + linecard->type = NULL; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); + } + return err; + +out: + mutex_unlock(&linecard->state_lock); + return err; +} + +static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_linecard *linecard = info->user_ptr[1]; + struct netlink_ext_ack *extack = info->extack; + int err; + + if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) { + const char *type; + + type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]); + if (strcmp(type, "")) { + err = devlink_linecard_type_set(linecard, type, extack); + if (err) + return err; + } else { + err = devlink_linecard_type_unset(linecard, extack); + if (err) + return err; + } + } + + return 0; +} + static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, enum devlink_command cmd, u32 portid, @@ -8789,6 +8991,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 }, + [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING }, }; static const struct genl_small_ops devlink_nl_ops[] = { @@ -8871,6 +9074,12 @@ static const struct genl_small_ops devlink_nl_ops[] = { .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, /* can be retrieved by unprivileged users */ }, + { + .cmd = DEVLINK_CMD_LINECARD_SET, + .doit = devlink_nl_cmd_linecard_set_doit, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, + }, { .cmd = DEVLINK_CMD_SB_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@ -9962,19 +10171,56 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, return 0; } +static int 
devlink_linecard_types_init(struct devlink_linecard *linecard) +{ + struct devlink_linecard_type *linecard_type; + unsigned int count; + int i; + + count = linecard->ops->types_count(linecard, linecard->priv); + linecard->types = kmalloc_array(count, sizeof(*linecard_type), + GFP_KERNEL); + if (!linecard->types) + return -ENOMEM; + linecard->types_count = count; + + for (i = 0; i < count; i++) { + linecard_type = &linecard->types[i]; + linecard->ops->types_get(linecard, linecard->priv, i, + &linecard_type->type, + &linecard_type->priv); + } + return 0; +} + +static void devlink_linecard_types_fini(struct devlink_linecard *linecard) +{ + kfree(linecard->types); +} + /** * devlink_linecard_create - Create devlink linecard * * @devlink: devlink * @linecard_index: driver-specific numerical identifier of the linecard + * @ops: linecards ops + * @priv: user priv pointer * * Create devlink linecard instance with provided linecard index. * Caller can use any indexing, even hw-related one. + * + * Return: Line card structure or an ERR_PTR() encoded error code. */ -struct devlink_linecard *devlink_linecard_create(struct devlink *devlink, - unsigned int linecard_index) +struct devlink_linecard * +devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, + const struct devlink_linecard_ops *ops, void *priv) { struct devlink_linecard *linecard; + int err; + + if (WARN_ON(!ops || !ops->provision || !ops->unprovision || + !ops->types_count || !ops->types_get)) + return ERR_PTR(-EINVAL); mutex_lock(&devlink->linecards_lock); if (devlink_linecard_index_exists(devlink, linecard_index)) { @@ -9990,6 +10236,19 @@ struct devlink_linecard *devlink_linecard_create(struct devlink *devlink, linecard->devlink = devlink; linecard->index = linecard_index; + linecard->ops = ops; + linecard->priv = priv; + linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; + mutex_init(&linecard->state_lock); + + err = devlink_linecard_types_init(linecard); + if (err) { + mutex_destroy(&linecard->state_lock); + kfree(linecard); + mutex_unlock(&devlink->linecards_lock); + return ERR_PTR(err); + } + list_add_tail(&linecard->list, &devlink->linecard_list); refcount_set(&linecard->refcount, 1); mutex_unlock(&devlink->linecards_lock); @@ -10010,11 +10269,68 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); mutex_lock(&devlink->linecards_lock); list_del(&linecard->list); + devlink_linecard_types_fini(linecard); mutex_unlock(&devlink->linecards_lock); devlink_linecard_put(linecard); } EXPORT_SYMBOL_GPL(devlink_linecard_destroy); +/** + * devlink_linecard_provision_set - Set provisioning on linecard + * + * @linecard: devlink linecard + * @type: linecard type + * + * This is either called directly from the provision() op call or + * as a result of the provision() op call asynchronously. 
+ */ +void devlink_linecard_provision_set(struct devlink_linecard *linecard, + const char *type) +{ + mutex_lock(&linecard->state_lock); + WARN_ON(linecard->type && strcmp(linecard->type, type)); + linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED; + linecard->type = type; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); +} +EXPORT_SYMBOL_GPL(devlink_linecard_provision_set); + +/** + * devlink_linecard_provision_clear - Clear provisioning on linecard + * + * @linecard: devlink linecard + * + * This is either called directly from the unprovision() op call or + * as a result of the unprovision() op call asynchronously. + */ +void devlink_linecard_provision_clear(struct devlink_linecard *linecard) +{ + mutex_lock(&linecard->state_lock); + linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; + linecard->type = NULL; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); +} +EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear); + +/** + * devlink_linecard_provision_fail - Fail provisioning on linecard + * + * @linecard: devlink linecard + * + * This is either called directly from the provision() op call or + * as a result of the provision() op call asynchronously. + */ +void devlink_linecard_provision_fail(struct devlink_linecard *linecard) +{ + mutex_lock(&linecard->state_lock); + linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); +} +EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail); + int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, -- cgit v1.2.3-59-g8ed1b From fc9f50d5b366cd9f35bdee22fe3f8d77833cb1d8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:27 +0300 Subject: devlink: implement line card active state Allow driver to mark a line card as active. Expose this state to the userspace over devlink netlink interface with proper notifications. 'active' state means that line card was plugged in after being provisioned. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../networking/devlink/devlink-linecard.rst | 11 +++--- include/net/devlink.h | 2 ++ include/uapi/linux/devlink.h | 1 + net/core/devlink.c | 41 ++++++++++++++++++++++ 4 files changed, 50 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/devlink/devlink-linecard.rst b/Documentation/networking/devlink/devlink-linecard.rst index 63ccd17f40ac..6c0b8928bc13 100644 --- a/Documentation/networking/devlink/devlink-linecard.rst +++ b/Documentation/networking/devlink/devlink-linecard.rst @@ -66,6 +66,7 @@ The ``devlink-linecard`` mechanism supports the following line card states: with a line card type. * ``provisioning_failed``: Provisioning was not successful. * ``provisioned``: Line card slot is provisioned with a type. + * ``active``: Line card is powered-up and active. 
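On the driver side, the new ``active`` state is entered and left via the
devlink_linecard_activate()/devlink_linecard_deactivate() helpers added below;
a hypothetical hotplug handler (foo_* names invented for illustration) might
use them as:

/* Hypothetical hotplug glue; foo_* names are illustrative only. */
static void foo_lc_event_up(struct foo_linecard *lc)
{
	/* Valid only while provisioned; moves the line card to the
	 * active state and notifies userspace.
	 */
	devlink_linecard_activate(lc->linecard);
}

static void foo_lc_event_down(struct foo_linecard *lc)
{
	/* Moves active -> provisioned; silently tolerated when the card
	 * goes down as part of the unprovisioning flow.
	 */
	devlink_linecard_deactivate(lc->linecard);
}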
The following diagram provides a general overview of ``devlink-linecard`` state transitions:: @@ -85,11 +86,11 @@ state transitions:: | | | +-----------------------------+ | | | - | +------------v------------+ +------------v------------+ - | | | | | - +----- provisioning_failed | | provisioned | - | | | | | - | +------------^------------+ +------------|------------+ + | +------------v------------+ +------------v------------+ +-------------------------+ + | | | | ----> | + +----- provisioning_failed | | provisioned | | active | + | | | | <---- | + | +------------^------------+ +------------|------------+ +-------------------------+ | | | | | | | | +------------v------------+ diff --git a/include/net/devlink.h b/include/net/devlink.h index 3e49d4ff498c..d8061a11fee6 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1579,6 +1579,8 @@ void devlink_linecard_provision_set(struct devlink_linecard *linecard, const char *type); void devlink_linecard_provision_clear(struct devlink_linecard *linecard); void devlink_linecard_provision_fail(struct devlink_linecard *linecard); +void devlink_linecard_activate(struct devlink_linecard *linecard); +void devlink_linecard_deactivate(struct devlink_linecard *linecard); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index de91e4a0d476..b3d40a5d72ff 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -350,6 +350,7 @@ enum devlink_linecard_state { DEVLINK_LINECARD_STATE_PROVISIONING, DEVLINK_LINECARD_STATE_PROVISIONING_FAILED, DEVLINK_LINECARD_STATE_PROVISIONED, + DEVLINK_LINECARD_STATE_ACTIVE, __DEVLINK_LINECARD_STATE_MAX, DEVLINK_LINECARD_STATE_MAX = __DEVLINK_LINECARD_STATE_MAX - 1 diff --git a/net/core/devlink.c b/net/core/devlink.c index b7c3a82fbd4b..aec0a517282c 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -10331,6 +10331,47 @@ void devlink_linecard_provision_fail(struct devlink_linecard *linecard) } EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail); +/** + * devlink_linecard_activate - Set linecard active + * + * @linecard: devlink linecard + */ +void devlink_linecard_activate(struct devlink_linecard *linecard) +{ + mutex_lock(&linecard->state_lock); + WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED); + linecard->state = DEVLINK_LINECARD_STATE_ACTIVE; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); +} +EXPORT_SYMBOL_GPL(devlink_linecard_activate); + +/** + * devlink_linecard_deactivate - Set linecard inactive + * + * @linecard: devlink linecard + */ +void devlink_linecard_deactivate(struct devlink_linecard *linecard) +{ + mutex_lock(&linecard->state_lock); + switch (linecard->state) { + case DEVLINK_LINECARD_STATE_ACTIVE: + linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED; + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + break; + case DEVLINK_LINECARD_STATE_UNPROVISIONING: + /* Line card is being deactivated as part + * of unprovisioning flow. 
+		 */
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+	mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
+
 int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
			u32 size, u16 ingress_pools_count,
			u16 egress_pools_count, u16 ingress_tc_count,
--
cgit v1.2.3-59-g8ed1b


From 38a6f0865796e26fc38fff4858f681d9ae76fa0f Mon Sep 17 00:00:00 2001
From: Tonghao Zhang
Date: Sat, 16 Apr 2022 00:40:46 +0800
Subject: net: sched: support hash selecting tx queue

This patch allows users to pick a queue_mapping range, from A to B, so
that packets can be load-balanced across tx queues A to B. The range
bounds are unsigned 16-bit values in decimal format.

$ tc filter ... action skbedit queue_mapping skbhash A B

"skbedit queue_mapping QUEUE_MAPPING" (from "man 8 tc-skbedit") is
enhanced with the flag SKBEDIT_F_TXQ_SKBHASH:

          +----+      +----+      +----+
          | P1 |      | P2 |      | Pn |
          +----+      +----+      +----+
            |           |           |
            +-----------+-----------+
                        |
                        | clsact/skbedit
                        |      MQ
                        v
            +-----------+-----------+
            | q0       | qn        | qm
            v          v           v
          HTB/FQ      FIFO   ...  FIFO

For example: if P1 sends packets to different Pods on another host and
we want to distribute those flows across tx queues qn - qm, we can use
skb->hash as the hash.

setup commands:
$ NETDEV=eth0
$ ip netns add n1
$ ip link add ipv1 link $NETDEV type ipvlan mode l2
$ ip link set ipv1 netns n1
$ ip netns exec n1 ifconfig ipv1 2.2.2.100/24 up
$ tc qdisc add dev $NETDEV clsact
$ tc filter add dev $NETDEV egress protocol ip prio 1 \
	flower skip_hw src_ip 2.2.2.100 action skbedit queue_mapping skbhash 2 6
$ tc qdisc add dev $NETDEV handle 1: root mq
$ tc qdisc add dev $NETDEV parent 1:1 handle 2: htb
$ tc class add dev $NETDEV parent 2: classid 2:1 htb rate 100kbit
$ tc class add dev $NETDEV parent 2: classid 2:2 htb rate 200kbit
$ tc qdisc add dev $NETDEV parent 1:2 tbf rate 100mbit burst 100mb latency 1
$ tc qdisc add dev $NETDEV parent 1:3 pfifo
$ tc qdisc add dev $NETDEV parent 1:4 pfifo
$ tc qdisc add dev $NETDEV parent 1:5 pfifo
$ tc qdisc add dev $NETDEV parent 1:6 pfifo
$ tc qdisc add dev $NETDEV parent 1:7 pfifo
$ ip netns exec n1 iperf3 -c 2.2.2.1 -i 1 -t 10 -P 10

packets pick a txqueue from 2 - 6:
$ ethtool -S $NETDEV | grep -i tx_queue_[0-9]_bytes
	tx_queue_0_bytes: 42
	tx_queue_1_bytes: 0
	tx_queue_2_bytes: 11442586444
	tx_queue_3_bytes: 7383615334
	tx_queue_4_bytes: 3981365579
	tx_queue_5_bytes: 3983235051
	tx_queue_6_bytes: 6706236461
	tx_queue_7_bytes: 42
	tx_queue_8_bytes: 0
	tx_queue_9_bytes: 0

txqueues 2 - 6 are mapped to classid 1:3 - 1:7:
$ tc -s class show dev $NETDEV
...
class mq 1:3 root leaf 8002:
 Sent 11949133672 bytes 7929798 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
class mq 1:4 root leaf 8003:
 Sent 7710449050 bytes 5117279 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
class mq 1:5 root leaf 8004:
 Sent 4157648675 bytes 2758990 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
class mq 1:6 root leaf 8005:
 Sent 4159632195 bytes 2759990 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
class mq 1:7 root leaf 8006:
 Sent 7003169603 bytes 4646912 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
...

Cc: Jamal Hadi Salim
Cc: Cong Wang
Cc: Jiri Pirko
Cc: "David S.
Miller" Cc: Jakub Kicinski Cc: Jonathan Lemon Cc: Eric Dumazet Cc: Alexander Lobakin Cc: Paolo Abeni Cc: Talal Ahmad Cc: Kevin Hao Cc: Ilias Apalodimas Cc: Kees Cook Cc: Kumar Kartikeya Dwivedi Cc: Antoine Tenart Cc: Wei Wang Cc: Arnd Bergmann Signed-off-by: Tonghao Zhang Reviewed-by: Jamal Hadi Salim Signed-off-by: Paolo Abeni --- include/net/tc_act/tc_skbedit.h | 1 + include/uapi/linux/tc_act/tc_skbedit.h | 2 ++ net/sched/act_skbedit.c | 49 ++++++++++++++++++++++++++++++++-- 3 files changed, 50 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index cab8229b9bed..dc1079f28e13 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -17,6 +17,7 @@ struct tcf_skbedit_params { u32 mark; u32 mask; u16 queue_mapping; + u16 mapping_mod; u16 ptype; struct rcu_head rcu; }; diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h index 800e93377218..6cb6101208d0 100644 --- a/include/uapi/linux/tc_act/tc_skbedit.h +++ b/include/uapi/linux/tc_act/tc_skbedit.h @@ -29,6 +29,7 @@ #define SKBEDIT_F_PTYPE 0x8 #define SKBEDIT_F_MASK 0x10 #define SKBEDIT_F_INHERITDSFIELD 0x20 +#define SKBEDIT_F_TXQ_SKBHASH 0x40 struct tc_skbedit { tc_gen; @@ -45,6 +46,7 @@ enum { TCA_SKBEDIT_PTYPE, TCA_SKBEDIT_MASK, TCA_SKBEDIT_FLAGS, + TCA_SKBEDIT_QUEUE_MAPPING_MAX, __TCA_SKBEDIT_MAX }; #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 1c5fdb6e7c2f..e3bd11dfe1ca 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -23,6 +23,20 @@ static unsigned int skbedit_net_id; static struct tc_action_ops act_skbedit_ops; +static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params, + struct sk_buff *skb) +{ + u16 queue_mapping = params->queue_mapping; + + if (params->flags & SKBEDIT_F_TXQ_SKBHASH) { + u32 hash = skb_get_hash(skb); + + queue_mapping += hash % params->mapping_mod; + } + + return netdev_cap_txqueue(skb->dev, queue_mapping); +} + static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { @@ -62,7 +76,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, #ifdef CONFIG_NET_EGRESS netdev_xmit_skip_txqueue(true); #endif - skb_set_queue_mapping(skb, params->queue_mapping); + skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb)); } if (params->flags & SKBEDIT_F_MARK) { skb->mark &= ~params->mask; @@ -96,6 +110,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { [TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) }, [TCA_SKBEDIT_MASK] = { .len = sizeof(u32) }, [TCA_SKBEDIT_FLAGS] = { .len = sizeof(u64) }, + [TCA_SKBEDIT_QUEUE_MAPPING_MAX] = { .len = sizeof(u16) }, }; static int tcf_skbedit_init(struct net *net, struct nlattr *nla, @@ -112,6 +127,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, struct tcf_skbedit *d; u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL; u16 *queue_mapping = NULL, *ptype = NULL; + u16 mapping_mod = 1; bool exists = false; int ret = 0, err; u32 index; @@ -157,6 +173,25 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (tb[TCA_SKBEDIT_FLAGS] != NULL) { u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]); + if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) { + u16 *queue_mapping_max; + + if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] || + !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) { + NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping."); 
+ return -EINVAL; + } + + queue_mapping_max = + nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]); + if (*queue_mapping_max < *queue_mapping) { + NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min."); + return -EINVAL; + } + + mapping_mod = *queue_mapping_max - *queue_mapping + 1; + flags |= SKBEDIT_F_TXQ_SKBHASH; + } if (*pure_flags & SKBEDIT_F_INHERITDSFIELD) flags |= SKBEDIT_F_INHERITDSFIELD; } @@ -208,8 +243,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, params_new->flags = flags; if (flags & SKBEDIT_F_PRIORITY) params_new->priority = *priority; - if (flags & SKBEDIT_F_QUEUE_MAPPING) + if (flags & SKBEDIT_F_QUEUE_MAPPING) { params_new->queue_mapping = *queue_mapping; + params_new->mapping_mod = mapping_mod; + } if (flags & SKBEDIT_F_MARK) params_new->mark = *mark; if (flags & SKBEDIT_F_PTYPE) @@ -276,6 +313,13 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, goto nla_put_failure; if (params->flags & SKBEDIT_F_INHERITDSFIELD) pure_flags |= SKBEDIT_F_INHERITDSFIELD; + if (params->flags & SKBEDIT_F_TXQ_SKBHASH) { + if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX, + params->queue_mapping + params->mapping_mod - 1)) + goto nla_put_failure; + + pure_flags |= SKBEDIT_F_TXQ_SKBHASH; + } if (pure_flags != 0 && nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags)) goto nla_put_failure; @@ -325,6 +369,7 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_skbedit)) + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */ + + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */ -- cgit v1.2.3-59-g8ed1b From b617be33502d2bfefffef71924c7a7ba50264ff6 Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Mon, 11 Apr 2022 21:45:27 +0300 Subject: spi: add SPI_RX_CPHA_FLIP mode bit Some SPI devices latch MOSI bits on one clock phase, but produce valid MISO bits on the other phase. Add SPI_RX_CPHA_FLIP mode to instruct the controller driver to flip CPHA for Rx (MISO) only transfers. Signed-off-by: Baruch Siach Link: https://lore.kernel.org/r/a715ca92713ca02071f33dcca9960a66a03c949a.1649702729.git.baruch@tkos.co.il Signed-off-by: Mark Brown --- include/uapi/linux/spi/spi.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h index 236a85f08ded..9d5f58059703 100644 --- a/include/uapi/linux/spi/spi.h +++ b/include/uapi/linux/spi/spi.h @@ -27,6 +27,7 @@ #define SPI_TX_OCTAL _BITUL(13) /* transmit with 8 wires */ #define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */ #define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */ +#define SPI_RX_CPHA_FLIP _BITUL(16) /* flip CPHA on Rx only xfer */ /* * All the bits defined above should be covered by SPI_MODE_USER_MASK. @@ -36,6 +37,6 @@ * These bits must not overlap. A static assert check should make sure of that. * If adding extra bits, make sure to increase the bit index below as well. 
*/ -#define SPI_MODE_USER_MASK (_BITUL(16) - 1) +#define SPI_MODE_USER_MASK (_BITUL(17) - 1) #endif /* _UAPI_SPI_H */ -- cgit v1.2.3-59-g8ed1b From b4000312822615ba2222e368188029e9b725dbf4 Mon Sep 17 00:00:00 2001 From: Boris Sukholitko Date: Tue, 19 Apr 2022 11:14:33 +0300 Subject: net/sched: flower: Add number of vlan tags filter These are bookkeeping parts of the new num_of_vlans filter. Defines, dump, load and set are being done here. Signed-off-by: Boris Sukholitko Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 2 ++ net/sched/cls_flower.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 404f97fb239c..9a2ee1e39fad 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -587,6 +587,8 @@ enum { TCA_FLOWER_KEY_HASH, /* u32 */ TCA_FLOWER_KEY_HASH_MASK, /* u32 */ + TCA_FLOWER_KEY_NUM_OF_VLANS, /* u8 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 86fd0420ac4f..4ec4d742e82f 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -72,6 +72,7 @@ struct fl_flow_key { } tp_range; struct flow_dissector_key_ct ct; struct flow_dissector_key_hash hash; + struct flow_dissector_key_num_of_vlans num_of_vlans; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ struct fl_flow_mask_range { @@ -712,6 +713,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 }, }; @@ -1615,6 +1617,11 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, sizeof(key->eth.src)); + fl_set_key_val(tb, &key->num_of_vlans, + TCA_FLOWER_KEY_NUM_OF_VLANS, + &mask->num_of_vlans, + TCA_FLOWER_UNSPEC, + sizeof(key->num_of_vlans)); if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], ðertype, key, mask)) { fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, @@ -1906,6 +1913,8 @@ static void fl_init_dissector(struct flow_dissector *dissector, FLOW_DISSECTOR_KEY_CT, ct); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_HASH, hash); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans); skb_flow_dissector_init(dissector, keys, cnt); } @@ -2994,6 +3003,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, sizeof(key->basic.n_proto))) goto nla_put_failure; + if (mask->num_of_vlans.num_of_vlans) { + if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans)) + goto nla_put_failure; + } + if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) goto nla_put_failure; -- cgit v1.2.3-59-g8ed1b From 9e4ab6c89109472082616f8d2f6ada7deaffe161 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:19 +0100 Subject: arm64/sme: Implement vector length configuration prctl()s As for SVE provide a prctl() interface which allows processes to configure their SME vector length. 
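As an illustration of the new interface (not part of this patch), here
is a minimal userspace sketch; it assumes a kernel with this series
applied and the PR_SME_* definitions from the updated <linux/prctl.h>.
As with the SVE prctl()s, the vector length argument is in bytes and
the kernel may grant a smaller VL than requested:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int ret;

	/* Request a 256-bit (32-byte) SME vector length. */
	ret = prctl(PR_SME_SET_VL, 32);
	if (ret < 0) {
		perror("PR_SME_SET_VL");
		return 1;
	}

	/* Read back what the kernel actually granted. */
	ret = prctl(PR_SME_GET_VL);
	if (ret < 0) {
		perror("PR_SME_GET_VL");
		return 1;
	}
	printf("SME VL: %d bytes\n", ret & PR_SME_VL_LEN_MASK);
	return 0;
}

PR_SME_SET_VL_ONEXEC and PR_SME_VL_INHERIT can be OR'd into the
argument to defer the change until exec or inherit it across exec,
mirroring the corresponding SVE flags.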
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-12-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 4 ++++ arch/arm64/include/asm/processor.h | 4 +++- arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/kernel/fpsimd.c | 32 ++++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 9 +++++++++ kernel/sys.c | 12 ++++++++++++ 6 files changed, 61 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 32cd682258d9..38fd6aab7feb 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -288,6 +288,8 @@ static inline int sme_max_virtualisable_vl(void) } extern unsigned int sme_get_vl(void); +extern int sme_set_current_vl(unsigned long arg); +extern int sme_get_current_vl(void); #else @@ -299,6 +301,8 @@ static inline void sme_setup(void) { } static inline unsigned int sme_get_vl(void) { return 0; } static inline int sme_max_vl(void) { return 0; } static inline int sme_max_virtualisable_vl(void) { return 0; } +static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; } +static inline int sme_get_current_vl(void) { return -EINVAL; } #endif /* ! CONFIG_ARM64_SME */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index abf34a9c2eab..7a57cbff8a03 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -355,9 +355,11 @@ extern void __init minsigstksz_setup(void); */ #include -/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ +/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */ #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() +#define SME_SET_VL(arg) sme_set_current_vl(arg) +#define SME_GET_VL() sme_get_current_vl() /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e1317b7c4525..4e6b58dcd6f9 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -82,6 +82,7 @@ int arch_dup_task_struct(struct task_struct *dst, #define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ #define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ +#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 754a96563f6f..39f44fcb9b99 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -149,6 +149,8 @@ static unsigned int vec_vl_inherit_flag(enum vec_type type) switch (type) { case ARM64_VEC_SVE: return TIF_SVE_VL_INHERIT; + case ARM64_VEC_SME: + return TIF_SME_VL_INHERIT; default: WARN_ON_ONCE(1); return 0; @@ -807,6 +809,36 @@ int sve_get_current_vl(void) return vec_prctl_status(ARM64_VEC_SVE, 0); } +#ifdef CONFIG_ARM64_SME +/* PR_SME_SET_VL */ +int sme_set_current_vl(unsigned long arg) +{ + unsigned long vl, flags; + int ret; + + vl = arg & PR_SME_VL_LEN_MASK; + flags = arg & ~vl; + + if (!system_supports_sme() || is_compat_task()) + return -EINVAL; + + ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags); + if (ret) + return ret; + + return vec_prctl_status(ARM64_VEC_SME, flags); +} + +/* PR_SME_GET_VL 
+ */
+int sme_get_current_vl(void)
+{
+	if (!system_supports_sme() || is_compat_task())
+		return -EINVAL;
+
+	return vec_prctl_status(ARM64_VEC_SME, 0);
+}
+#endif /* CONFIG_ARM64_SME */
+
 static void vec_probe_vqs(struct vl_info *info,
			  DECLARE_BITMAP(map, SVE_VQ_MAX))
 {
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index e998764f0262..a5e06dcbba13 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -272,6 +272,15 @@ struct prctl_mm_map {
 # define PR_SCHED_CORE_SCOPE_THREAD_GROUP	1
 # define PR_SCHED_CORE_SCOPE_PROCESS_GROUP	2

+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL			63	/* set task vector length */
+# define PR_SME_SET_VL_ONEXEC		(1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL			64	/* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK		0xffff
+# define PR_SME_VL_INHERIT		(1 << 17) /* inherit across exec */
+
 #define PR_SET_VMA		0x53564d41
 # define PR_SET_VMA_ANON_NAME		0

diff --git a/kernel/sys.c b/kernel/sys.c
index 374f83e95239..b911fa6d81ab 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -117,6 +117,12 @@
 #ifndef SVE_GET_VL
 # define SVE_GET_VL()	(-EINVAL)
 #endif
+#ifndef SME_SET_VL
+# define SME_SET_VL(a)	(-EINVAL)
+#endif
+#ifndef SME_GET_VL
+# define SME_GET_VL()	(-EINVAL)
+#endif
 #ifndef PAC_RESET_KEYS
 # define PAC_RESET_KEYS(a, b)	(-EINVAL)
 #endif
@@ -2541,6 +2547,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 	case PR_SVE_GET_VL:
 		error = SVE_GET_VL();
 		break;
+	case PR_SME_SET_VL:
+		error = SME_SET_VL(arg2);
+		break;
+	case PR_SME_GET_VL:
+		error = SME_GET_VL();
+		break;
 	case PR_GET_SPECULATION_CTRL:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
--
cgit v1.2.3-59-g8ed1b


From e12310a0d30f260b26297bc8d7c95769489af038 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 19 Apr 2022 12:22:28 +0100
Subject: arm64/sme: Implement ptrace support for streaming mode SVE registers

The streaming mode SVE registers are represented using the same data
structures as for SVE but since the vector lengths supported and in use
may not be the same as SVE we represent them with a new type NT_ARM_SSVE.

Unfortunately we only have a single 16 bit reserved field available in
the header so there is no space to fit the current and maximum vector
length for both standard and streaming SVE mode without redefining the
structure in a way that creates a complicated and fragile ABI. Since FFR
is not present in streaming mode it is read and written as zero.

Setting NT_ARM_SSVE registers will put the task into streaming mode;
similarly, setting NT_ARM_SVE registers will exit it. Reads that do not
correspond to the current mode of the task will return the header with
no register data. For compatibility reasons, on write, setting no flag
for the register type will be interpreted as setting SVE registers,
though users can provide no register data as an alternative mechanism
for doing so.
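As an illustration (not part of this patch), a debugger could probe the
streaming-mode state of a ptrace-stopped task as sketched below. The
NT_ARM_SSVE fallback define is only needed with a pre-series
<linux/elf.h>, and read_ssve_header() is a hypothetical helper name:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_sve_header, SVE_PT_* */

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b	/* from the patched <linux/elf.h> */
#endif

/* Read only the streaming-SVE header of a stopped tracee. */
static int read_ssve_header(pid_t pid, struct user_sve_header *h)
{
	struct iovec iov = { .iov_base = h, .iov_len = sizeof(*h) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_SSVE, &iov))
		return -1;

	/*
	 * If the task is not in streaming mode only the header comes
	 * back: neither SVE_PT_REGS_SVE nor SVE_PT_REGS_FPSIMD is set
	 * in h->flags and no register data follows.
	 */
	printf("ssve: vl=%u max_vl=%u flags=%#x size=%u\n",
	       h->vl, h->max_vl, h->flags, h->size);
	return 0;
}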
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-21-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/include/uapi/asm/ptrace.h | 13 ++- arch/arm64/kernel/fpsimd.c | 31 +++-- arch/arm64/kernel/ptrace.c | 214 +++++++++++++++++++++++++++-------- include/uapi/linux/elf.h | 1 + 5 files changed, 201 insertions(+), 59 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 6c33bc832ed4..5afcd0709aae 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -144,6 +144,7 @@ struct vl_info { extern void sve_alloc(struct task_struct *task); extern void fpsimd_release_task(struct task_struct *task); extern void fpsimd_sync_to_sve(struct task_struct *task); +extern void fpsimd_force_sync_to_sve(struct task_struct *task); extern void sve_sync_to_fpsimd(struct task_struct *task); extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 758ae984ff97..522b925a78c1 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -109,7 +109,7 @@ struct user_hwdebug_state { } dbg_regs[16]; }; -/* SVE/FP/SIMD state (NT_ARM_SVE) */ +/* SVE/FP/SIMD state (NT_ARM_SVE & NT_ARM_SSVE) */ struct user_sve_header { __u32 size; /* total meaningful regset content in bytes */ @@ -220,6 +220,7 @@ struct user_sve_header { (SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \ SVE_PT_SVE_PREGS_OFFSET(vq)) +/* For streaming mode SVE (SSVE) FFR must be read and written as zero */ #define SVE_PT_SVE_FFR_OFFSET(vq) \ (SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) @@ -240,10 +241,12 @@ struct user_sve_header { - SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) -#define SVE_PT_SIZE(vq, flags) \ - (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ - SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ - : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags)) +#define SVE_PT_SIZE(vq, flags) \ + (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ + SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ + : ((((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD ? 
\ + SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags) \ + : SVE_PT_REGS_OFFSET))) /* pointer authentication masks (NT_ARM_PAC_MASK) */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 80f7ca12f855..94f06e9d37cf 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -643,7 +643,7 @@ static void fpsimd_to_sve(struct task_struct *task) if (!system_supports_sve()) return; - vq = sve_vq_from_vl(task_get_sve_vl(task)); + vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); __fpsimd_to_sve(sst, fst, vq); } @@ -660,7 +660,7 @@ static void fpsimd_to_sve(struct task_struct *task) */ static void sve_to_fpsimd(struct task_struct *task) { - unsigned int vq; + unsigned int vq, vl; void const *sst = task->thread.sve_state; struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; unsigned int i; @@ -669,7 +669,8 @@ static void sve_to_fpsimd(struct task_struct *task) if (!system_supports_sve()) return; - vq = sve_vq_from_vl(task_get_sve_vl(task)); + vl = thread_get_cur_vl(&task->thread); + vq = sve_vq_from_vl(vl); for (i = 0; i < SVE_NUM_ZREGS; ++i) { p = (__uint128_t const *)ZREG(sst, vq, i); fst->vregs[i] = arm64_le128_to_cpu(*p); @@ -717,6 +718,19 @@ void sve_alloc(struct task_struct *task) } +/* + * Force the FPSIMD state shared with SVE to be updated in the SVE state + * even if the SVE state is the current active state. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void fpsimd_force_sync_to_sve(struct task_struct *task) +{ + fpsimd_to_sve(task); +} + /* * Ensure that task->thread.sve_state is up to date with respect to * the user task, irrespective of when SVE is in use or not. @@ -727,7 +741,8 @@ void sve_alloc(struct task_struct *task) */ void fpsimd_sync_to_sve(struct task_struct *task) { - if (!test_tsk_thread_flag(task, TIF_SVE)) + if (!test_tsk_thread_flag(task, TIF_SVE) && + !thread_sm_enabled(&task->thread)) fpsimd_to_sve(task); } @@ -741,7 +756,8 @@ void fpsimd_sync_to_sve(struct task_struct *task) */ void sve_sync_to_fpsimd(struct task_struct *task) { - if (test_tsk_thread_flag(task, TIF_SVE)) + if (test_tsk_thread_flag(task, TIF_SVE) || + thread_sm_enabled(&task->thread)) sve_to_fpsimd(task); } @@ -766,7 +782,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) if (!test_tsk_thread_flag(task, TIF_SVE)) return; - vq = sve_vq_from_vl(task_get_sve_vl(task)); + vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); __fpsimd_to_sve(sst, fst, vq); @@ -810,8 +826,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, /* * To ensure the FPSIMD bits of the SVE vector registers are preserved, * write any live register state back to task_struct, and convert to a - * regular FPSIMD thread. Since the vector length can only be changed - * with a syscall we can't be in streaming mode while reconfiguring. + * regular FPSIMD thread. 
*/ if (task == current) { get_cpu_fpsimd_context(); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 230a47b9189e..60185c27b394 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -713,21 +713,51 @@ static int system_call_set(struct task_struct *target, #ifdef CONFIG_ARM64_SVE static void sve_init_header_from_task(struct user_sve_header *header, - struct task_struct *target) + struct task_struct *target, + enum vec_type type) { unsigned int vq; + bool active; + bool fpsimd_only; + enum vec_type task_type; memset(header, 0, sizeof(*header)); - header->flags = test_tsk_thread_flag(target, TIF_SVE) ? - SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD; - if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) - header->flags |= SVE_PT_VL_INHERIT; + /* Check if the requested registers are active for the task */ + if (thread_sm_enabled(&target->thread)) + task_type = ARM64_VEC_SME; + else + task_type = ARM64_VEC_SVE; + active = (task_type == type); + + switch (type) { + case ARM64_VEC_SVE: + if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE); + break; + case ARM64_VEC_SME: + if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + fpsimd_only = false; + break; + default: + WARN_ON_ONCE(1); + return; + } - header->vl = task_get_sve_vl(target); + if (active) { + if (fpsimd_only) { + header->flags |= SVE_PT_REGS_FPSIMD; + } else { + header->flags |= SVE_PT_REGS_SVE; + } + } + + header->vl = task_get_vl(target, type); vq = sve_vq_from_vl(header->vl); - header->max_vl = sve_max_vl(); + header->max_vl = vec_max_vl(type); header->size = SVE_PT_SIZE(vq, header->flags); header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), SVE_PT_REGS_SVE); @@ -738,19 +768,17 @@ static unsigned int sve_size_from_header(struct user_sve_header const *header) return ALIGN(header->size, SVE_VQ_BYTES); } -static int sve_get(struct task_struct *target, - const struct user_regset *regset, - struct membuf to) +static int sve_get_common(struct task_struct *target, + const struct user_regset *regset, + struct membuf to, + enum vec_type type) { struct user_sve_header header; unsigned int vq; unsigned long start, end; - if (!system_supports_sve()) - return -EINVAL; - /* Header */ - sve_init_header_from_task(&header, target); + sve_init_header_from_task(&header, target, type); vq = sve_vq_from_vl(header.vl); membuf_write(&to, &header, sizeof(header)); @@ -758,49 +786,61 @@ static int sve_get(struct task_struct *target, if (target == current) fpsimd_preserve_current_state(); - /* Registers: FPSIMD-only case */ - BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); - if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); + + switch ((header.flags & SVE_PT_REGS_MASK)) { + case SVE_PT_REGS_FPSIMD: return __fpr_get(target, regset, to); - /* Otherwise: full SVE case */ + case SVE_PT_REGS_SVE: + start = SVE_PT_SVE_OFFSET; + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); + membuf_write(&to, target->thread.sve_state, end - start); - BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); - start = SVE_PT_SVE_OFFSET; - end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); - membuf_write(&to, target->thread.sve_state, end - start); + start = end; + end = SVE_PT_SVE_FPSR_OFFSET(vq); + membuf_zero(&to, end - start); - start = end; - end = SVE_PT_SVE_FPSR_OFFSET(vq); - membuf_zero(&to, end - start); + 
/* + * Copy fpsr, and fpcr which must follow contiguously in + * struct fpsimd_state: + */ + start = end; + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; + membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, + end - start); - /* - * Copy fpsr, and fpcr which must follow contiguously in - * struct fpsimd_state: - */ - start = end; - end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; - membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start); + start = end; + end = sve_size_from_header(&header); + return membuf_zero(&to, end - start); - start = end; - end = sve_size_from_header(&header); - return membuf_zero(&to, end - start); + default: + return 0; + } } -static int sve_set(struct task_struct *target, +static int sve_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) + struct membuf to) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_get_common(target, regset, to, ARM64_VEC_SVE); +} + +static int sve_set_common(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf, + enum vec_type type) { int ret; struct user_sve_header header; unsigned int vq; unsigned long start, end; - if (!system_supports_sve()) - return -EINVAL; - /* Header */ if (count < sizeof(header)) return -EINVAL; @@ -813,13 +853,37 @@ static int sve_set(struct task_struct *target, * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by * vec_set_vector_length(), which will also validate them for us: */ - ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl, + ret = vec_set_vector_length(target, type, header.vl, ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); if (ret) goto out; /* Actual VL set may be less than the user asked for: */ - vq = sve_vq_from_vl(task_get_sve_vl(target)); + vq = sve_vq_from_vl(task_get_vl(target, type)); + + /* Enter/exit streaming mode */ + if (system_supports_sme()) { + u64 old_svcr = target->thread.svcr; + + switch (type) { + case ARM64_VEC_SVE: + target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK; + break; + case ARM64_VEC_SME: + target->thread.svcr |= SYS_SVCR_EL0_SM_MASK; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + /* + * If we switched then invalidate any existing SVE + * state and ensure there's storage. + */ + if (target->thread.svcr != old_svcr) + sve_alloc(target); + } /* Registers: FPSIMD-only case */ @@ -828,10 +892,15 @@ static int sve_set(struct task_struct *target, ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, SVE_PT_FPSIMD_OFFSET); clear_tsk_thread_flag(target, TIF_SVE); + if (type == ARM64_VEC_SME) + fpsimd_force_sync_to_sve(target); goto out; } - /* Otherwise: full SVE case */ + /* + * Otherwise: no registers or full SVE case. For backwards + * compatibility reasons we treat empty flags as SVE registers. + */ /* * If setting a different VL from the requested VL and there is @@ -852,8 +921,9 @@ static int sve_set(struct task_struct *target, /* * Ensure target->thread.sve_state is up to date with target's - * FPSIMD regs, so that a short copyin leaves trailing registers - * unmodified. + * FPSIMD regs, so that a short copyin leaves trailing + * registers unmodified. Always enable SVE even if going into + * streaming mode. 
*/ fpsimd_sync_to_sve(target); set_tsk_thread_flag(target, TIF_SVE); @@ -889,8 +959,46 @@ out: return ret; } +static int sve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_set_common(target, regset, pos, count, kbuf, ubuf, + ARM64_VEC_SVE); +} + #endif /* CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_SME + +static int ssve_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sme()) + return -EINVAL; + + return sve_get_common(target, regset, to, ARM64_VEC_SME); +} + +static int ssve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + if (!system_supports_sme()) + return -EINVAL; + + return sve_set_common(target, regset, pos, count, kbuf, ubuf, + ARM64_VEC_SME); +} + +#endif /* CONFIG_ARM64_SME */ + #ifdef CONFIG_ARM64_PTR_AUTH static int pac_mask_get(struct task_struct *target, const struct user_regset *regset, @@ -1108,6 +1216,9 @@ enum aarch64_regset { #ifdef CONFIG_ARM64_SVE REGSET_SVE, #endif +#ifdef CONFIG_ARM64_SVE + REGSET_SSVE, +#endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, REGSET_PAC_ENABLED_KEYS, @@ -1188,6 +1299,17 @@ static const struct user_regset aarch64_regsets[] = { .set = sve_set, }, #endif +#ifdef CONFIG_ARM64_SME + [REGSET_SSVE] = { /* Streaming mode SVE */ + .core_note_type = NT_ARM_SSVE, + .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), + SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = ssve_get, + .set = ssve_set, + }, +#endif #ifdef CONFIG_ARM64_PTR_AUTH [REGSET_PAC_MASK] = { .core_note_type = NT_ARM_PAC_MASK, diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 787c657bfae8..a8dc688e1826 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -431,6 +431,7 @@ typedef struct elf64_shdr { #define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */ #define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */ #define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */ +#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ -- cgit v1.2.3-59-g8ed1b From 776b4a1cf36411e96972455ca72906b722b80ea1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:29 +0100 Subject: arm64/sme: Add ptrace support for ZA The ZA array can be read and written with the NT_ARM_ZA. Similarly to our interface for the SVE vector registers the regset consists of a header with information on the current vector length followed by an optional register data payload, represented as for signals as a series of horizontal vectors from 0 to VL/8 in the endianness independent format used for vectors. On get if ZA is enabled then register data will be provided, otherwise it will be omitted. On set if register data is provided then ZA is enabled and initialized using the provided data, otherwise it is disabled. 
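For illustration only (not part of this patch), the read side could be
driven as in the sketch below; whether ZA is enabled can be deduced from
the header size alone. The NT_ARM_ZA fallback define is only needed with
a pre-series <linux/elf.h>, and read_za() is a hypothetical helper name:

#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_za_header, ZA_PT_* */

#ifndef NT_ARM_ZA
#define NT_ARM_ZA 0x40c		/* from the patched <linux/elf.h> */
#endif

/* Return a buffer holding header plus ZA data, or NULL if ZA is off. */
static void *read_za(pid_t pid)
{
	struct user_za_header h;
	struct iovec iov = { .iov_base = &h, .iov_len = sizeof(h) };
	void *buf;

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_ZA, &iov))
		return NULL;

	/* size == ZA_PT_ZA_OFFSET means header only: ZA is disabled. */
	if (h.size <= ZA_PT_ZA_OFFSET)
		return NULL;

	buf = malloc(h.size);
	if (!buf)
		return NULL;

	iov.iov_base = buf;
	iov.iov_len = h.size;
	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_ZA, &iov)) {
		free(buf);
		return NULL;
	}
	return buf;	/* user_za_header followed by SVL x SVL bytes */
}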
Signed-off-by: Mark Brown
Reviewed-by: Catalin Marinas
Link: https://lore.kernel.org/r/20220419112247.711548-22-broonie@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/uapi/asm/ptrace.h |  56 ++++++++++++++
 arch/arm64/kernel/ptrace.c           | 144 +++++++++++++++++++++++++++++++++++
 include/uapi/linux/elf.h             |   1 +
 3 files changed, 201 insertions(+)

(limited to 'include/uapi/linux')

diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 522b925a78c1..7fa2f7036aa7 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -268,6 +268,62 @@ struct user_pac_generic_keys {
	__uint128_t	apgakey;
 };

+/* ZA state (NT_ARM_ZA) */
+
+struct user_za_header {
+	__u32 size;	/* total meaningful regset content in bytes */
+	__u32 max_size;	/* maximum possible size for this thread */
+	__u16 vl;	/* current vector length */
+	__u16 max_vl;	/* maximum possible vector length */
+	__u16 flags;
+	__u16 __reserved;
+};
+
+/*
+ * Common ZA_PT_* flags:
+ * These must be kept in sync with prctl interface in <linux/prctl.h>
+ */
+#define ZA_PT_VL_INHERIT		((1 << 17) /* PR_SME_VL_INHERIT */ >> 16)
+#define ZA_PT_VL_ONEXEC			((1 << 18) /* PR_SME_SET_VL_ONEXEC */ >> 16)
+
+
+/*
+ * The remainder of the ZA state follows struct user_za_header. The
+ * total size of the ZA state (including header) depends on the
+ * metadata in the header: ZA_PT_SIZE(vq, flags) gives the total size
+ * of the state in bytes, including the header.
+ *
+ * Refer to <asm/sigcontext.h> for details of how to pass the correct
+ * "vq" argument to these macros.
+ */
+
+/* Offset from the start of struct user_za_header to the register data */
+#define ZA_PT_ZA_OFFSET \
+	((sizeof(struct user_za_header) + (__SVE_VQ_BYTES - 1))	\
+	 / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
+
+/*
+ * The payload starts at offset ZA_PT_ZA_OFFSET, and is of size
+ * ZA_PT_ZA_SIZE(vq, flags).
+ *
+ * The ZA array is stored as a sequence of horizontal vectors ZAV of SVL/8
+ * bytes each, starting from vector 0.
+ *
+ * Additional data might be appended in the future.
+ *
+ * The ZA matrix is represented in memory in an endianness-invariant layout
+ * which differs from the layout used for the FPSIMD V-registers on big-endian
+ * systems: see sigcontext.h for more explanation.
+ */ + +#define ZA_PT_ZAV_OFFSET(vq, n) \ + (ZA_PT_ZA_OFFSET + ((vq * __SVE_VQ_BYTES) * n)) + +#define ZA_PT_ZA_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) + +#define ZA_PT_SIZE(vq) \ + (ZA_PT_ZA_OFFSET + ZA_PT_ZA_SIZE(vq)) + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 60185c27b394..47d8a7472171 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -997,6 +997,141 @@ static int ssve_set(struct task_struct *target, ARM64_VEC_SME); } +static int za_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_za_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sme()) + return -EINVAL; + + /* Header */ + memset(&header, 0, sizeof(header)); + + if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) + header.flags |= ZA_PT_VL_INHERIT; + + header.vl = task_get_sme_vl(target); + vq = sve_vq_from_vl(header.vl); + header.max_vl = sme_max_vl(); + header.max_size = ZA_PT_SIZE(vq); + + /* If ZA is not active there is only the header */ + if (thread_za_enabled(&target->thread)) + header.size = ZA_PT_SIZE(vq); + else + header.size = ZA_PT_ZA_OFFSET; + + membuf_write(&to, &header, sizeof(header)); + + BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); + end = ZA_PT_ZA_OFFSET; + + if (target == current) + fpsimd_preserve_current_state(); + + /* Any register data to include? */ + if (thread_za_enabled(&target->thread)) { + start = end; + end = ZA_PT_SIZE(vq); + membuf_write(&to, target->thread.za_state, end - start); + } + + /* Zero any trailing padding */ + start = end; + end = ALIGN(header.size, SVE_VQ_BYTES); + return membuf_zero(&to, end - start); +} + +static int za_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_za_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sme()) + return -EINVAL; + + /* Header */ + if (count < sizeof(header)) + return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, + 0, sizeof(header)); + if (ret) + goto out; + + /* + * All current ZA_PT_* flags are consumed by + * vec_set_vector_length(), which will also validate them for + * us: + */ + ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, + ((unsigned long)header.flags) << 16); + if (ret) + goto out; + + /* Actual VL set may be less than the user asked for: */ + vq = sve_vq_from_vl(task_get_sme_vl(target)); + + /* Ensure there is some SVE storage for streaming mode */ + if (!target->thread.sve_state) { + sve_alloc(target); + if (!target->thread.sve_state) { + clear_thread_flag(TIF_SME); + ret = -ENOMEM; + goto out; + } + } + + /* Allocate/reinit ZA storage */ + sme_alloc(target); + if (!target->thread.za_state) { + ret = -ENOMEM; + clear_tsk_thread_flag(target, TIF_SME); + goto out; + } + + /* If there is no data then disable ZA */ + if (!count) { + target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + goto out; + } + + /* + * If setting a different VL from the requested VL and there is + * register data, the data layout will be wrong: don't even + * try to set the registers in this case. 
+ */ + if (vq != sve_vq_from_vl(header.vl)) { + ret = -EIO; + goto out; + } + + BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); + start = ZA_PT_ZA_OFFSET; + end = ZA_PT_SIZE(vq); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + target->thread.za_state, + start, end); + if (ret) + goto out; + + /* Mark ZA as active and let userspace use it */ + set_tsk_thread_flag(target, TIF_SME); + target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK; + +out: + fpsimd_flush_task_state(target); + return ret; +} + #endif /* CONFIG_ARM64_SME */ #ifdef CONFIG_ARM64_PTR_AUTH @@ -1218,6 +1353,7 @@ enum aarch64_regset { #endif #ifdef CONFIG_ARM64_SVE REGSET_SSVE, + REGSET_ZA, #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, @@ -1309,6 +1445,14 @@ static const struct user_regset aarch64_regsets[] = { .regset_get = ssve_get, .set = ssve_set, }, + [REGSET_ZA] = { /* SME ZA */ + .core_note_type = NT_ARM_ZA, + .n = DIV_ROUND_UP(ZA_PT_ZA_SIZE(SVE_VQ_MAX), SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = za_get, + .set = za_set, + }, #endif #ifdef CONFIG_ARM64_PTR_AUTH [REGSET_PAC_MASK] = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index a8dc688e1826..97808f958903 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -432,6 +432,7 @@ typedef struct elf64_shdr { #define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */ #define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */ #define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */ +#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ -- cgit v1.2.3-59-g8ed1b From 26f89535a5bb17915a2e1062c3999a2ee797c7b0 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Wed, 13 Apr 2022 22:12:46 -0700 Subject: cxl/mbox: Use type __u32 for mailbox payload sizes Payload sizes for mailbox commands are expected to be positive values coming from userspace. The documentation correctly describes these as always unsigned values. The mailbox and send structures that support the mailbox commands however, use __s32 types for the payloads. Replace __s32 with __u32 in the mailbox and send command structures and update usages. Kernel users of the interface already block all negative values and there is no known ability for userspace to have grown a dependency on submitting negative values to the kernel. The known user of the IOCTL, the CXL command line interface (cxl-cli) already enforces positive size values. A Smatch warning of a signedness uncovered this issue. Reported-by: kernel test robot Reported-by: Dan Carpenter Signed-off-by: Alison Schofield Link: https://lore.kernel.org/r/20220414051246.1244575-1-alison.schofield@intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 28 +++++++++++++++------------- include/uapi/linux/cxl_mem.h | 14 +++++++------- 2 files changed, 22 insertions(+), 20 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 8a8388599a85..d54a6d175fff 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -35,6 +35,7 @@ static bool cxl_raw_allow_all; .flags = _flags, \ } +#define CXL_VARIABLE_PAYLOAD ~0U /* * This table defines the supported mailbox commands for the driver. This table * is made up of a UAPI structure. 
Non-negative values as parameters in the @@ -44,26 +45,26 @@ static bool cxl_raw_allow_all; static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), #ifdef CONFIG_CXL_MEM_RAW_COMMANDS - CXL_CMD(RAW, ~0, ~0, 0), + CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0), #endif - CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), CXL_CMD(GET_FW_INFO, 0, 0x50, 0), CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), - CXL_CMD(GET_LSA, 0x8, ~0, 0), + CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), - CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), - CXL_CMD(SET_LSA, ~0, 0, 0), + CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), - CXL_CMD(GET_POISON, 0x10, ~0, 0), + CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(INJECT_POISON, 0x8, 0, 0), CXL_CMD(CLEAR_POISON, 0x48, 0, 0), CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), - CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), + CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), }; /* @@ -187,9 +188,10 @@ int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, * Variable sized commands can't be validated and so it's up to the * caller to do that if they wish. */ - if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) - return -EIO; - + if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) { + if (mbox_cmd.size_out != out_size) + return -EIO; + } return 0; } EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL); @@ -275,7 +277,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, } /* Prepare to handle a full payload for variable sized output */ - if (out_size < 0) + if (out_size == CXL_VARIABLE_PAYLOAD) mbox->size_out = cxlds->payload_size; else mbox->size_out = out_size; @@ -353,11 +355,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, return -EBUSY; /* Check the input buffer is the expected size */ - if (info->size_in >= 0 && info->size_in != send_cmd->in.size) + if (info->size_in != send_cmd->in.size) return -ENOMEM; /* Check the output buffer is at least large enough */ - if (info->size_out >= 0 && send_cmd->out.size < info->size_out) + if (send_cmd->out.size < info->size_out) return -ENOMEM; *mem_cmd = (struct cxl_mem_command) { diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 8d206f27bb6d..c71021a2a9ed 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -68,8 +68,8 @@ static const struct { * struct cxl_command_info - Command information returned from a query. * @id: ID number for the command. * @flags: Flags that specify command behavior. - * @size_in: Expected input size, or -1 if variable length. - * @size_out: Expected output size, or -1 if variable length. + * @size_in: Expected input size, or ~0 if variable length. + * @size_out: Expected output size, or ~0 if variable length. * * Represents a single command that is supported by both the driver and the * hardware. This is returned as part of an array from the query ioctl. 
The @@ -78,7 +78,7 @@ static const struct { * * - @id = 10 * - @flags = 0 - * - @size_in = -1 + * - @size_in = ~0 * - @size_out = 0 * * See struct cxl_mem_query_commands. @@ -89,8 +89,8 @@ struct cxl_command_info { __u32 flags; #define CXL_MEM_COMMAND_FLAG_MASK GENMASK(0, 0) - __s32 size_in; - __s32 size_out; + __u32 size_in; + __u32 size_out; }; /** @@ -169,13 +169,13 @@ struct cxl_send_command { __u32 retval; struct { - __s32 size; + __u32 size; __u32 rsvd; __u64 payload; } in; struct { - __s32 size; + __u32 size; __u32 rsvd; __u64 payload; } out; -- cgit v1.2.3-59-g8ed1b From 567f882a401346779d05a90beb8f21865ebdd398 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 9 Mar 2022 10:55:43 +0000 Subject: media: cec.h: add cec_msg_recv_is_rx/tx_result helpers These two helper functions return true if the received message contains the result of a previous non-blocking transmit. Either the tx_status result (cec_msg_recv_is_tx_result) of the transmit, or the rx_status result (cec_msg_recv_is_rx_result) of the reply to the original transmit. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/uapi/linux/cec.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h index de936f5e446d..1d48da926216 100644 --- a/include/uapi/linux/cec.h +++ b/include/uapi/linux/cec.h @@ -142,6 +142,26 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg, msg->reply = msg->timeout = 0; } +/** + * cec_msg_recv_is_tx_result - return true if this message contains the + * result of an earlier non-blocking transmit + * @msg: the message structure from CEC_RECEIVE + */ +static inline int cec_msg_recv_is_tx_result(const struct cec_msg *msg) +{ + return msg->sequence && msg->tx_status && !msg->rx_status; +} + +/** + * cec_msg_recv_is_rx_result - return true if this message contains the + * reply of an earlier non-blocking transmit + * @msg: the message structure from CEC_RECEIVE + */ +static inline int cec_msg_recv_is_rx_result(const struct cec_msg *msg) +{ + return msg->sequence && !msg->tx_status && msg->rx_status; +} + /* cec_msg flags field */ #define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0) #define CEC_MSG_FL_RAW (1 << 1) -- cgit v1.2.3-59-g8ed1b From 4e4dab4bb6029dbee63f12a249ddc44b0124ea63 Mon Sep 17 00:00:00 2001 From: Daniel Scally Date: Wed, 2 Mar 2022 22:03:01 +0000 Subject: media: media.h: Add new media link type To describe in the kernel the connection between devices and their supporting peripherals (for example, a camera sensor and the vcm driving the focusing lens for it), add a new type of media link to introduce the concept of these ancillary links. Add some elements to the uAPI documentation to explain the new link type, their purpose and some aspects of their current implementation. 
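As a sketch of how userspace might consume the new link type
(illustrative only, not from this patch; the device path and the
fixed-size link array are assumptions of the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media.h>

int main(void)
{
	struct media_v2_link links[256];	/* assume <= 256 links */
	struct media_v2_topology topo;
	uint32_t i;
	int fd = open("/dev/media0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&topo, 0, sizeof(topo));
	topo.num_links = 256;
	topo.ptr_links = (uintptr_t)links;
	if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo)) {
		close(fd);
		return 1;
	}

	for (i = 0; i < topo.num_links; i++) {
		/* The link type lives in the top bits of the flags. */
		if ((links[i].flags & MEDIA_LNK_FL_LINK_TYPE) ==
		    MEDIA_LNK_FL_ANCILLARY_LINK)
			printf("entities %u and %u form one unit\n",
			       links[i].source_id, links[i].sink_id);
	}
	close(fd);
	return 0;
}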
Reviewed-by: Laurent Pinchart
Signed-off-by: Daniel Scally
Reviewed-by: Jean-Michel Hautbois
Signed-off-by: Sakari Ailus
Signed-off-by: Mauro Carvalho Chehab
---
 .../media/mediactl/media-controller-model.rst      |  6 ++++++
 .../userspace-api/media/mediactl/media-types.rst   | 17 ++++++++++++-----
 include/uapi/linux/media.h                         |  1 +
 3 files changed, 19 insertions(+), 5 deletions(-)

(limited to 'include/uapi/linux')

diff --git a/Documentation/userspace-api/media/mediactl/media-controller-model.rst b/Documentation/userspace-api/media/mediactl/media-controller-model.rst
index 222cb99debb5..78bfdfb2a322 100644
--- a/Documentation/userspace-api/media/mediactl/media-controller-model.rst
+++ b/Documentation/userspace-api/media/mediactl/media-controller-model.rst
@@ -33,3 +33,9 @@ are:

 - An **interface link** is a point-to-point bidirectional control connection
   between a Linux Kernel interface and an entity.
+
+- An **ancillary link** is a point-to-point connection denoting that two
+  entities form a single logical unit. For example this could represent the
+  fact that a particular camera sensor and lens controller form a single
+  physical module, meaning this lens controller drives the lens for this
+  camera sensor.
\ No newline at end of file
diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
index 0a26397bd01d..0ffeece1e0c8 100644
--- a/Documentation/userspace-api/media/mediactl/media-types.rst
+++ b/Documentation/userspace-api/media/mediactl/media-types.rst
@@ -412,14 +412,21 @@ must be set for every pad.
       is set by drivers and is read-only for applications.

     * - ``MEDIA_LNK_FL_LINK_TYPE``
-      - This is a bitmask that defines the type of the link. Currently,
-        two types of links are supported:
+      - This is a bitmask that defines the type of the link. The following
+        link types are currently supported:

	.. _MEDIA-LNK-FL-DATA-LINK:

-	``MEDIA_LNK_FL_DATA_LINK`` if the link is between two pads
+	``MEDIA_LNK_FL_DATA_LINK`` for links that represent a data connection
+	between two pads.

	.. _MEDIA-LNK-FL-INTERFACE-LINK:

-	``MEDIA_LNK_FL_INTERFACE_LINK`` if the link is between an
-	interface and an entity
+	``MEDIA_LNK_FL_INTERFACE_LINK`` for links that associate an entity to its
+	interface.
+
+	.. _MEDIA-LNK-FL-ANCILLARY-LINK:
+
+	``MEDIA_LNK_FL_ANCILLARY_LINK`` for links that represent a physical
+	relationship between two entities. The link may or may not be
+	immutable, so applications must not assume either case.

diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 200fa8462b90..afbae7213d35 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -226,6 +226,7 @@ struct media_pad_desc {
 #define MEDIA_LNK_FL_LINK_TYPE			(0xf << 28)
 #  define MEDIA_LNK_FL_DATA_LINK		(0 << 28)
 #  define MEDIA_LNK_FL_INTERFACE_LINK		(1 << 28)
+#  define MEDIA_LNK_FL_ANCILLARY_LINK		(2 << 28)

 struct media_link_desc {
	struct media_pad_desc source;
--
cgit v1.2.3-59-g8ed1b


From 3d22dd432889f2f538b53f36f9f6bcd54825fc22 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Mon, 28 Mar 2022 17:01:53 +0100
Subject: media: media.h: remove unneeded inclusion

Commit b3b7a9f138b7 ("[media] media-device: Use u64 ints for pointers")
added this #include <stdint.h>, presumably in order to use uintptr_t.
Now that it is gone, we can compile this for userspace without
<stdint.h>.
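To see the point of the change (an illustrative check, with a
hypothetical file name and include path), a minimal translation unit
now builds against the exported UAPI headers alone:

/* cc -I$KBUILD/usr/include -c media-check.c */
#include <linux/media.h>

int main(void)
{
	struct media_device_info info;

	(void)info;	/* just reference a type from the header */
	return 0;
}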
Signed-off-by: Masahiro Yamada Reviewed-by: Laurent Pinchart Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- include/uapi/linux/media.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h index afbae7213d35..3ddadaea849f 100644 --- a/include/uapi/linux/media.h +++ b/include/uapi/linux/media.h @@ -20,9 +20,6 @@ #ifndef __LINUX_MEDIA_H #define __LINUX_MEDIA_H -#ifndef __KERNEL__ -#include -#endif #include #include -- cgit v1.2.3-59-g8ed1b From 2308d5aff8d083a44aa02197d2f5687b73d98f82 Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Thu, 3 Mar 2022 15:06:31 +0000 Subject: media: v4l: Add Qualcomm custom compressed pixel formats Add custom Qualcomm raw compressed pixel formats. They are used in Qualcomm SoCs to optimize the interconnect bandwidth. Signed-off-by: Stanimir Varbanov Acked-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/v4l/pixfmt-reserved.rst | 19 +++++++++++++++++++ drivers/media/v4l2-core/v4l2-ioctl.c | 2 ++ include/uapi/linux/videodev2.h | 2 ++ 3 files changed, 23 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst index cabfa34b7db5..0ff68cd8cf62 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst @@ -239,6 +239,25 @@ please make a proposal on the linux-media mailing list. It remains an opaque intermediate format and the MDP hardware must be used to convert ``V4L2_PIX_FMT_MT21C`` to ``V4L2_PIX_FMT_NV12M``, ``V4L2_PIX_FMT_YUV420M`` or ``V4L2_PIX_FMT_YVU420``. + * .. _V4L2-PIX-FMT-QC08C: + + - ``V4L2_PIX_FMT_QC08C`` + - 'QC08C' + - Compressed Macro-tile 8-Bit YUV420 format used by Qualcomm platforms. + It is an opaque intermediate format. The used compression is lossless + and it is used by various multimedia hardware blocks like GPU, display + controllers, ISP and video accelerators. + It contains four planes for progressive video and eight planes for + interlaced video. + * .. _V4L2-PIX-FMT-QC10C: + + - ``V4L2_PIX_FMT_QC10C`` + - 'QC10C' + - Compressed Macro-tile 10-Bit YUV420 format used by Qualcomm platforms. + It is an opaque intermediate format. The used compression is lossless + and it is used by various multimedia hardware blocks like GPU, display + controllers, ISP and video accelerators. + It contains four planes for progressive video. .. 
raw:: latex \normalsize diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index db5947fbd9a9..e2636539c9db 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1445,6 +1445,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; + case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break; + case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break; default: if (fmt->description[0]) return; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 3768a0a80830..6d465dc443b7 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -746,6 +746,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */ #define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */ #define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */ +#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */ +#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */ /* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ -- cgit v1.2.3-59-g8ed1b From fcbc4acf8b8dff5fc420a14026bd4ab1798cf465 Mon Sep 17 00:00:00 2001 From: Dikshita Agarwal Date: Tue, 19 Apr 2022 06:06:42 +0100 Subject: media: v4l2-ctrls: Add intra-refresh type control Add a control to set intra-refresh type. Signed-off-by: Dikshita Agarwal Reviewed-by: Nicolas Dufresne Acked-by: Hans Verkuil Signed-off-by: Stanimir Varbanov Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/v4l/ext-ctrls-codec.rst | 22 ++++++++++++++++++++++ drivers/media/v4l2-core/v4l2-ctrls-defs.c | 9 +++++++++ include/uapi/linux/v4l2-controls.h | 5 +++++ 3 files changed, 36 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst index 4cd7c541fc30..c24977fa7329 100644 --- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst +++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst @@ -1180,6 +1180,28 @@ enum v4l2_mpeg_video_h264_entropy_mode - is set to non zero value. Applicable to H264, H263 and MPEG4 encoder. +``V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (enum)`` + +enum v4l2_mpeg_video_intra_refresh_period_type - + Sets the type of intra refresh. The period to refresh + the whole frame is specified by V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD. + Note that if this control is not present, then it is undefined what + refresh type is used and it is up to the driver to decide. + Applicable to H264 and HEVC encoders. Possible values are: + +.. tabularcolumns:: |p{9.6cm}|p{7.9cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM`` + - The whole frame is completely refreshed randomly + after the specified period. + * - ``V4L2_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC`` + - The whole frame MBs are completely refreshed in cyclic order + after the specified period. 
+ ``V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (integer)`` Intra macroblock refresh period. This sets the period to refresh the whole frame. In other words, this defines the number of frames diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c index 54ca4e6b820b..16f42d2fd359 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c @@ -572,6 +572,11 @@ const char * const *v4l2_ctrl_get_menu(u32 id) "VBV/CPB Limit", NULL, }; + static const char * const intra_refresh_period_type[] = { + "Random", + "Cyclic", + NULL, + }; switch (id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: @@ -705,6 +710,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id) return hevc_start_code; case V4L2_CID_CAMERA_ORIENTATION: return camera_orientation; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: + return intra_refresh_period_type; default: return NULL; } @@ -834,6 +841,7 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type"; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period"; case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; @@ -1360,6 +1368,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, case V4L2_CID_STATELESS_H264_DECODE_MODE: case V4L2_CID_STATELESS_H264_START_CODE: case V4L2_CID_CAMERA_ORIENTATION: + case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: *type = V4L2_CTRL_TYPE_MENU; break; case V4L2_CID_LINK_FREQ: diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index bb40129446d4..dfff69ed88f7 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -449,6 +449,11 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234) #define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235) #define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236) +#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (V4L2_CID_CODEC_BASE+237) +enum v4l2_mpeg_video_intra_refresh_period_type { + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM = 0, + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC = 1, +}; /* CIDs for the MPEG-2 Part 2 (H.262) codec */ #define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270) -- cgit v1.2.3-59-g8ed1b From 8e29da69feade64ec7fe9e1a2824b967c5183a21 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Apr 2022 10:44:00 -0600 Subject: io_uring: add support for IORING_ASYNC_CANCEL_ALL The current cancelation will look up and cancel the first request it finds based on the key passed in. Add a flag that allows canceling any request that matches the key. It completes with the number of requests found and canceled, or res < 0 if an error occurred.
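For illustration (not part of the patch itself), a minimal userspace sketch of using the new flag, poking the raw SQE fields this patch reads (opcode, addr, cancel_flags); ring setup and submission are assumed to happen elsewhere:

	#include <linux/io_uring.h>
	#include <string.h>

	/* Cancel every pending request whose user_data matches 'key'.
	 * The CQE res is the number of requests canceled, or -errno.
	 */
	static void prep_cancel_all(struct io_uring_sqe *sqe, __u64 key)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_ASYNC_CANCEL;
		sqe->addr = key;				/* user_data of the target(s) */
		sqe->cancel_flags = IORING_ASYNC_CANCEL_ALL;	/* match all, not just the first */
	}

Without IORING_ASYNC_CANCEL_ALL, a successful cancelation completes with res 0; with it, res carries the count of canceled requests.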
Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20220418164402.75259-4-axboe@kernel.dk --- fs/io-wq.h | 1 + fs/io_uring.c | 91 +++++++++++++++++++++++++++++++++---------- include/uapi/linux/io_uring.h | 7 ++++ 3 files changed, 78 insertions(+), 21 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io-wq.h b/fs/io-wq.h index dbecd27656c7..ba6eee76d028 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -155,6 +155,7 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack) struct io_wq_work { struct io_wq_work_node list; unsigned flags; + int cancel_seq; }; static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) diff --git a/fs/io_uring.c b/fs/io_uring.c index b72323c3fe9b..b43cdf1a4555 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -403,6 +403,7 @@ struct io_ring_ctx { */ struct io_rsrc_node *rsrc_node; int rsrc_cached_refs; + atomic_t cancel_seq; struct io_file_table file_table; unsigned nr_user_files; unsigned nr_user_bufs; @@ -585,6 +586,7 @@ struct io_sync { struct io_cancel { struct file *file; u64 addr; + u32 flags; }; struct io_timeout { @@ -991,6 +993,8 @@ struct io_defer_entry { struct io_cancel_data { struct io_ring_ctx *ctx; u64 data; + u32 flags; + int seq; }; struct io_op_def { @@ -1726,6 +1730,7 @@ static void io_prep_async_work(struct io_kiocb *req) req->work.list.next = NULL; req->work.flags = 0; + req->work.cancel_seq = atomic_read(&ctx->cancel_seq); if (req->flags & REQ_F_FORCE_ASYNC) req->work.flags |= IO_WQ_WORK_CONCURRENT; @@ -6159,6 +6164,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req, int v; INIT_HLIST_NODE(&req->hash_node); + req->work.cancel_seq = atomic_read(&ctx->cancel_seq); io_init_poll_iocb(poll, mask, io_poll_wake); poll->file = req->file; @@ -6316,6 +6322,11 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, continue; if (poll_only && req->opcode != IORING_OP_POLL_ADD) continue; + if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->seq == req->work.cancel_seq) + continue; + req->work.cancel_seq = cd->seq; + } return req; } return NULL; @@ -6501,9 +6512,15 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx, bool found = false; list_for_each_entry(req, &ctx->timeout_list, timeout.list) { - found = cd->data == req->cqe.user_data; - if (found) - break; + if (cd->data != req->cqe.user_data) + continue; + if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->seq == req->work.cancel_seq) + continue; + req->work.cancel_seq = cd->seq; + } + found = true; + break; } if (!found) return ERR_PTR(-ENOENT); @@ -6777,7 +6794,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data) struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_cancel_data *cd = data; - return req->ctx == cd->ctx && req->cqe.user_data == cd->data; + if (req->ctx != cd->ctx) + return false; + if (req->cqe.user_data != cd->data) + return false; + if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->seq == req->work.cancel_seq) + return false; + req->work.cancel_seq = cd->seq; + } + return true; } static int io_async_cancel_one(struct io_uring_task *tctx, @@ -6789,7 +6815,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx, if (!tctx || !tctx->io_wq) return -ENOENT; - cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false); + cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, + cd->flags & IORING_ASYNC_CANCEL_ALL); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -6837,27 +6864,33 @@ static int io_async_cancel_prep(struct 
io_kiocb *req, return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; - if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags || - sqe->splice_fd_in) + if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); + req->cancel.flags = READ_ONCE(sqe->cancel_flags); + if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL) + return -EINVAL; + return 0; } -static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) +static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, + unsigned int issue_flags) { - struct io_ring_ctx *ctx = req->ctx; - struct io_cancel_data cd = { - .ctx = ctx, - .data = req->cancel.addr, - }; + bool cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL; + struct io_ring_ctx *ctx = cd->ctx; struct io_tctx_node *node; - int ret; + int ret, nr = 0; - ret = io_try_cancel(req, &cd); - if (ret != -ENOENT) - goto done; + do { + ret = io_try_cancel(req, cd); + if (ret == -ENOENT) + break; + if (!cancel_all) + return ret; + nr++; + } while (1); /* slow path, try all io-wq's */ io_ring_submit_lock(ctx, issue_flags); @@ -6865,12 +6898,28 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) list_for_each_entry(node, &ctx->tctx_list, ctx_node) { struct io_uring_task *tctx = node->task->io_uring; - ret = io_async_cancel_one(tctx, &cd); - if (ret != -ENOENT) - break; + ret = io_async_cancel_one(tctx, cd); + if (ret != -ENOENT) { + if (!cancel_all) + break; + nr++; + } } io_ring_submit_unlock(ctx, issue_flags); -done: + return cancel_all ? nr : ret; +} + +static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_cancel_data cd = { + .ctx = req->ctx, + .data = req->cancel.addr, + .flags = req->cancel.flags, + .seq = atomic_inc_return(&req->ctx->cancel_seq), + }; + int ret; + + ret = __io_async_cancel(&cd, req, issue_flags); if (ret < 0) req_set_fail(req); io_req_complete_post(req, ret, 0); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 1845cf7c80ba..476e58a2837f 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -187,6 +187,13 @@ enum { #define IORING_POLL_UPDATE_EVENTS (1U << 1) #define IORING_POLL_UPDATE_USER_DATA (1U << 2) +/* + * ASYNC_CANCEL flags. + * + * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key + */ +#define IORING_ASYNC_CANCEL_ALL (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3-59-g8ed1b From 4bf94615b8886305199ed5755cb72fea88258d15 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Apr 2022 10:44:01 -0600 Subject: io_uring: allow IORING_OP_ASYNC_CANCEL with 'fd' key Currently sqe->addr must contain the user_data of the request being canceled. Introduce the IORING_ASYNC_CANCEL_FD flag, which tells the kernel that we're keying off the file fd instead for cancelation. This allows canceling any request that a) uses a file, and b) was assigned the file based on the value being passed in. 
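A companion sketch (again illustrative, not from the patch) keying cancelation off a file descriptor instead of user_data; the prep code in this patch reads the descriptor from sqe->fd when IORING_ASYNC_CANCEL_FD is set:

	static void prep_cancel_fd(struct io_uring_sqe *sqe, int fd)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_ASYNC_CANCEL;
		sqe->fd = fd;			/* the key is now the fd; sqe->addr is unused */
		sqe->cancel_flags = IORING_ASYNC_CANCEL_FD;
		/* OR in IORING_ASYNC_CANCEL_ALL to cancel every request on this fd */
	}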
Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20220418164402.75259-5-axboe@kernel.dk --- fs/io_uring.c | 67 ++++++++++++++++++++++++++++++++++++++----- include/uapi/linux/io_uring.h | 3 ++ 2 files changed, 63 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index b43cdf1a4555..cf0d5437b77d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -587,6 +587,7 @@ struct io_cancel { struct file *file; u64 addr; u32 flags; + s32 fd; }; struct io_timeout { @@ -992,7 +993,10 @@ struct io_defer_entry { struct io_cancel_data { struct io_ring_ctx *ctx; - u64 data; + union { + u64 data; + struct file *file; + }; u32 flags; int seq; }; @@ -6332,6 +6336,29 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, return NULL; } +static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, + struct io_cancel_data *cd) + __must_hold(&ctx->completion_lock) +{ + struct io_kiocb *req; + int i; + + for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { + struct hlist_head *list; + + list = &ctx->cancel_hash[i]; + hlist_for_each_entry(req, list, hash_node) { + if (req->file != cd->file) + continue; + if (cd->seq == req->work.cancel_seq) + continue; + req->work.cancel_seq = cd->seq; + return req; + } + } + return NULL; +} + static bool io_poll_disarm(struct io_kiocb *req) __must_hold(&ctx->completion_lock) { @@ -6345,8 +6372,12 @@ static bool io_poll_disarm(struct io_kiocb *req) static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) __must_hold(&ctx->completion_lock) { - struct io_kiocb *req = io_poll_find(ctx, false, cd); + struct io_kiocb *req; + if (cd->flags & IORING_ASYNC_CANCEL_FD) + req = io_poll_file_find(ctx, cd); + else + req = io_poll_find(ctx, false, cd); if (!req) return -ENOENT; io_poll_cancel_req(req); @@ -6796,8 +6827,13 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data) if (req->ctx != cd->ctx) return false; - if (req->cqe.user_data != cd->data) - return false; + if (cd->flags & IORING_ASYNC_CANCEL_FD) { + if (req->file != cd->file) + return false; + } else { + if (req->cqe.user_data != cd->data) + return false; + } if (cd->flags & IORING_ASYNC_CANCEL_ALL) { if (cd->seq == req->work.cancel_seq) return false; @@ -6851,7 +6887,8 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd) ret = io_poll_cancel(ctx, cd); if (ret != -ENOENT) goto out; - ret = io_timeout_cancel(ctx, cd); + if (!(cd->flags & IORING_ASYNC_CANCEL_FD)) + ret = io_timeout_cancel(ctx, cd); out: spin_unlock(&ctx->completion_lock); return ret; @@ -6862,15 +6899,17 @@ static int io_async_cancel_prep(struct io_kiocb *req, { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + if (unlikely(req->flags & REQ_F_BUFFER_SELECT)) return -EINVAL; if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); req->cancel.flags = READ_ONCE(sqe->cancel_flags); - if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL) + if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD)) return -EINVAL; + if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) + req->cancel.fd = READ_ONCE(sqe->fd); return 0; } @@ -6919,7 +6958,21 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) }; int ret; + if (cd.flags & IORING_ASYNC_CANCEL_FD) { + if (req->flags & REQ_F_FIXED_FILE) + req->file = io_file_get_fixed(req, req->cancel.fd, + 
issue_flags); + else + req->file = io_file_get_normal(req, req->cancel.fd); + if (!req->file) { + ret = -EBADF; + goto done; + } + cd.file = req->file; + } + ret = __io_async_cancel(&cd, req, issue_flags); +done: if (ret < 0) req_set_fail(req); io_req_complete_post(req, ret, 0); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 476e58a2837f..cc7fe82a1798 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -191,8 +191,11 @@ enum { * ASYNC_CANCEL flags. * * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key + * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the + * request 'user_data' */ #define IORING_ASYNC_CANCEL_ALL (1U << 0) +#define IORING_ASYNC_CANCEL_FD (1U << 1) /* * IO completion data structure (Completion Queue Entry) -- cgit v1.2.3-59-g8ed1b From 970f256edb8c1259c8ed48d52b38215135396126 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Apr 2022 10:44:02 -0600 Subject: io_uring: add support for IORING_ASYNC_CANCEL_ANY Rather than match on a specific key, be it user_data or file, allow canceling any request that we can look up. Works like IORING_ASYNC_CANCEL_ALL in that it cancels multiple requests, but it doesn't key off user_data or the file. Can't be set with IORING_ASYNC_CANCEL_FD, as that's a key selector. Only one may be used at a time. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20220418164402.75259-6-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 39 +++++++++++++++++++++++++-------------- include/uapi/linux/io_uring.h | 2 ++ 2 files changed, 27 insertions(+), 14 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index cf0d5437b77d..03134ec070a3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6348,7 +6348,8 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, list = &ctx->cancel_hash[i]; hlist_for_each_entry(req, list, hash_node) { - if (req->file != cd->file) + if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) && + req->file != cd->file) continue; if (cd->seq == req->work.cancel_seq) continue; @@ -6374,7 +6375,7 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) { struct io_kiocb *req; - if (cd->flags & IORING_ASYNC_CANCEL_FD) + if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY)) req = io_poll_file_find(ctx, cd); else req = io_poll_find(ctx, false, cd); @@ -6543,9 +6544,10 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx, bool found = false; list_for_each_entry(req, &ctx->timeout_list, timeout.list) { - if (cd->data != req->cqe.user_data) + if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) && + cd->data != req->cqe.user_data) continue; - if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) { if (cd->seq == req->work.cancel_seq) continue; req->work.cancel_seq = cd->seq; @@ -6827,14 +6829,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data) if (req->ctx != cd->ctx) return false; - if (cd->flags & IORING_ASYNC_CANCEL_FD) { + if (cd->flags & IORING_ASYNC_CANCEL_ANY) { + ; + } else if (cd->flags & IORING_ASYNC_CANCEL_FD) { if (req->file != cd->file) return false; } else { if (req->cqe.user_data != cd->data) return false; } - if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) { if (cd->seq == req->work.cancel_seq) return false; req->work.cancel_seq = cd->seq; @@ -6847,12 +6851,13 @@ static int
io_async_cancel_one(struct io_uring_task *tctx, { enum io_wq_cancel cancel_ret; int ret = 0; + bool all; if (!tctx || !tctx->io_wq) return -ENOENT; - cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, - cd->flags & IORING_ASYNC_CANCEL_ALL); + all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY); + cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -6894,6 +6899,9 @@ out: return ret; } +#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \ + IORING_ASYNC_CANCEL_ANY) + static int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -6906,10 +6914,13 @@ static int io_async_cancel_prep(struct io_kiocb *req, req->cancel.addr = READ_ONCE(sqe->addr); req->cancel.flags = READ_ONCE(sqe->cancel_flags); - if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD)) + if (req->cancel.flags & ~CANCEL_FLAGS) return -EINVAL; - if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) + if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) { + if (req->cancel.flags & IORING_ASYNC_CANCEL_ANY) + return -EINVAL; req->cancel.fd = READ_ONCE(sqe->fd); + } return 0; } @@ -6917,7 +6928,7 @@ static int io_async_cancel_prep(struct io_kiocb *req, static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, unsigned int issue_flags) { - bool cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL; + bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY); struct io_ring_ctx *ctx = cd->ctx; struct io_tctx_node *node; int ret, nr = 0; @@ -6926,7 +6937,7 @@ static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, ret = io_try_cancel(req, cd); if (ret == -ENOENT) break; - if (!cancel_all) + if (!all) return ret; nr++; } while (1); @@ -6939,13 +6950,13 @@ static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, ret = io_async_cancel_one(tctx, cd); if (ret != -ENOENT) { - if (!cancel_all) + if (!all) break; nr++; } } io_ring_submit_unlock(ctx, issue_flags); - return cancel_all ? nr : ret; + return all ? nr : ret; } static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index cc7fe82a1798..980d82eb196e 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -193,9 +193,11 @@ enum { * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the * request 'user_data' + * IORING_ASYNC_CANCEL_ANY Match any request */ #define IORING_ASYNC_CANCEL_ALL (1U << 0) #define IORING_ASYNC_CANCEL_FD (1U << 1) +#define IORING_ASYNC_CANCEL_ANY (1U << 2) /* * IO completion data structure (Completion Queue Entry) -- cgit v1.2.3-59-g8ed1b From e9621e2bec80fe63f677a759066a5089b292f43a Mon Sep 17 00:00:00 2001 From: Stefan Roesch Date: Wed, 23 Mar 2022 08:44:19 -0700 Subject: io_uring: add fsetxattr and setxattr support This adds support to io_uring for the fsetxattr and setxattr API. 
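To make the SQE layout concrete, a hypothetical prep sketch (not an official liburing helper) against the fields the prep code below reads, namely addr for the name, addr2 for the value, addr3 for the path, len for the size, and xattr_flags for the flags:

	#include <linux/io_uring.h>
	#include <string.h>

	static void prep_setxattr(struct io_uring_sqe *sqe, const char *path,
				  const char *name, const void *value,
				  unsigned int size, int flags)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_SETXATTR;
		sqe->addr = (unsigned long)name;	/* attribute name */
		sqe->addr2 = (unsigned long)value;	/* attribute value */
		sqe->addr3 = (unsigned long)path;	/* target path */
		sqe->len = size;			/* value length */
		sqe->xattr_flags = flags;		/* XATTR_CREATE / XATTR_REPLACE */
	}

For IORING_OP_FSETXATTR the path in addr3 is dropped and sqe->fd names the open file instead.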
Signed-off-by: Stefan Roesch Acked-by: Christian Brauner Link: https://lore.kernel.org/r/20220323154420.3301504-4-shr@fb.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 165 ++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/io_uring.h | 6 +- 2 files changed, 170 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index e57d47a23682..bb812d007d30 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -80,6 +80,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -779,6 +780,12 @@ struct io_async_rw { struct wait_page_queue wpq; }; +struct io_xattr { + struct file *file; + struct xattr_ctx ctx; + struct filename *filename; +}; + enum { REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, @@ -943,6 +950,7 @@ struct io_kiocb { struct io_symlink symlink; struct io_hardlink hardlink; struct io_msg msg; + struct io_xattr xattr; }; u8 opcode; @@ -1211,6 +1219,10 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_MSG_RING] = { .needs_file = 1, }, + [IORING_OP_FSETXATTR] = { + .needs_file = 1 + }, + [IORING_OP_SETXATTR] = {}, }; /* requests with any of those set should undergo io_disarm_next() */ @@ -4190,6 +4202,144 @@ static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) return 0; } +static inline void __io_xattr_finish(struct io_kiocb *req) +{ + struct io_xattr *ix = &req->xattr; + + if (ix->filename) + putname(ix->filename); + + kfree(ix->ctx.kname); + kvfree(ix->ctx.kvalue); +} + +static void io_xattr_finish(struct io_kiocb *req, int ret) +{ + req->flags &= ~REQ_F_NEED_CLEANUP; + + __io_xattr_finish(req); + if (ret < 0) + req_set_fail(req); + + io_req_complete(req, ret); +} + +static int __io_setxattr_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_xattr *ix = &req->xattr; + const char __user *name; + int ret; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->ioprio)) + return -EINVAL; + if (unlikely(req->flags & REQ_F_FIXED_FILE)) + return -EBADF; + + ix->filename = NULL; + name = u64_to_user_ptr(READ_ONCE(sqe->addr)); + ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + ix->ctx.kvalue = NULL; + ix->ctx.size = READ_ONCE(sqe->len); + ix->ctx.flags = READ_ONCE(sqe->xattr_flags); + + ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL); + if (!ix->ctx.kname) + return -ENOMEM; + + ret = setxattr_copy(name, &ix->ctx); + if (ret) { + kfree(ix->ctx.kname); + return ret; + } + + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} + +static int io_setxattr_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_xattr *ix = &req->xattr; + const char __user *path; + int ret; + + ret = __io_setxattr_prep(req, sqe); + if (ret) + return ret; + + path = u64_to_user_ptr(READ_ONCE(sqe->addr3)); + + ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL); + if (IS_ERR(ix->filename)) { + ret = PTR_ERR(ix->filename); + ix->filename = NULL; + } + + return ret; +} + +static int io_fsetxattr_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + return __io_setxattr_prep(req, sqe); +} + +static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags, + struct path *path) +{ + struct io_xattr *ix = &req->xattr; + int ret; + + ret = mnt_want_write(path->mnt); + if (!ret) { + ret = do_setxattr(mnt_user_ns(path->mnt), path->dentry, &ix->ctx); + mnt_drop_write(path->mnt); + } + + return ret; +} + +static int io_fsetxattr(struct io_kiocb *req, 
unsigned int issue_flags) +{ + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + ret = __io_setxattr(req, issue_flags, &req->file->f_path); + io_xattr_finish(req, ret); + + return 0; +} + +static int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_xattr *ix = &req->xattr; + unsigned int lookup_flags = LOOKUP_FOLLOW; + struct path path; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + +retry: + ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL); + if (!ret) { + ret = __io_setxattr(req, issue_flags, &path); + path_put(&path); + if (retry_estale(ret, lookup_flags)) { + lookup_flags |= LOOKUP_REVAL; + goto retry; + } + } + + io_xattr_finish(req, ret); + return 0; +} + static int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -7151,6 +7301,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_linkat_prep(req, sqe); case IORING_OP_MSG_RING: return io_msg_ring_prep(req, sqe); + case IORING_OP_FSETXATTR: + return io_fsetxattr_prep(req, sqe); + case IORING_OP_SETXATTR: + return io_setxattr_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", @@ -7295,6 +7449,10 @@ static void io_clean_op(struct io_kiocb *req) if (req->statx.filename) putname(req->statx.filename); break; + case IORING_OP_SETXATTR: + case IORING_OP_FSETXATTR: + __io_xattr_finish(req); + break; } } if ((req->flags & REQ_F_POLLED) && req->apoll) { @@ -7451,6 +7609,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) case IORING_OP_MSG_RING: ret = io_msg_ring(req, issue_flags); break; + case IORING_OP_FSETXATTR: + ret = io_fsetxattr(req, issue_flags); + break; + case IORING_OP_SETXATTR: + ret = io_setxattr(req, issue_flags); + break; default: ret = -EINVAL; break; @@ -12012,6 +12176,7 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(42, __u16, personality); BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); BUILD_BUG_SQE_ELEM(44, __u32, file_index); + BUILD_BUG_SQE_ELEM(48, __u64, addr3); BUILD_BUG_ON(sizeof(struct io_uring_files_update) != sizeof(struct io_uring_rsrc_update)); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 980d82eb196e..864bd6a4d4ff 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -45,6 +45,7 @@ struct io_uring_sqe { __u32 rename_flags; __u32 unlink_flags; __u32 hardlink_flags; + __u32 xattr_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -60,7 +61,8 @@ struct io_uring_sqe { __s32 splice_fd_in; __u32 file_index; }; - __u64 __pad2[2]; + __u64 addr3; + __u64 __pad2[1]; }; enum { @@ -145,6 +147,8 @@ enum { IORING_OP_SYMLINKAT, IORING_OP_LINKAT, IORING_OP_MSG_RING, + IORING_OP_FSETXATTR, + IORING_OP_SETXATTR, /* this goes last, obviously */ IORING_OP_LAST, -- cgit v1.2.3-59-g8ed1b From a56834e0fafe0adf7f22a28a5dbec3e8c3031a0e Mon Sep 17 00:00:00 2001 From: Stefan Roesch Date: Wed, 23 Mar 2022 08:44:20 -0700 Subject: io_uring: add fgetxattr and getxattr support This adds support to io_uring for the fgetxattr and getxattr API. 
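A matching hypothetical sketch for the read side; per the prep helpers below, addr carries the name, addr2 the destination buffer, addr3 the path, len the buffer size, and xattr_flags must be zero:

	static void prep_getxattr(struct io_uring_sqe *sqe, const char *path,
				  const char *name, void *buf, unsigned int size)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_GETXATTR;
		sqe->addr = (unsigned long)name;	/* attribute name */
		sqe->addr2 = (unsigned long)buf;	/* destination buffer */
		sqe->addr3 = (unsigned long)path;	/* target path (use sqe->fd for FGETXATTR) */
		sqe->len = size;			/* buffer size; CQE res is the value length */
	}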
Signed-off-by: Stefan Roesch Acked-by: Christian Brauner Link: https://lore.kernel.org/r/20220323154420.3301504-5-shr@fb.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 129 ++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/io_uring.h | 2 + 2 files changed, 131 insertions(+) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index bb812d007d30..2221fe01b1c4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1223,6 +1223,10 @@ static const struct io_op_def io_op_defs[] = { .needs_file = 1 }, [IORING_OP_SETXATTR] = {}, + [IORING_OP_FGETXATTR] = { + .needs_file = 1 + }, + [IORING_OP_GETXATTR] = {}, }; /* requests with any of those set should undergo io_disarm_next() */ @@ -4224,6 +4228,119 @@ static void io_xattr_finish(struct io_kiocb *req, int ret) io_req_complete(req, ret); } +static int __io_getxattr_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_xattr *ix = &req->xattr; + const char __user *name; + int ret; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->ioprio)) + return -EINVAL; + if (unlikely(req->flags & REQ_F_FIXED_FILE)) + return -EBADF; + + ix->filename = NULL; + ix->ctx.kvalue = NULL; + name = u64_to_user_ptr(READ_ONCE(sqe->addr)); + ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + ix->ctx.size = READ_ONCE(sqe->len); + ix->ctx.flags = READ_ONCE(sqe->xattr_flags); + + if (ix->ctx.flags) + return -EINVAL; + + ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL); + if (!ix->ctx.kname) + return -ENOMEM; + + ret = strncpy_from_user(ix->ctx.kname->name, name, + sizeof(ix->ctx.kname->name)); + if (!ret || ret == sizeof(ix->ctx.kname->name)) + ret = -ERANGE; + if (ret < 0) { + kfree(ix->ctx.kname); + return ret; + } + + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} + +static int io_fgetxattr_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + return __io_getxattr_prep(req, sqe); +} + +static int io_getxattr_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_xattr *ix = &req->xattr; + const char __user *path; + int ret; + + ret = __io_getxattr_prep(req, sqe); + if (ret) + return ret; + + path = u64_to_user_ptr(READ_ONCE(sqe->addr3)); + + ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL); + if (IS_ERR(ix->filename)) { + ret = PTR_ERR(ix->filename); + ix->filename = NULL; + } + + return ret; +} + +static int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_xattr *ix = &req->xattr; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + ret = do_getxattr(mnt_user_ns(req->file->f_path.mnt), + req->file->f_path.dentry, + &ix->ctx); + + io_xattr_finish(req, ret); + return 0; +} + +static int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_xattr *ix = &req->xattr; + unsigned int lookup_flags = LOOKUP_FOLLOW; + struct path path; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + +retry: + ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL); + if (!ret) { + ret = do_getxattr(mnt_user_ns(path.mnt), + path.dentry, + &ix->ctx); + + path_put(&path); + if (retry_estale(ret, lookup_flags)) { + lookup_flags |= LOOKUP_REVAL; + goto retry; + } + } + + io_xattr_finish(req, ret); + return 0; +} + static int __io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -7305,6 +7422,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_fsetxattr_prep(req, sqe); 
case IORING_OP_SETXATTR: return io_setxattr_prep(req, sqe); + case IORING_OP_FGETXATTR: + return io_fgetxattr_prep(req, sqe); + case IORING_OP_GETXATTR: + return io_getxattr_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", @@ -7451,6 +7572,8 @@ static void io_clean_op(struct io_kiocb *req) break; case IORING_OP_SETXATTR: case IORING_OP_FSETXATTR: + case IORING_OP_GETXATTR: + case IORING_OP_FGETXATTR: __io_xattr_finish(req); break; } @@ -7615,6 +7738,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) case IORING_OP_SETXATTR: ret = io_setxattr(req, issue_flags); break; + case IORING_OP_FGETXATTR: + ret = io_fgetxattr(req, issue_flags); + break; + case IORING_OP_GETXATTR: + ret = io_getxattr(req, issue_flags); + break; default: ret = -EINVAL; break; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 864bd6a4d4ff..8ca1d9ae56d6 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -149,6 +149,8 @@ enum { IORING_OP_MSG_RING, IORING_OP_FSETXATTR, IORING_OP_SETXATTR, + IORING_OP_FGETXATTR, + IORING_OP_GETXATTR, /* this goes last, obviously */ IORING_OP_LAST, -- cgit v1.2.3-59-g8ed1b From 1374e08e2d44863c931910797852589803997668 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 12 Apr 2022 14:22:40 -0600 Subject: io_uring: add socket(2) support Supports both regular socket(2), where a normal file descriptor is instantiated when called, and direct descriptors. Link: https://lore.kernel.org/r/20220412202240.234207-3-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 76 +++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/io_uring.h | 1 + 2 files changed, 77 insertions(+) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 2221fe01b1c4..bf95ef9240e5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -576,6 +576,16 @@ struct io_accept { unsigned long nofile; }; +struct io_socket { + struct file *file; + int domain; + int type; + int protocol; + int flags; + u32 file_slot; + unsigned long nofile; +}; + struct io_sync { struct file *file; loff_t len; @@ -951,6 +961,7 @@ struct io_kiocb { struct io_hardlink hardlink; struct io_msg msg; struct io_xattr xattr; + struct io_socket sock; }; u8 opcode; @@ -1227,6 +1238,9 @@ static const struct io_op_def io_op_defs[] = { .needs_file = 1 }, [IORING_OP_GETXATTR] = {}, + [IORING_OP_SOCKET] = { + .audit_skip = 1, + }, }; /* requests with any of those set should undergo io_disarm_next() */ @@ -6017,6 +6031,62 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags) return 0; } +static int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_socket *sock = &req->sock; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->addr || sqe->rw_flags || sqe->buf_index) + return -EINVAL; + + sock->domain = READ_ONCE(sqe->fd); + sock->type = READ_ONCE(sqe->off); + sock->protocol = READ_ONCE(sqe->len); + sock->file_slot = READ_ONCE(sqe->file_index); + sock->nofile = rlimit(RLIMIT_NOFILE); + + sock->flags = sock->type & ~SOCK_TYPE_MASK; + if (sock->file_slot && (sock->flags & SOCK_CLOEXEC)) + return -EINVAL; + if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) + return -EINVAL; + return 0; +} + +static int io_socket(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_socket *sock = &req->sock; + bool fixed = !!sock->file_slot; + struct file *file; + int ret, fd; + + if (!fixed) { + fd =
__get_unused_fd_flags(sock->flags, sock->nofile); + if (unlikely(fd < 0)) + return fd; + } + file = __sys_socket_file(sock->domain, sock->type, sock->protocol); + if (IS_ERR(file)) { + if (!fixed) + put_unused_fd(fd); + ret = PTR_ERR(file); + if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) + return -EAGAIN; + if (ret == -ERESTARTSYS) + ret = -EINTR; + req_set_fail(req); + } else if (!fixed) { + fd_install(fd, file); + ret = fd; + } else { + ret = io_install_fixed_file(req, file, issue_flags, + sock->file_slot - 1); + } + __io_req_complete(req, issue_flags, ret, 0); + return 0; +} + static int io_connect_prep_async(struct io_kiocb *req) { struct io_async_connect *io = req->async_data; @@ -6105,6 +6175,7 @@ IO_NETOP_PREP_ASYNC(sendmsg); IO_NETOP_PREP_ASYNC(recvmsg); IO_NETOP_PREP_ASYNC(connect); IO_NETOP_PREP(accept); +IO_NETOP_PREP(socket); IO_NETOP_FN(send); IO_NETOP_FN(recv); #endif /* CONFIG_NET */ @@ -7426,6 +7497,8 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_fgetxattr_prep(req, sqe); case IORING_OP_GETXATTR: return io_getxattr_prep(req, sqe); + case IORING_OP_SOCKET: + return io_socket_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", @@ -7744,6 +7817,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) case IORING_OP_GETXATTR: ret = io_getxattr(req, issue_flags); break; + case IORING_OP_SOCKET: + ret = io_socket(req, issue_flags); + break; default: ret = -EINVAL; break; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8ca1d9ae56d6..5fb52bf32435 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -151,6 +151,7 @@ enum { IORING_OP_SETXATTR, IORING_OP_FGETXATTR, IORING_OP_GETXATTR, + IORING_OP_SOCKET, /* this goes last, obviously */ IORING_OP_LAST, -- cgit v1.2.3-59-g8ed1b From 8d92e4fbcf0fb7ecb24223b7b1ce95b9beb4dfa2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:21 +0300 Subject: devlink: introduce line card devices support A line card can contain one or more devices that make sense to expose to the user. For example, this can be a gearbox with flash memory, which could be updated. Give the driver the ability to attach such devices to a line card and expose them to the user. Example: $ devlink lc show pci/0000:01:00.0 lc 8 pci/0000:01:00.0: lc 8 state active type 16x100G supported_types: 16x100G devices: device 0 device 1 device 2 device 3 Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S.
Miller --- include/net/devlink.h | 7 +++ include/uapi/linux/devlink.h | 3 ++ net/core/devlink.c | 104 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 113 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/net/devlink.h b/include/net/devlink.h index 2a2a2a0c93f7..c84b52fb9ff0 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1578,6 +1578,13 @@ struct devlink_linecard * devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, const struct devlink_linecard_ops *ops, void *priv); void devlink_linecard_destroy(struct devlink_linecard *linecard); +struct devlink_linecard_device; +struct devlink_linecard_device * +devlink_linecard_device_create(struct devlink_linecard *linecard, + unsigned int device_index); +void +devlink_linecard_device_destroy(struct devlink_linecard *linecard, + struct devlink_linecard_device *linecard_device); void devlink_linecard_provision_set(struct devlink_linecard *linecard, const char *type); void devlink_linecard_provision_clear(struct devlink_linecard *linecard); diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index b3d40a5d72ff..cd578645f94f 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -575,6 +575,9 @@ enum devlink_attr { DEVLINK_ATTR_LINECARD_STATE, /* u8 */ DEVLINK_ATTR_LINECARD_TYPE, /* string */ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */ + DEVLINK_ATTR_LINECARD_DEVICE_LIST, /* nested */ + DEVLINK_ATTR_LINECARD_DEVICE, /* nested */ + DEVLINK_ATTR_LINECARD_DEVICE_INDEX, /* u32 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 5cc88490f18f..41d9631ceada 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -83,10 +83,11 @@ struct devlink_linecard { const struct devlink_linecard_ops *ops; void *priv; enum devlink_linecard_state state; - struct mutex state_lock; /* Protects state */ + struct mutex state_lock; /* Protects state and device_list */ const char *type; struct devlink_linecard_type *types; unsigned int types_count; + struct list_head device_list; }; /** @@ -2058,6 +2059,55 @@ struct devlink_linecard_type { const void *priv; }; +struct devlink_linecard_device { + struct list_head list; + unsigned int index; +}; + +static int +devlink_nl_linecard_device_fill(struct sk_buff *msg, + struct devlink_linecard_device *linecard_device) +{ + struct nlattr *attr; + + attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE); + if (!attr) + return -EMSGSIZE; + if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX, + linecard_device->index)) { + nla_nest_cancel(msg, attr); + return -EMSGSIZE; + } + nla_nest_end(msg, attr); + + return 0; +} + +static int devlink_nl_linecard_devices_fill(struct sk_buff *msg, + struct devlink_linecard *linecard) +{ + struct devlink_linecard_device *linecard_device; + struct nlattr *attr; + int err; + + if (list_empty(&linecard->device_list)) + return 0; + + attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST); + if (!attr) + return -EMSGSIZE; + list_for_each_entry(linecard_device, &linecard->device_list, list) { + err = devlink_nl_linecard_device_fill(msg, linecard_device); + if (err) { + nla_nest_cancel(msg, attr); + return err; + } + } + nla_nest_end(msg, attr); + + return 0; +} + static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_linecard *linecard, @@ -2068,6 +2118,7 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, struct 
devlink_linecard_type *linecard_type; struct nlattr *attr; void *hdr; + int err; int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); @@ -2100,6 +2151,10 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, nla_nest_end(msg, attr); } + err = devlink_nl_linecard_devices_fill(msg, linecard); + if (err) + goto nla_put_failure; + genlmsg_end(msg, hdr); return 0; @@ -10264,6 +10319,7 @@ devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, linecard->priv = priv; linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; mutex_init(&linecard->state_lock); + INIT_LIST_HEAD(&linecard->device_list); err = devlink_linecard_types_init(linecard); if (err) { @@ -10291,6 +10347,7 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) struct devlink *devlink = linecard->devlink; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); + WARN_ON(!list_empty(&linecard->device_list)); mutex_lock(&devlink->linecards_lock); list_del(&linecard->list); devlink_linecard_types_fini(linecard); @@ -10299,6 +10356,50 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) } EXPORT_SYMBOL_GPL(devlink_linecard_destroy); +/** + * devlink_linecard_device_create - Create a device on linecard + * + * @linecard: devlink linecard + * @device_index: index of the linecard device + * + * Return: Line card device structure or an ERR_PTR() encoded error code. + */ +struct devlink_linecard_device * +devlink_linecard_device_create(struct devlink_linecard *linecard, + unsigned int device_index) +{ + struct devlink_linecard_device *linecard_device; + + linecard_device = kzalloc(sizeof(*linecard_device), GFP_KERNEL); + if (!linecard_device) + return ERR_PTR(-ENOMEM); + linecard_device->index = device_index; + mutex_lock(&linecard->state_lock); + list_add_tail(&linecard_device->list, &linecard->device_list); + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); + return linecard_device; +} +EXPORT_SYMBOL_GPL(devlink_linecard_device_create); + +/** + * devlink_linecard_device_destroy - Destroy device on linecard + * + * @linecard: devlink linecard + * @linecard_device: devlink linecard device + */ +void +devlink_linecard_device_destroy(struct devlink_linecard *linecard, + struct devlink_linecard_device *linecard_device) +{ + mutex_lock(&linecard->state_lock); + list_del(&linecard_device->list); + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); + mutex_unlock(&linecard->state_lock); + kfree(linecard_device); +} +EXPORT_SYMBOL_GPL(devlink_linecard_device_destroy); + /** * devlink_linecard_provision_set - Set provisioning on linecard * * @linecard: devlink linecard @@ -10331,6 +10432,7 @@ EXPORT_SYMBOL_GPL(devlink_linecard_provision_set); void devlink_linecard_provision_clear(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); + WARN_ON(!list_empty(&linecard->device_list)); linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); -- cgit v1.2.3-59-g8ed1b From 276910aecc6a4076f5fbfd8160ff70695d6c1eb5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:22 +0300 Subject: devlink: introduce line card info get message Allow the driver to provide a per-line-card info get op to fill in info, similar to "devlink dev info".
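An illustrative driver-side sketch (not from this patch) of wiring up the new info_get op using the existing devlink_info_version_*_put() helpers; my_linecard, hw_rev and ini_version are hypothetical driver state matching the output shown in the example below:

	static int my_lc_info_get(struct devlink_linecard *linecard, void *priv,
				  struct devlink_info_req *req,
				  struct netlink_ext_ack *extack)
	{
		struct my_linecard *lc = priv;	/* hypothetical driver state */
		int err;

		err = devlink_info_version_fixed_put(req, "hw.revision", lc->hw_rev);
		if (err)
			return err;
		return devlink_info_version_running_put(req, "ini.version", lc->ini_version);
	}

The op is hooked up through the devlink_linecard_ops structure passed to devlink_linecard_create(), e.g. .info_get = my_lc_info_get.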
Example: $ devlink lc info pci/0000:01:00.0 lc 8 pci/0000:01:00.0: lc 8 versions: fixed: hw.revision 0 running: ini.version 4 Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../networking/devlink/devlink-linecard.rst | 4 + include/net/devlink.h | 7 +- include/uapi/linux/devlink.h | 2 + net/core/devlink.c | 130 ++++++++++++++++++++- 4 files changed, 138 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/devlink/devlink-linecard.rst b/Documentation/networking/devlink/devlink-linecard.rst index 6c0b8928bc13..5a8d5989702a 100644 --- a/Documentation/networking/devlink/devlink-linecard.rst +++ b/Documentation/networking/devlink/devlink-linecard.rst @@ -14,6 +14,7 @@ system. Following operations are provided: * Get a list of supported line card types. * Provision of a slot with specific line card type. * Get and monitor of line card state and its change. + * Get information about line card versions. Line card according to the type may contain one or more gearboxes to mux the lanes with certain speed to multiple ports with lanes @@ -120,3 +121,6 @@ Example usage # Set slot 8 to be unprovisioned: $ devlink lc set pci/0000:01:00.0 lc 8 notype + + # Set info for slot 8: + $ devlink lc info pci/0000:01:00.0 lc 8 diff --git a/include/net/devlink.h b/include/net/devlink.h index c84b52fb9ff0..f96dcb376630 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -150,6 +150,8 @@ struct devlink_port_new_attrs { sfnum_valid:1; }; +struct devlink_info_req; + /** * struct devlink_linecard_ops - Linecard operations * @provision: callback to provision the linecard slot with certain @@ -168,6 +170,7 @@ struct devlink_port_new_attrs { * provisioned. * @types_count: callback to get number of supported types * @types_get: callback to get next type in list + * @info_get: callback to get linecard info */ struct devlink_linecard_ops { int (*provision)(struct devlink_linecard *linecard, void *priv, @@ -182,6 +185,9 @@ struct devlink_linecard_ops { void (*types_get)(struct devlink_linecard *linecard, void *priv, unsigned int index, const char **type, const void **type_priv); + int (*info_get)(struct devlink_linecard *linecard, void *priv, + struct devlink_info_req *req, + struct netlink_ext_ack *extack); }; struct devlink_sb_pool_info { @@ -628,7 +634,6 @@ struct devlink_flash_update_params { #define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1) struct devlink_region; -struct devlink_info_req; /** * struct devlink_region_ops - Region operations diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index cd578645f94f..fb8c3864457f 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -136,6 +136,8 @@ enum devlink_command { DEVLINK_CMD_LINECARD_NEW, DEVLINK_CMD_LINECARD_DEL, + DEVLINK_CMD_LINECARD_INFO_GET, /* can dump */ + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 diff --git a/net/core/devlink.c b/net/core/devlink.c index 41d9631ceada..5facd10de64a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2424,6 +2424,125 @@ static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb, return 0; } +struct devlink_info_req { + struct sk_buff *msg; +}; + +static int +devlink_nl_linecard_info_fill(struct sk_buff *msg, struct devlink *devlink, + struct devlink_linecard *linecard, + enum devlink_command cmd, u32 portid, + u32 seq, int flags, struct netlink_ext_ack *extack) +{ + struct devlink_info_req req; + void *hdr; + 
int err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + err = -EMSGSIZE; + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index)) + goto nla_put_failure; + + req.msg = msg; + err = linecard->ops->info_get(linecard, linecard->priv, &req, extack); + if (err) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return err; +} + +static int devlink_nl_cmd_linecard_info_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink_linecard *linecard = info->user_ptr[1]; + struct devlink *devlink = linecard->devlink; + struct sk_buff *msg; + int err; + + if (!linecard->ops->info_get) + return -EOPNOTSUPP; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + mutex_lock(&linecard->state_lock); + err = devlink_nl_linecard_info_fill(msg, devlink, linecard, + DEVLINK_CMD_LINECARD_INFO_GET, + info->snd_portid, info->snd_seq, 0, + info->extack); + mutex_unlock(&linecard->state_lock); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int devlink_nl_cmd_linecard_info_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_linecard *linecard; + struct devlink *devlink; + int start = cb->args[0]; + unsigned long index; + int idx = 0; + int err = 0; + + mutex_lock(&devlink_mutex); + xa_for_each_marked(&devlinks, index, devlink, DEVLINK_REGISTERED) { + if (!devlink_try_get(devlink)) + continue; + + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + goto retry; + + mutex_lock(&devlink->linecards_lock); + list_for_each_entry(linecard, &devlink->linecard_list, list) { + if (idx < start || !linecard->ops->info_get) { + idx++; + continue; + } + mutex_lock(&linecard->state_lock); + err = devlink_nl_linecard_info_fill(msg, devlink, linecard, + DEVLINK_CMD_LINECARD_INFO_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI, + cb->extack); + mutex_unlock(&linecard->state_lock); + if (err) { + mutex_unlock(&devlink->linecards_lock); + devlink_put(devlink); + goto out; + } + idx++; + } + mutex_unlock(&devlink->linecards_lock); +retry: + devlink_put(devlink); + } +out: + mutex_unlock(&devlink_mutex); + + if (err != -EMSGSIZE) + return err; + + cb->args[0] = idx; + return msg->len; +} + static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, enum devlink_command cmd, u32 portid, @@ -6416,10 +6535,6 @@ out_dev: return err; } -struct devlink_info_req { - struct sk_buff *msg; -}; - int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name) { return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name); @@ -9139,6 +9254,13 @@ static const struct genl_small_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, }, + { + .cmd = DEVLINK_CMD_LINECARD_INFO_GET, + .doit = devlink_nl_cmd_linecard_info_get_doit, + .dumpit = devlink_nl_cmd_linecard_info_get_dumpit, + .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, + /* can be retrieved by unprivileged users */ + }, { .cmd = DEVLINK_CMD_SB_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, -- cgit v1.2.3-59-g8ed1b From e8e7fbb6a39cd6761c843d97851eb40c5885e922 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Tue, 12 Apr 2022 11:49:43 +0200 Subject: PCI: Add PCI_EXP_SLTCTL_ASPL_DISABLE macro 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add macro defining Auto Slot Power Limit Disable bit in Slot Control Register. Link: https://lore.kernel.org/r/20220412094946.27069-2-pali@kernel.org Signed-off-by: Pali Rohár Signed-off-by: Marek Behún Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas --- include/uapi/linux/pci_regs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index bee1a9ed6e66..108f8523fa04 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -616,6 +616,7 @@ #define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */ #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ +#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */ #define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */ #define PCI_EXP_SLTSTA 0x1a /* Slot Status */ #define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ -- cgit v1.2.3-59-g8ed1b From 7d5e005d982527e4029b0139823d179986e34cdc Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 22 Apr 2022 15:03:25 +0300 Subject: fanotify: implement "evictable" inode marks When an inode mark is created with flag FAN_MARK_EVICTABLE, it will not pin the marked inode to the inode cache, so when the inode is evicted from cache due to memory pressure, the mark will be lost. When an inode mark with flag FAN_MARK_EVICTABLE is updated without using this flag, the marked inode is pinned to the inode cache. When an inode mark is updated with flag FAN_MARK_EVICTABLE but an existing mark already has the inode pinned, the mark update fails with error EEXIST. Evictable inode marks can be used to set up inode marks with ignored mask to suppress events from uninteresting files or directories in a lazy manner, upon receiving the first event, without having to iterate all the uninteresting files or directories beforehand. The evictable inode mark feature allows performing this lazy mark setup without exhausting the system memory with pinned inodes. This change does not enable the feature yet.
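To illustrate the intended lazy-ignore pattern (a sketch, not from the patch), userspace would add an evictable ignore mark on first sight of an uninteresting directory; everything here is standard fanotify_mark(2) usage except the new FAN_MARK_EVICTABLE flag:

	#include <fcntl.h>
	#include <sys/fanotify.h>

	/* Suppress further events from 'dir' without pinning its inode.
	 * If the inode is later evicted, the mark (and the suppression)
	 * is lost and can simply be re-added on the next event.
	 */
	static int ignore_dir_evictable(int fan_fd, const char *dir)
	{
		return fanotify_mark(fan_fd,
				     FAN_MARK_ADD | FAN_MARK_EVICTABLE |
				     FAN_MARK_IGNORED_MASK |
				     FAN_MARK_IGNORED_SURV_MODIFY,
				     FAN_OPEN | FAN_ACCESS, AT_FDCWD, dir);
	}

Note the default mark type is FAN_MARK_INODE, which matches the restriction this patch enforces for FAN_MARK_EVICTABLE.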
Link: https://lore.kernel.org/linux-fsdevel/CAOQ4uxiRDpuS=2uA6+ZUM7yG9vVU-u212tkunBmSnP_u=mkv=Q@mail.gmail.com/ Link: https://lore.kernel.org/r/20220422120327.3459282-15-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.h | 2 ++ fs/notify/fanotify/fanotify_user.c | 38 ++++++++++++++++++++++++++++++++++++-- include/uapi/linux/fanotify.h | 1 + 3 files changed, 39 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h index 87142bc0131a..80e0ec95b113 100644 --- a/fs/notify/fanotify/fanotify.h +++ b/fs/notify/fanotify/fanotify.h @@ -497,6 +497,8 @@ static inline unsigned int fanotify_mark_user_flags(struct fsnotify_mark *mark) if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) mflags |= FAN_MARK_IGNORED_SURV_MODIFY; + if (mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF) + mflags |= FAN_MARK_EVICTABLE; return mflags; } diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 4005ee8e6e2c..ae36138afead 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1084,6 +1084,7 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group, static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { + bool want_iref = !(fan_flags & FAN_MARK_EVICTABLE); bool recalc = false; /* @@ -1099,7 +1100,18 @@ static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark, recalc = true; } - return recalc; + if (fsn_mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE || + want_iref == !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) + return recalc; + + /* + * NO_IREF may be removed from a mark, but not added. + * When removed, fsnotify_recalc_mask() will take the inode ref. + */ + WARN_ON_ONCE(!want_iref); + fsn_mark->flags &= ~FSNOTIFY_MARK_FLAG_NO_IREF; + + return true; } static bool fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, @@ -1125,6 +1137,7 @@ static bool fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, fsnotify_connp_t *connp, unsigned int obj_type, + unsigned int fan_flags, __kernel_fsid_t *fsid) { struct ucounts *ucounts = group->fanotify_data.ucounts; @@ -1147,6 +1160,9 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, } fsnotify_init_mark(mark, group); + if (fan_flags & FAN_MARK_EVICTABLE) + mark->flags |= FSNOTIFY_MARK_FLAG_NO_IREF; + ret = fsnotify_add_mark_locked(mark, connp, obj_type, 0, fsid); if (ret) { fsnotify_put_mark(mark); @@ -1183,13 +1199,23 @@ static int fanotify_add_mark(struct fsnotify_group *group, mutex_lock(&group->mark_mutex); fsn_mark = fsnotify_find_mark(connp, group); if (!fsn_mark) { - fsn_mark = fanotify_add_new_mark(group, connp, obj_type, fsid); + fsn_mark = fanotify_add_new_mark(group, connp, obj_type, + fan_flags, fsid); if (IS_ERR(fsn_mark)) { mutex_unlock(&group->mark_mutex); return PTR_ERR(fsn_mark); } } + /* + * Non evictable mark cannot be downgraded to evictable mark. + */ + if (fan_flags & FAN_MARK_EVICTABLE && + !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) { + ret = -EEXIST; + goto out; + } + /* * Error events are pre-allocated per group, only if strictly * needed (i.e. FAN_FS_ERROR was requested). 
@@ -1601,6 +1627,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, mark_type != FAN_MARK_FILESYSTEM) goto fput_and_out; + /* + * Evictable is only relevant for inode marks, because only inode object + * can be evicted on memory pressure. + */ + if (flags & FAN_MARK_EVICTABLE && + mark_type != FAN_MARK_INODE) + goto fput_and_out; + /* * Events that do not carry enough information to report * event->fd require a group that supports reporting fid. Those diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index e8ac38cc2fd6..f1f89132d60e 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -82,6 +82,7 @@ #define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040 #define FAN_MARK_FLUSH 0x00000080 /* FAN_MARK_FILESYSTEM is 0x00000100 */ +#define FAN_MARK_EVICTABLE 0x00000200 /* These are NOT bitwise flags. Both bits can be used togther. */ #define FAN_MARK_INODE 0x00000000 -- cgit v1.2.3-59-g8ed1b From c0a5a21c25f37c9fd7b36072f9968cdff1e4aa13 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Mon, 25 Apr 2022 03:18:51 +0530 Subject: bpf: Allow storing referenced kptr in map Extending the code in previous commits, introduce referenced kptr support, which needs to be tagged using the 'kptr_ref' tag instead. Unlike unreferenced kptrs, referenced kptrs have a lot more restrictions. In addition to the type matching, only a newly introduced bpf_kptr_xchg helper is allowed to modify the map value at that offset. This transfers the referenced pointer being stored into the map, releasing the reference state for the program, returning the old value, and creating new reference state for the returned pointer. Similar to the unreferenced pointer case, the return value for this case will also be PTR_TO_BTF_ID_OR_NULL. The reference for the returned pointer must either be eventually released by calling the corresponding release function, or transferred into another map. It is also allowed to call bpf_kptr_xchg with a NULL pointer, to clear the value, and obtain the old value if any. BPF_LDX, BPF_STX, and BPF_ST cannot access referenced kptr. A future commit will permit using BPF_LDX for such pointers, while attempting to make it safe, since the lifetime of the object won't be guaranteed. There are valid reasons to enforce the restriction of permitting only bpf_kptr_xchg to operate on referenced kptr. The pointer value must be consistent in the face of concurrent modification, and any prior values contained in the map must also be released before a new one is moved into the map. To ensure proper transfer of this ownership, bpf_kptr_xchg returns the old value, which the verifier would require the user to either free or move into another map, and releases the reference held for the pointer being moved in. In the future, the direct BPF_XCHG instruction may also be permitted to work like the bpf_kptr_xchg helper. Note that process_kptr_func doesn't have to call check_helper_mem_access, since we already disallow rdonly/wronly flags for map, which is what check_map_access_type checks, and we already ensure the PTR_TO_MAP_VALUE refers to kptr by obtaining its off_desc, so check_map_access is also not required.
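A sketch of the BPF-program side (illustrative only; prog_test_ref_kfunc and its acquire/release kfuncs are the test stand-ins used by this series' selftests, and the __kptr_ref tag macro is spelled out by hand):

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))

	extern struct prog_test_ref_kfunc *
	bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
	extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;

	struct map_value {
		struct prog_test_ref_kfunc __kptr_ref *ptr;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct map_value);
	} kptr_map SEC(".maps");

	SEC("tc")
	int store_ref(struct __sk_buff *ctx)
	{
		struct prog_test_ref_kfunc *p, *old;
		unsigned long sp = 0;
		struct map_value *v;
		int key = 0;

		v = bpf_map_lookup_elem(&kptr_map, &key);
		if (!v)
			return 0;
		p = bpf_kfunc_call_test_acquire(&sp);	/* take a reference */
		if (!p)
			return 0;
		old = bpf_kptr_xchg(&v->ptr, p);	/* ownership of p moves into the map */
		if (old)				/* previous value comes back owned */
			bpf_kfunc_call_test_release(old);
		return 0;
	}

	char _license[] SEC("license") = "GPL";

Passing NULL as the second argument instead clears the slot while handing back (and obligating release of) whatever was stored.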
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220424214901.2743946-4-memxor@gmail.com --- include/linux/bpf.h | 8 ++++ include/uapi/linux/bpf.h | 12 ++++++ kernel/bpf/btf.c | 10 ++++- kernel/bpf/helpers.c | 24 +++++++++++ kernel/bpf/verifier.c | 98 ++++++++++++++++++++++++++++++++++++------ tools/include/uapi/linux/bpf.h | 12 ++++++ 6 files changed, 151 insertions(+), 13 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 492edd2c5713..24310837bafc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -160,8 +160,14 @@ enum { BPF_MAP_VALUE_OFF_MAX = 8, }; +enum bpf_kptr_type { + BPF_KPTR_UNREF, + BPF_KPTR_REF, +}; + struct bpf_map_value_off_desc { u32 offset; + enum bpf_kptr_type type; struct { struct btf *btf; u32 btf_id; @@ -418,6 +424,7 @@ enum bpf_arg_type { ARG_PTR_TO_STACK, /* pointer to stack */ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ + ARG_PTR_TO_KPTR, /* pointer to referenced kptr */ __BPF_ARG_TYPE_MAX, /* Extended arg_types. */ @@ -427,6 +434,7 @@ enum bpf_arg_type { ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM, ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, + ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID, /* This must be the last entry. Its purpose is to ensure the enum is * wide enough to hold the higher bits reserved for bpf_type_flag. diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d14b10b85e51..444fe6f1cf35 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5143,6 +5143,17 @@ union bpf_attr { * The **hash_algo** is returned on success, * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if * invalid arguments are passed. + * + * void *bpf_kptr_xchg(void *map_value, void *ptr) + * Description + * Exchange kptr at pointer *map_value* with *ptr*, and return the + * old value. *ptr* can be NULL, otherwise it must be a referenced + * pointer which will be released when this helper is called. + * Return + * The old value of kptr (which can be NULL). The returned pointer + * if not NULL, is a reference which must be released using its + * corresponding release function, or moved into a BPF map before + * program exit. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5339,6 +5350,7 @@ union bpf_attr { FN(copy_from_user_task), \ FN(skb_set_tstamp), \ FN(ima_file_hash), \ + FN(kptr_xchg), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index f0287342204f..4138c51728dd 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3177,6 +3177,7 @@ enum { struct btf_field_info { u32 type_id; u32 off; + enum bpf_kptr_type type; }; static int btf_find_struct(const struct btf *btf, const struct btf_type *t, @@ -3193,6 +3194,7 @@ static int btf_find_struct(const struct btf *btf, const struct btf_type *t, static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, u32 off, int sz, struct btf_field_info *info) { + enum bpf_kptr_type type; u32 res_id; /* For PTR, sz is always == 8 */ @@ -3205,7 +3207,11 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, /* Reject extra tags */ if (btf_type_is_type_tag(btf_type_by_id(btf, t->type))) return -EINVAL; - if (strcmp("kptr", __btf_name_by_offset(btf, t->name_off))) + if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off))) + type = BPF_KPTR_UNREF; + else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off))) + type = BPF_KPTR_REF; + else return -EINVAL; /* Get the base type */ @@ -3216,6 +3222,7 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, info->type_id = res_id; info->off = off; + info->type = type; return BTF_FIELD_FOUND; } @@ -3420,6 +3427,7 @@ struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf, } tab->off[i].offset = info_arr[i].off; + tab->off[i].type = info_arr[i].type; tab->off[i].kptr.btf_id = id; tab->off[i].kptr.btf = kernel_btf; } diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 315053ef6a75..3e709fed5306 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1374,6 +1374,28 @@ out: kfree(t); } +BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) +{ + unsigned long *kptr = map_value; + + return xchg(kptr, (unsigned long)ptr); +} + +/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg() + * helper is determined dynamically by the verifier. 
+ */ +#define BPF_PTR_POISON ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) + +const struct bpf_func_proto bpf_kptr_xchg_proto = { + .func = bpf_kptr_xchg, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .ret_btf_id = BPF_PTR_POISON, + .arg1_type = ARG_PTR_TO_KPTR, + .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE, + .arg2_btf_id = BPF_PTR_POISON, +}; + const struct bpf_func_proto bpf_get_current_task_proto __weak; const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; const struct bpf_func_proto bpf_probe_read_user_proto __weak; @@ -1452,6 +1474,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_timer_start_proto; case BPF_FUNC_timer_cancel: return &bpf_timer_cancel_proto; + case BPF_FUNC_kptr_xchg: + return &bpf_kptr_xchg_proto; default: break; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5426bab7f02c..c9ee44efed89 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -258,6 +258,7 @@ struct bpf_call_arg_meta { struct btf *ret_btf; u32 ret_btf_id; u32 subprogno; + struct bpf_map_value_off_desc *kptr_off_desc; }; struct btf *btf_vmlinux; @@ -489,7 +490,8 @@ static bool is_acquire_function(enum bpf_func_id func_id, if (func_id == BPF_FUNC_sk_lookup_tcp || func_id == BPF_FUNC_sk_lookup_udp || func_id == BPF_FUNC_skc_lookup_tcp || - func_id == BPF_FUNC_ringbuf_reserve) + func_id == BPF_FUNC_ringbuf_reserve || + func_id == BPF_FUNC_kptr_xchg) return true; if (func_id == BPF_FUNC_map_lookup_elem && @@ -3514,6 +3516,12 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, /* We need to verify reg->type and reg->btf, before accessing reg->btf */ reg_name = kernel_type_name(reg->btf, reg->btf_id); + /* For ref_ptr case, release function check should ensure we get one + * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the + * normal store of unreferenced kptr, we must ensure var_off is zero. + * Since ref_ptr cannot be accessed directly by BPF insns, checks for + * reg->off and reg->ref_obj_id are not needed here. + */ if (__check_ptr_off_reg(env, reg, regno, true)) return -EACCES; @@ -3569,6 +3577,12 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, return -EACCES; } + /* We cannot directly access kptr_ref */ + if (off_desc->type == BPF_KPTR_REF) { + verbose(env, "accessing referenced kptr disallowed\n"); + return -EACCES; + } + if (class == BPF_LDX) { val_reg = reg_state(env, value_regno); /* We can simply mark the value_regno receiving the pointer @@ -5293,6 +5307,53 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno, return 0; } +static int process_kptr_func(struct bpf_verifier_env *env, int regno, + struct bpf_call_arg_meta *meta) +{ + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + struct bpf_map_value_off_desc *off_desc; + struct bpf_map *map_ptr = reg->map_ptr; + u32 kptr_off; + int ret; + + if (!tnum_is_const(reg->var_off)) { + verbose(env, + "R%d doesn't have constant offset. 
kptr has to be at the constant offset\n", + regno); + return -EINVAL; + } + if (!map_ptr->btf) { + verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", + map_ptr->name); + return -EINVAL; + } + if (!map_value_has_kptrs(map_ptr)) { + ret = PTR_ERR(map_ptr->kptr_off_tab); + if (ret == -E2BIG) + verbose(env, "map '%s' has more than %d kptr\n", map_ptr->name, + BPF_MAP_VALUE_OFF_MAX); + else if (ret == -EEXIST) + verbose(env, "map '%s' has repeating kptr BTF tags\n", map_ptr->name); + else + verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); + return -EINVAL; + } + + meta->map_ptr = map_ptr; + kptr_off = reg->off + reg->var_off.value; + off_desc = bpf_map_kptr_off_contains(map_ptr, kptr_off); + if (!off_desc) { + verbose(env, "off=%d doesn't point to kptr\n", kptr_off); + return -EACCES; + } + if (off_desc->type != BPF_KPTR_REF) { + verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off); + return -EACCES; + } + meta->kptr_off_desc = off_desc; + return 0; +} + static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return base_type(type) == ARG_PTR_TO_MEM || @@ -5433,6 +5494,7 @@ static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; +static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } }; static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, @@ -5460,11 +5522,13 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_STACK] = &stack_ptr_types, [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, [ARG_PTR_TO_TIMER] = &timer_types, + [ARG_PTR_TO_KPTR] = &kptr_types, }; static int check_reg_type(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, - const u32 *arg_btf_id) + const u32 *arg_btf_id, + struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; enum bpf_reg_type expected, type = reg->type; @@ -5517,8 +5581,11 @@ found: arg_btf_id = compatible->btf_id; } - if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, - btf_vmlinux, *arg_btf_id)) { + if (meta->func_id == BPF_FUNC_kptr_xchg) { + if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno)) + return -EACCES; + } else if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, + btf_vmlinux, *arg_btf_id)) { verbose(env, "R%d is of type %s but %s is expected\n", regno, kernel_type_name(reg->btf, reg->btf_id), kernel_type_name(btf_vmlinux, *arg_btf_id)); @@ -5625,7 +5692,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, */ goto skip_type_check; - err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]); + err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta); if (err) return err; @@ -5801,6 +5868,9 @@ skip_type_check: verbose(env, "string is not zero-terminated\n"); return -EINVAL; } + } else if (arg_type == ARG_PTR_TO_KPTR) { + if (process_kptr_func(env, regno, meta)) + return -EACCES; } return err; @@ -6143,10 +6213,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn) int i; for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { - if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) + if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && 
!fn->arg_btf_id[i]) return false; - if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i]) + if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i]) return false; } @@ -7012,21 +7082,25 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn regs[BPF_REG_0].btf_id = meta.ret_btf_id; } } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) { + struct btf *ret_btf; int ret_btf_id; mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; - ret_btf_id = *fn->ret_btf_id; + if (func_id == BPF_FUNC_kptr_xchg) { + ret_btf = meta.kptr_off_desc->kptr.btf; + ret_btf_id = meta.kptr_off_desc->kptr.btf_id; + } else { + ret_btf = btf_vmlinux; + ret_btf_id = *fn->ret_btf_id; + } if (ret_btf_id == 0) { verbose(env, "invalid return type %u of func %s#%d\n", base_type(ret_type), func_id_name(func_id), func_id); return -EINVAL; } - /* current BPF helper definitions are only coming from - * built-in code with type IDs from vmlinux BTF - */ - regs[BPF_REG_0].btf = btf_vmlinux; + regs[BPF_REG_0].btf = ret_btf; regs[BPF_REG_0].btf_id = ret_btf_id; } else { verbose(env, "unknown return type %u of func %s#%d\n",
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d14b10b85e51..444fe6f1cf35 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5143,6 +5143,17 @@ union bpf_attr { * The **hash_algo** is returned on success, * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if * invalid arguments are passed. + * + * void *bpf_kptr_xchg(void *map_value, void *ptr) + * Description + * Exchange kptr at pointer *map_value* with *ptr*, and return the + * old value. *ptr* can be NULL, otherwise it must be a referenced + * pointer which will be released when this helper is called. + * Return + * The old value of kptr (which can be NULL). The returned pointer + * if not NULL, is a reference which must be released using its + * corresponding release function, or moved into a BPF map before + * program exit. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5339,6 +5350,7 @@ union bpf_attr { FN(copy_from_user_task), \ FN(skb_set_tstamp), \ FN(ima_file_hash), \ + FN(kptr_xchg), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b
From d8fc1c7c4c9b705ce5f5bba772ad66a0137c685d Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Apr 2022 09:16:17 -0500 Subject: tee: remove flags TEE_IOCTL_SHM_MAPPED and TEE_IOCTL_SHM_DMA_BUF
These look to be left over from an early edition of this driver. Userspace does not need this information. Checking all users of this that I have access to, I have verified that no one is using them. They leak internal-use flags out to userspace. What's more, they are no longer correct after commit a45ea4efa358. Let's drop these flags before someone does try to use them for something and they become ABI.
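As context for why nothing in userspace should miss these flags, here is a minimal sketch (not from the patch; the device path and error handling are illustrative) of the usual allocation sequence around TEE_IOC_SHM_ALLOC. The caller consumes only the returned file descriptor, data.id, and data.size; data.flags was never required to map or use the buffer.

/* Minimal sketch of TEE shared-memory allocation from userspace.
 * Device node name and buffer use are assumptions for the example.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
	int fd, shm_fd;
	char *p;

	fd = open("/dev/tee0", O_RDWR);		/* node name varies by system */
	if (fd < 0)
		return 1;

	/* On success the ioctl returns an fd backing the shared memory;
	 * data.id identifies the buffer in later invoke parameters.
	 * Nothing here ever needed data.flags.
	 */
	shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &data);
	if (shm_fd < 0)
		return 1;

	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "hello secure world");	/* use the shared buffer */

	munmap(p, data.size);
	close(shm_fd);
	close(fd);
	return 0;
}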
Signed-off-by: Andrew Davis Acked-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 2 -- include/uapi/linux/tee.h | 4 ---- 2 files changed, 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 8aa1a4836b92..af0f7c603fa4 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -302,7 +302,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx, return PTR_ERR(shm); data.id = shm->id; - data.flags = shm->flags; data.size = shm->size; if (copy_to_user(udata, &data, sizeof(data))) @@ -339,7 +338,6 @@ tee_ioctl_shm_register(struct tee_context *ctx, return PTR_ERR(shm); data.id = shm->id; - data.flags = shm->flags; data.length = shm->size; if (copy_to_user(udata, &data, sizeof(data))) diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index 25a6c534beb1..23e57164693c 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -42,10 +42,6 @@ #define TEE_IOC_MAGIC 0xa4 #define TEE_IOC_BASE 0 -/* Flags relating to shared memory */ -#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */ -#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */ - #define TEE_MAX_ARG_SIZE 1024 #define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ -- cgit v1.2.3-59-g8ed1b From cc51eaa8b530bf070e76847a717adcbf603469b7 Mon Sep 17 00:00:00 2001 From: Dylan Yudaken Date: Tue, 26 Apr 2022 01:29:04 -0700 Subject: io_uring: add type to op enum It is useful to have a type enum for opcodes, to allow the compiler to assert that every value is used in a switch statement. Signed-off-by: Dylan Yudaken Link: https://lore.kernel.org/r/20220426082907.3600028-2-dylany@fb.com Signed-off-by: Jens Axboe --- include/uapi/linux/io_uring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 5fb52bf32435..49d1f3994f8d 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -105,7 +105,7 @@ enum { #define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ #define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ -enum { +enum io_uring_op { IORING_OP_NOP, IORING_OP_READV, IORING_OP_WRITEV, -- cgit v1.2.3-59-g8ed1b From 052e1f01bfae8be6f31b61ed3a2356edfca855dc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:33 -0700 Subject: net: atm: remove support for ZeitNet ZN122x ATM devices This driver received nothing but automated fixes in the last 15 years. Since it's using virt_to_bus it's unlikely to be used on any modern platform. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - drivers/atm/Kconfig | 20 - drivers/atm/Makefile | 1 - drivers/atm/uPD98401.h | 293 ------- drivers/atm/uPD98402.c | 266 ------ drivers/atm/uPD98402.h | 107 --- drivers/atm/zatm.c | 1652 -------------------------------------- drivers/atm/zatm.h | 104 --- include/uapi/linux/atm_zatm.h | 47 -- 10 files changed, 2492 deletions(-) delete mode 100644 drivers/atm/uPD98401.h delete mode 100644 drivers/atm/uPD98402.c delete mode 100644 drivers/atm/uPD98402.h delete mode 100644 drivers/atm/zatm.c delete mode 100644 drivers/atm/zatm.h delete mode 100644 include/uapi/linux/atm_zatm.h (limited to 'include/uapi/linux') diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 7ed202db9ef0..d82f4ebf687f 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -178,7 +178,6 @@ CONFIG_NETCONSOLE=m CONFIG_ATM_TCP=m CONFIG_ATM_LANAI=m CONFIG_ATM_ENI=m -CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m CONFIG_ATM_IA=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index f46ad2e294fa..0cb4d9aa14d1 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -255,7 +255,6 @@ CONFIG_ARCNET_COM20020_CS=m CONFIG_ATM_TCP=m CONFIG_ATM_LANAI=m CONFIG_ATM_ENI=m -CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m CONFIG_ATM_IA=m diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 9c778308722a..63cdb46a3439 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -146,26 +146,6 @@ config ATM_ENI_BURST_RX_2W try this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W are also set may or may not improve throughput. -config ATM_ZATM - tristate "ZeitNet ZN1221/ZN1225" - depends on PCI && VIRT_TO_BUS - help - Driver for the ZeitNet ZN1221 (MMF) and ZN1225 (UTP-5) 155 Mbps ATM - adapters. - - To compile this driver as a module, choose M here: the module will - be called zatm. - -config ATM_ZATM_DEBUG - bool "Enable extended debugging" - depends on ATM_ZATM - help - Extended debugging records various events and displays that list - when an inconsistency is detected. This mechanism is faster than - generally using printks, but still has some impact on performance. - Note that extended debugging may create certain race conditions - itself. Enable this ONLY if you suspect problems with the driver. - config ATM_NICSTAR tristate "IDT 77201 (NICStAR) (ForeRunnerLE)" depends on PCI diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile index 1b6a8ddaf007..c9eade92019b 100644 --- a/drivers/atm/Makefile +++ b/drivers/atm/Makefile @@ -5,7 +5,6 @@ fore_200e-y := fore200e.o -obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o obj-$(CONFIG_ATM_NICSTAR) += nicstar.o obj-$(CONFIG_ATM_IA) += iphase.o suni.o obj-$(CONFIG_ATM_FORE200E) += fore_200e.o diff --git a/drivers/atm/uPD98401.h b/drivers/atm/uPD98401.h deleted file mode 100644 index f766a5ef0c5d..000000000000 --- a/drivers/atm/uPD98401.h +++ /dev/null @@ -1,293 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* drivers/atm/uPD98401.h - NEC uPD98401 (SAR) declarations */ - -/* Written 1995 by Werner Almesberger, EPFL LRC */ - - -#ifndef DRIVERS_ATM_uPD98401_H -#define DRIVERS_ATM_uPD98401_H - - -#define MAX_CRAM_SIZE (1 << 18) /* 2^18 words */ -#define RAM_INCREMENT 1024 /* check in 4 kB increments */ - -#define uPD98401_PORTS 0x24 /* probably more ? 
*/ - - -/* - * Commands - */ - -#define uPD98401_OPEN_CHAN 0x20000000 /* open channel */ -#define uPD98401_CHAN_ADDR 0x0003fff8 /* channel address */ -#define uPD98401_CHAN_ADDR_SHIFT 3 -#define uPD98401_CLOSE_CHAN 0x24000000 /* close channel */ -#define uPD98401_CHAN_RT 0x02000000 /* RX/TX (0 TX, 1 RX) */ -#define uPD98401_DEACT_CHAN 0x28000000 /* deactivate channel */ -#define uPD98401_TX_READY 0x30000000 /* TX ready */ -#define uPD98401_ADD_BAT 0x34000000 /* add batches */ -#define uPD98401_POOL 0x000f0000 /* pool number */ -#define uPD98401_POOL_SHIFT 16 -#define uPD98401_POOL_NUMBAT 0x0000ffff /* number of batches */ -#define uPD98401_NOP 0x3f000000 /* NOP */ -#define uPD98401_IND_ACC 0x00000000 /* Indirect Access */ -#define uPD98401_IA_RW 0x10000000 /* Read/Write (0 W, 1 R) */ -#define uPD98401_IA_B3 0x08000000 /* Byte select, 1 enable */ -#define uPD98401_IA_B2 0x04000000 -#define uPD98401_IA_B1 0x02000000 -#define uPD98401_IA_B0 0x01000000 -#define uPD98401_IA_BALL 0x0f000000 /* whole longword */ -#define uPD98401_IA_TGT 0x000c0000 /* Target */ -#define uPD98401_IA_TGT_SHIFT 18 -#define uPD98401_IA_TGT_CM 0 /* - Control Memory */ -#define uPD98401_IA_TGT_SAR 1 /* - uPD98401 registers */ -#define uPD98401_IA_TGT_PHY 3 /* - PHY device */ -#define uPD98401_IA_ADDR 0x0003ffff - -/* - * Command Register Status - */ - -#define uPD98401_BUSY 0x80000000 /* SAR is busy */ -#define uPD98401_LOCKED 0x40000000 /* SAR is locked by other CPU */ - -/* - * Indications - */ - -/* Normal (AAL5) Receive Indication */ -#define uPD98401_AAL5_UINFO 0xffff0000 /* user-supplied information */ -#define uPD98401_AAL5_UINFO_SHIFT 16 -#define uPD98401_AAL5_SIZE 0x0000ffff /* PDU size (in _CELLS_ !!) */ -#define uPD98401_AAL5_CHAN 0x7fff0000 /* Channel number */ -#define uPD98401_AAL5_CHAN_SHIFT 16 -#define uPD98401_AAL5_ERR 0x00008000 /* Error indication */ -#define uPD98401_AAL5_CI 0x00004000 /* Congestion Indication */ -#define uPD98401_AAL5_CLP 0x00002000 /* CLP (>= 1 cell had CLP=1) */ -#define uPD98401_AAL5_ES 0x00000f00 /* Error Status */ -#define uPD98401_AAL5_ES_SHIFT 8 -#define uPD98401_AAL5_ES_NONE 0 /* No error */ -#define uPD98401_AAL5_ES_FREE 1 /* Receiver free buf underflow */ -#define uPD98401_AAL5_ES_FIFO 2 /* Receiver FIFO overrun */ -#define uPD98401_AAL5_ES_TOOBIG 3 /* Maximum length violation */ -#define uPD98401_AAL5_ES_CRC 4 /* CRC error */ -#define uPD98401_AAL5_ES_ABORT 5 /* User abort */ -#define uPD98401_AAL5_ES_LENGTH 6 /* Length violation */ -#define uPD98401_AAL5_ES_T1 7 /* T1 error (timeout) */ -#define uPD98401_AAL5_ES_DEACT 8 /* Deactivated with DEACT_CHAN */ -#define uPD98401_AAL5_POOL 0x0000001f /* Free buffer pool number */ - -/* Raw Cell Indication */ -#define uPD98401_RAW_UINFO uPD98401_AAL5_UINFO -#define uPD98401_RAW_UINFO_SHIFT uPD98401_AAL5_UINFO_SHIFT -#define uPD98401_RAW_HEC 0x000000ff /* HEC */ -#define uPD98401_RAW_CHAN uPD98401_AAL5_CHAN -#define uPD98401_RAW_CHAN_SHIFT uPD98401_AAL5_CHAN_SHIFT - -/* Transmit Indication */ -#define uPD98401_TXI_CONN 0x7fff0000 /* Connection Number */ -#define uPD98401_TXI_CONN_SHIFT 16 -#define uPD98401_TXI_ACTIVE 0x00008000 /* Channel remains active */ -#define uPD98401_TXI_PQP 0x00007fff /* Packet Queue Pointer */ - -/* - * Directly Addressable Registers - */ - -#define uPD98401_GMR 0x00 /* General Mode Register */ -#define uPD98401_GSR 0x01 /* General Status Register */ -#define uPD98401_IMR 0x02 /* Interrupt Mask Register */ -#define uPD98401_RQU 0x03 /* Receive Queue Underrun */ -#define uPD98401_RQA 0x04 /* Receive Queue 
Alert */ -#define uPD98401_ADDR 0x05 /* Last Burst Address */ -#define uPD98401_VER 0x06 /* Version Number */ -#define uPD98401_SWR 0x07 /* Software Reset */ -#define uPD98401_CMR 0x08 /* Command Register */ -#define uPD98401_CMR_L 0x09 /* Command Register and Lock/Unlock */ -#define uPD98401_CER 0x0a /* Command Extension Register */ -#define uPD98401_CER_L 0x0b /* Command Ext Reg and Lock/Unlock */ - -#define uPD98401_MSH(n) (0x10+(n)) /* Mailbox n Start Address High */ -#define uPD98401_MSL(n) (0x14+(n)) /* Mailbox n Start Address High */ -#define uPD98401_MBA(n) (0x18+(n)) /* Mailbox n Bottom Address */ -#define uPD98401_MTA(n) (0x1c+(n)) /* Mailbox n Tail Address */ -#define uPD98401_MWA(n) (0x20+(n)) /* Mailbox n Write Address */ - -/* GMR is at 0x00 */ -#define uPD98401_GMR_ONE 0x80000000 /* Must be set to one */ -#define uPD98401_GMR_SLM 0x40000000 /* Address mode (0 word, 1 byte) */ -#define uPD98401_GMR_CPE 0x00008000 /* Control Memory Parity Enable */ -#define uPD98401_GMR_LP 0x00004000 /* Loopback */ -#define uPD98401_GMR_WA 0x00002000 /* Early Bus Write Abort/RDY */ -#define uPD98401_GMR_RA 0x00001000 /* Early Read Abort/RDY */ -#define uPD98401_GMR_SZ 0x00000f00 /* Burst Size Enable */ -#define uPD98401_BURST16 0x00000800 /* 16-word burst */ -#define uPD98401_BURST8 0x00000400 /* 8-word burst */ -#define uPD98401_BURST4 0x00000200 /* 4-word burst */ -#define uPD98401_BURST2 0x00000100 /* 2-word burst */ -#define uPD98401_GMR_AD 0x00000080 /* Address (burst resolution) Disable */ -#define uPD98401_GMR_BO 0x00000040 /* Byte Order (0 little, 1 big) */ -#define uPD98401_GMR_PM 0x00000020 /* Bus Parity Mode (0 byte, 1 word)*/ -#define uPD98401_GMR_PC 0x00000010 /* Bus Parity Control (0even,1odd) */ -#define uPD98401_GMR_BPE 0x00000008 /* Bus Parity Enable */ -#define uPD98401_GMR_DR 0x00000004 /* Receive Drop Mode (0drop,1don't)*/ -#define uPD98401_GMR_SE 0x00000002 /* Shapers Enable */ -#define uPD98401_GMR_RE 0x00000001 /* Receiver Enable */ - -/* GSR is at 0x01, IMR is at 0x02 */ -#define uPD98401_INT_PI 0x80000000 /* PHY interrupt */ -#define uPD98401_INT_RQA 0x40000000 /* Receive Queue Alert */ -#define uPD98401_INT_RQU 0x20000000 /* Receive Queue Underrun */ -#define uPD98401_INT_RD 0x10000000 /* Receiver Deactivated */ -#define uPD98401_INT_SPE 0x08000000 /* System Parity Error */ -#define uPD98401_INT_CPE 0x04000000 /* Control Memory Parity Error */ -#define uPD98401_INT_SBE 0x02000000 /* System Bus Error */ -#define uPD98401_INT_IND 0x01000000 /* Initialization Done */ -#define uPD98401_INT_RCR 0x0000ff00 /* Raw Cell Received */ -#define uPD98401_INT_RCR_SHIFT 8 -#define uPD98401_INT_MF 0x000000f0 /* Mailbox Full */ -#define uPD98401_INT_MF_SHIFT 4 -#define uPD98401_INT_MM 0x0000000f /* Mailbox Modified */ - -/* VER is at 0x06 */ -#define uPD98401_MAJOR 0x0000ff00 /* Major revision */ -#define uPD98401_MAJOR_SHIFT 8 -#define uPD98401_MINOR 0x000000ff /* Minor revision */ - -/* - * Indirectly Addressable Registers - */ - -#define uPD98401_IM(n) (0x40000+(n)) /* Scheduler n I and M */ -#define uPD98401_X(n) (0x40010+(n)) /* Scheduler n X */ -#define uPD98401_Y(n) (0x40020+(n)) /* Scheduler n Y */ -#define uPD98401_PC(n) (0x40030+(n)) /* Scheduler n P, C, p and c */ -#define uPD98401_PS(n) (0x40040+(n)) /* Scheduler n priority and status */ - -/* IM contents */ -#define uPD98401_IM_I 0xff000000 /* I */ -#define uPD98401_IM_I_SHIFT 24 -#define uPD98401_IM_M 0x00ffffff /* M */ - -/* PC contents */ -#define uPD98401_PC_P 0xff000000 /* P */ -#define uPD98401_PC_P_SHIFT 24 
-#define uPD98401_PC_C 0x00ff0000 /* C */ -#define uPD98401_PC_C_SHIFT 16 -#define uPD98401_PC_p 0x0000ff00 /* p */ -#define uPD98401_PC_p_SHIFT 8 -#define uPD98401_PC_c 0x000000ff /* c */ - -/* PS contents */ -#define uPD98401_PS_PRIO 0xf0 /* Priority level (0 high, 15 low) */ -#define uPD98401_PS_PRIO_SHIFT 4 -#define uPD98401_PS_S 0x08 /* Scan - must be 0 (internal) */ -#define uPD98401_PS_R 0x04 /* Round Robin (internal) */ -#define uPD98401_PS_A 0x02 /* Active (internal) */ -#define uPD98401_PS_E 0x01 /* Enabled */ - -#define uPD98401_TOS 0x40100 /* Top of Stack Control Memory Address */ -#define uPD98401_SMA 0x40200 /* Shapers Control Memory Start Address */ -#define uPD98401_PMA 0x40201 /* Receive Pool Control Memory Start Address */ -#define uPD98401_T1R 0x40300 /* T1 Register */ -#define uPD98401_VRR 0x40301 /* VPI/VCI Reduction Register/Recv. Shutdown */ -#define uPD98401_TSR 0x40302 /* Time-Stamp Register */ - -/* VRR is at 0x40301 */ -#define uPD98401_VRR_SDM 0x80000000 /* Shutdown Mode */ -#define uPD98401_VRR_SHIFT 0x000f0000 /* VPI/VCI Shift */ -#define uPD98401_VRR_SHIFT_SHIFT 16 -#define uPD98401_VRR_MASK 0x0000ffff /* VPI/VCI mask */ - -/* - * TX packet descriptor - */ - -#define uPD98401_TXPD_SIZE 16 /* descriptor size (in bytes) */ - -#define uPD98401_TXPD_V 0x80000000 /* Valid bit */ -#define uPD98401_TXPD_DP 0x40000000 /* Descriptor (1) or Pointer (0) */ -#define uPD98401_TXPD_SM 0x20000000 /* Single (1) or Multiple (0) */ -#define uPD98401_TXPD_CLPM 0x18000000 /* CLP mode */ -#define uPD98401_CLPM_0 0 /* 00 CLP = 0 */ -#define uPD98401_CLPM_1 3 /* 11 CLP = 1 */ -#define uPD98401_CLPM_LAST 1 /* 01 CLP unless last cell */ -#define uPD98401_TXPD_CLPM_SHIFT 27 -#define uPD98401_TXPD_PTI 0x07000000 /* PTI pattern */ -#define uPD98401_TXPD_PTI_SHIFT 24 -#define uPD98401_TXPD_GFC 0x00f00000 /* GFC pattern */ -#define uPD98401_TXPD_GFC_SHIFT 20 -#define uPD98401_TXPD_C10 0x00040000 /* insert CRC-10 */ -#define uPD98401_TXPD_AAL5 0x00020000 /* AAL5 processing */ -#define uPD98401_TXPD_MB 0x00010000 /* TX mailbox number */ -#define uPD98401_TXPD_UU 0x0000ff00 /* CPCS-UU */ -#define uPD98401_TXPD_UU_SHIFT 8 -#define uPD98401_TXPD_CPI 0x000000ff /* CPI */ - -/* - * TX buffer descriptor - */ - -#define uPD98401_TXBD_SIZE 8 /* descriptor size (in bytes) */ - -#define uPD98401_TXBD_LAST 0x80000000 /* last buffer in packet */ - -/* - * TX VC table - */ - -/* 1st word has the same structure as in a TX packet descriptor */ -#define uPD98401_TXVC_L 0x80000000 /* last buffer */ -#define uPD98401_TXVC_SHP 0x0f000000 /* shaper number */ -#define uPD98401_TXVC_SHP_SHIFT 24 -#define uPD98401_TXVC_VPI 0x00ff0000 /* VPI */ -#define uPD98401_TXVC_VPI_SHIFT 16 -#define uPD98401_TXVC_VCI 0x0000ffff /* VCI */ -#define uPD98401_TXVC_QRP 6 /* Queue Read Pointer is in word 6 */ - -/* - * RX free buffer pools descriptor - */ - -#define uPD98401_RXFP_ALERT 0x70000000 /* low water mark */ -#define uPD98401_RXFP_ALERT_SHIFT 28 -#define uPD98401_RXFP_BFSZ 0x0f000000 /* buffer size, 64*2^n */ -#define uPD98401_RXFP_BFSZ_SHIFT 24 -#define uPD98401_RXFP_BTSZ 0x00ff0000 /* batch size, n+1 */ -#define uPD98401_RXFP_BTSZ_SHIFT 16 -#define uPD98401_RXFP_REMAIN 0x0000ffff /* remaining batches in pool */ - -/* - * RX VC table - */ - -#define uPD98401_RXVC_BTSZ 0xff000000 /* remaining free buffers in batch */ -#define uPD98401_RXVC_BTSZ_SHIFT 24 -#define uPD98401_RXVC_MB 0x00200000 /* RX mailbox number */ -#define uPD98401_RXVC_POOL 0x001f0000 /* free buffer pool number */ -#define uPD98401_RXVC_POOL_SHIFT 16 
-#define uPD98401_RXVC_UINFO 0x0000ffff /* user-supplied information */ -#define uPD98401_RXVC_T1 0xffff0000 /* T1 timestamp */ -#define uPD98401_RXVC_T1_SHIFT 16 -#define uPD98401_RXVC_PR 0x00008000 /* Packet Reception, 1 if busy */ -#define uPD98401_RXVC_DR 0x00004000 /* FIFO Drop */ -#define uPD98401_RXVC_OD 0x00001000 /* Drop OAM cells */ -#define uPD98401_RXVC_AR 0x00000800 /* AAL5 or raw cell; 1 if AAL5 */ -#define uPD98401_RXVC_MAXSEG 0x000007ff /* max number of segments per PDU */ -#define uPD98401_RXVC_REM 0xfffe0000 /* remaining words in curr buffer */ -#define uPD98401_RXVC_REM_SHIFT 17 -#define uPD98401_RXVC_CLP 0x00010000 /* CLP received */ -#define uPD98401_RXVC_BFA 0x00008000 /* Buffer Assigned */ -#define uPD98401_RXVC_BTA 0x00004000 /* Batch Assigned */ -#define uPD98401_RXVC_CI 0x00002000 /* Congestion Indication */ -#define uPD98401_RXVC_DD 0x00001000 /* Dropping incoming cells */ -#define uPD98401_RXVC_DP 0x00000800 /* like PR ? */ -#define uPD98401_RXVC_CURSEG 0x000007ff /* Current Segment count */ - -/* - * RX lookup table - */ - -#define uPD98401_RXLT_ENBL 0x8000 /* Enable */ - -#endif diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c deleted file mode 100644 index 239852d85558..000000000000 --- a/drivers/atm/uPD98402.c +++ /dev/null @@ -1,266 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) declarations */ - -/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "uPD98402.h" - - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) -#endif - - -struct uPD98402_priv { - struct k_sonet_stats sonet_stats;/* link diagnostics */ - unsigned char framing; /* SONET/SDH framing */ - int loop_mode; /* loopback mode */ - spinlock_t lock; -}; - - -#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data) - -#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg) -#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg) - - -static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero) -{ - struct sonet_stats tmp; - int error = 0; - - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); - sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); - if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); - if (zero && !error) { - /* unused fields are reported as -1, but we must not "adjust" - them */ - tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0; - sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp); - } - return error ? 
-EFAULT : 0; -} - - -static int set_framing(struct atm_dev *dev,unsigned char framing) -{ - static const unsigned char sonet[] = { 1,2,3,0 }; - static const unsigned char sdh[] = { 1,0,0,2 }; - const char *set; - unsigned long flags; - - switch (framing) { - case SONET_FRAME_SONET: - set = sonet; - break; - case SONET_FRAME_SDH: - set = sdh; - break; - default: - return -EINVAL; - } - spin_lock_irqsave(&PRIV(dev)->lock, flags); - PUT(set[0],C11T); - PUT(set[1],C12T); - PUT(set[2],C13T); - PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) | (set[3] << - uPD98402_MDR_SS_SHIFT),MDR); - spin_unlock_irqrestore(&PRIV(dev)->lock, flags); - return 0; -} - - -static int get_sense(struct atm_dev *dev,u8 __user *arg) -{ - unsigned long flags; - unsigned char s[3]; - - spin_lock_irqsave(&PRIV(dev)->lock, flags); - s[0] = GET(C11R); - s[1] = GET(C12R); - s[2] = GET(C13R); - spin_unlock_irqrestore(&PRIV(dev)->lock, flags); - return (put_user(s[0], arg) || put_user(s[1], arg+1) || - put_user(s[2], arg+2) || put_user(0xff, arg+3) || - put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0; -} - - -static int set_loopback(struct atm_dev *dev,int mode) -{ - unsigned char mode_reg; - - mode_reg = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP | - uPD98402_MDR_RPLP); - switch (__ATM_LM_XTLOC(mode)) { - case __ATM_LM_NONE: - break; - case __ATM_LM_PHY: - mode_reg |= uPD98402_MDR_TPLP; - break; - case __ATM_LM_ATM: - mode_reg |= uPD98402_MDR_ALP; - break; - default: - return -EINVAL; - } - switch (__ATM_LM_XTRMT(mode)) { - case __ATM_LM_NONE: - break; - case __ATM_LM_PHY: - mode_reg |= uPD98402_MDR_RPLP; - break; - default: - return -EINVAL; - } - PUT(mode_reg,MDR); - PRIV(dev)->loop_mode = mode; - return 0; -} - - -static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) -{ - switch (cmd) { - - case SONET_GETSTATZ: - case SONET_GETSTAT: - return fetch_stats(dev,arg, cmd == SONET_GETSTATZ); - case SONET_SETFRAMING: - return set_framing(dev, (int)(unsigned long)arg); - case SONET_GETFRAMING: - return put_user(PRIV(dev)->framing,(int __user *)arg) ? - -EFAULT : 0; - case SONET_GETFRSENSE: - return get_sense(dev,arg); - case ATM_SETLOOP: - return set_loopback(dev, (int)(unsigned long)arg); - case ATM_GETLOOP: - return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ? - -EFAULT : 0; - case ATM_QUERYLOOP: - return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM | - ATM_LM_RMT_PHY,(int __user *)arg) ? 
-EFAULT : 0; - default: - return -ENOIOCTLCMD; - } -} - - -#define ADD_LIMITED(s,v) \ - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } - - -static void stat_event(struct atm_dev *dev) -{ - unsigned char events; - - events = GET(PCR); - if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB); - if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT); - if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT); - if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT); - if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT); -} - - -#undef ADD_LIMITED - - -static void uPD98402_int(struct atm_dev *dev) -{ - static unsigned long silence = 0; - unsigned char reason; - - while ((reason = GET(PICR))) { - if (reason & uPD98402_INT_LOS) - printk(KERN_NOTICE "%s(itf %d): signal lost\n", - dev->type,dev->number); - if (reason & uPD98402_INT_PFM) stat_event(dev); - if (reason & uPD98402_INT_PCO) { - (void) GET(PCOCR); /* clear interrupt cause */ - atomic_add(GET(HECCT), - &PRIV(dev)->sonet_stats.uncorr_hcs); - } - if ((reason & uPD98402_INT_RFO) && - (time_after(jiffies, silence) || silence == 0)) { - printk(KERN_WARNING "%s(itf %d): uPD98402 receive " - "FIFO overflow\n",dev->type,dev->number); - silence = (jiffies+HZ/2)|1; - } - } -} - - -static int uPD98402_start(struct atm_dev *dev) -{ - DPRINTK("phy_start\n"); - if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) - return -ENOMEM; - spin_lock_init(&PRIV(dev)->lock); - memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats)); - (void) GET(PCR); /* clear performance events */ - PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */ - (void) GET(PCOCR); /* clear overflows */ - PUT(~uPD98402_PCO_HECC,PCOMR); - (void) GET(PICR); /* clear interrupts */ - PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | - uPD98402_INT_LOS),PIMR); /* enable them */ - (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); - return 0; -} - - -static int uPD98402_stop(struct atm_dev *dev) -{ - /* let SAR driver worry about stopping interrupts */ - kfree(PRIV(dev)); - return 0; -} - - -static const struct atmphy_ops uPD98402_ops = { - .start = uPD98402_start, - .ioctl = uPD98402_ioctl, - .interrupt = uPD98402_int, - .stop = uPD98402_stop, -}; - - -int uPD98402_init(struct atm_dev *dev) -{ -DPRINTK("phy_init\n"); - dev->phy = &uPD98402_ops; - return 0; -} - - -MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(uPD98402_init); - -static __init int uPD98402_module_init(void) -{ - return 0; -} -module_init(uPD98402_module_init); -/* module_exit not defined so not unloadable */ diff --git a/drivers/atm/uPD98402.h b/drivers/atm/uPD98402.h deleted file mode 100644 index 437cfaa20c96..000000000000 --- a/drivers/atm/uPD98402.h +++ /dev/null @@ -1,107 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* drivers/atm/uPD98402.h - NEC uPD98402 (PHY) declarations */ - -/* Written 1995 by Werner Almesberger, EPFL LRC */ - - -#ifndef DRIVERS_ATM_uPD98402_H -#define DRIVERS_ATM_uPD98402_H - -/* - * Registers - */ - -#define uPD98402_CMR 0x00 /* Command Register */ -#define uPD98402_MDR 0x01 /* Mode Register */ -#define uPD98402_PICR 0x02 /* PHY Interrupt Cause Register */ -#define uPD98402_PIMR 0x03 /* PHY Interrupt Mask Register */ -#define uPD98402_ACR 0x04 /* Alarm Cause Register 
*/ -#define uPD98402_ACMR 0x05 /* Alarm Cause Mask Register */ -#define uPD98402_PCR 0x06 /* Performance Cause Register */ -#define uPD98402_PCMR 0x07 /* Performance Cause Mask Register */ -#define uPD98402_IACM 0x08 /* Internal Alarm Cause Mask Register */ -#define uPD98402_B1ECT 0x09 /* B1 Error Count Register */ -#define uPD98402_B2ECT 0x0a /* B2 Error Count Register */ -#define uPD98402_B3ECT 0x0b /* B3 Error Count Regster */ -#define uPD98402_PFECB 0x0c /* Path FEBE Count Register */ -#define uPD98402_LECCT 0x0d /* Line FEBE Count Register */ -#define uPD98402_HECCT 0x0e /* HEC Error Count Register */ -#define uPD98402_FJCT 0x0f /* Frequence Justification Count Reg */ -#define uPD98402_PCOCR 0x10 /* Perf. Counter Overflow Cause Reg */ -#define uPD98402_PCOMR 0x11 /* Perf. Counter Overflow Mask Reg */ -#define uPD98402_C11T 0x20 /* C11T Data Register */ -#define uPD98402_C12T 0x21 /* C12T Data Register */ -#define uPD98402_C13T 0x22 /* C13T Data Register */ -#define uPD98402_F1T 0x23 /* F1T Data Register */ -#define uPD98402_K2T 0x25 /* K2T Data Register */ -#define uPD98402_C2T 0x26 /* C2T Data Register */ -#define uPD98402_F2T 0x27 /* F2T Data Register */ -#define uPD98402_C11R 0x30 /* C11T Data Register */ -#define uPD98402_C12R 0x31 /* C12T Data Register */ -#define uPD98402_C13R 0x32 /* C13T Data Register */ -#define uPD98402_F1R 0x33 /* F1T Data Register */ -#define uPD98402_K2R 0x35 /* K2T Data Register */ -#define uPD98402_C2R 0x36 /* C2T Data Register */ -#define uPD98402_F2R 0x37 /* F2T Data Register */ - -/* CMR is at 0x00 */ -#define uPD98402_CMR_PFRF 0x01 /* Send path FERF */ -#define uPD98402_CMR_LFRF 0x02 /* Send line FERF */ -#define uPD98402_CMR_PAIS 0x04 /* Send path AIS */ -#define uPD98402_CMR_LAIS 0x08 /* Send line AIS */ - -/* MDR is at 0x01 */ -#define uPD98402_MDR_ALP 0x01 /* ATM layer loopback */ -#define uPD98402_MDR_TPLP 0x02 /* PMD loopback, to host */ -#define uPD98402_MDR_RPLP 0x04 /* PMD loopback, to network */ -#define uPD98402_MDR_SS0 0x08 /* SS0 */ -#define uPD98402_MDR_SS1 0x10 /* SS1 */ -#define uPD98402_MDR_SS_MASK 0x18 /* mask */ -#define uPD98402_MDR_SS_SHIFT 3 /* shift */ -#define uPD98402_MDR_HEC 0x20 /* disable HEC inbound processing */ -#define uPD98402_MDR_FSR 0x40 /* disable frame scrambler */ -#define uPD98402_MDR_CSR 0x80 /* disable cell scrambler */ - -/* PICR is at 0x02, PIMR is at 0x03 */ -#define uPD98402_INT_PFM 0x01 /* performance counter has changed */ -#define uPD98402_INT_ALM 0x02 /* line fault */ -#define uPD98402_INT_RFO 0x04 /* receive FIFO overflow */ -#define uPD98402_INT_PCO 0x08 /* performance counter overflow */ -#define uPD98402_INT_OTD 0x20 /* OTD has occurred */ -#define uPD98402_INT_LOS 0x40 /* Loss Of Signal */ -#define uPD98402_INT_LOF 0x80 /* Loss Of Frame */ - -/* ACR is as 0x04, ACMR is at 0x05 */ -#define uPD98402_ALM_PFRF 0x01 /* path FERF */ -#define uPD98402_ALM_LFRF 0x02 /* line FERF */ -#define uPD98402_ALM_PAIS 0x04 /* path AIS */ -#define uPD98402_ALM_LAIS 0x08 /* line AIS */ -#define uPD98402_ALM_LOD 0x10 /* loss of delineation */ -#define uPD98402_ALM_LOP 0x20 /* loss of pointer */ -#define uPD98402_ALM_OOF 0x40 /* out of frame */ - -/* PCR is at 0x06, PCMR is at 0x07 */ -#define uPD98402_PFM_PFEB 0x01 /* path FEBE */ -#define uPD98402_PFM_LFEB 0x02 /* line FEBE */ -#define uPD98402_PFM_B3E 0x04 /* B3 error */ -#define uPD98402_PFM_B2E 0x08 /* B2 error */ -#define uPD98402_PFM_B1E 0x10 /* B1 error */ -#define uPD98402_PFM_FJ 0x20 /* frequency justification */ - -/* IACM is at 0x08 */ -#define 
uPD98402_IACM_PFRF 0x01 /* don't generate path FERF */ -#define uPD98402_IACM_LFRF 0x02 /* don't generate line FERF */ - -/* PCOCR is at 0x010, PCOMR is at 0x11 */ -#define uPD98402_PCO_B1EC 0x01 /* B1ECT overflow */ -#define uPD98402_PCO_B2EC 0x02 /* B2ECT overflow */ -#define uPD98402_PCO_B3EC 0x04 /* B3ECT overflow */ -#define uPD98402_PCO_PFBC 0x08 /* PFEBC overflow */ -#define uPD98402_PCO_LFBC 0x10 /* LFEVC overflow */ -#define uPD98402_PCO_HECC 0x20 /* HECCT overflow */ -#define uPD98402_PCO_FJC 0x40 /* FJCT overflow */ - - -int uPD98402_init(struct atm_dev *dev); - -#endif diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c deleted file mode 100644 index cf5fffcf98a1..000000000000 --- a/drivers/atm/zatm.c +++ /dev/null @@ -1,1652 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */ - -/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "uPD98401.h" -#include "uPD98402.h" -#include "zeprom.h" -#include "zatm.h" - - -/* - * TODO: - * - * Minor features - * - support 64 kB SDUs (will have to use multibuffer batches then :-( ) - * - proper use of CDV, credit = max(1,CDVT*PCR) - * - AAL0 - * - better receive timestamps - * - OAM - */ - -#define ZATM_COPPER 1 - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) -#endif - -#ifndef CONFIG_ATM_ZATM_DEBUG - - -#define NULLCHECK(x) - -#define EVENT(s,a,b) - - -static void event_dump(void) -{ -} - - -#else - - -/* - * NULL pointer checking - */ - -#define NULLCHECK(x) \ - if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x)) - -/* - * Very extensive activity logging. Greatly improves bug detection speed but - * costs a few Mbps if enabled. - */ - -#define EV 64 - -static const char *ev[EV]; -static unsigned long ev_a[EV],ev_b[EV]; -static int ec = 0; - - -static void EVENT(const char *s,unsigned long a,unsigned long b) -{ - ev[ec] = s; - ev_a[ec] = a; - ev_b[ec] = b; - ec = (ec+1) % EV; -} - - -static void event_dump(void) -{ - int n,i; - - printk(KERN_NOTICE "----- event dump follows -----\n"); - for (n = 0; n < EV; n++) { - i = (ec+n) % EV; - printk(KERN_NOTICE); - printk(ev[i] ? 
ev[i] : "(null)",ev_a[i],ev_b[i]); - } - printk(KERN_NOTICE "----- event dump ends here -----\n"); -} - - -#endif /* CONFIG_ATM_ZATM_DEBUG */ - - -#define RING_BUSY 1 /* indication from do_tx that PDU has to be - backlogged */ - -static struct atm_dev *zatm_boards = NULL; -static unsigned long dummy[2] = {0,0}; - - -#define zin_n(r) inl(zatm_dev->base+r*4) -#define zin(r) inl(zatm_dev->base+uPD98401_##r*4) -#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) -#define zwait() do {} while (zin(CMR) & uPD98401_BUSY) - -/* RX0, RX1, TX0, TX1 */ -static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; -static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ - -#define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i]) - - -/*-------------------------------- utilities --------------------------------*/ - - -static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) -{ - zwait(); - zout(value,CER); - zout(uPD98401_IND_ACC | uPD98401_IA_BALL | - (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); -} - - -static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) -{ - zwait(); - zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | - (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait(); - return zin(CER); -} - - -/*------------------------------- free lists --------------------------------*/ - - -/* - * Free buffer head structure: - * [0] pointer to buffer (for SAR) - * [1] buffer descr link pointer (for SAR) - * [2] back pointer to skb (for poll_rx) - * [3] data - * ... - */ - -struct rx_buffer_head { - u32 buffer; /* pointer to buffer (for SAR) */ - u32 link; /* buffer descriptor link pointer (for SAR) */ - struct sk_buff *skb; /* back pointer to skb (for poll_rx) */ -}; - - -static void refill_pool(struct atm_dev *dev,int pool) -{ - struct zatm_dev *zatm_dev; - struct sk_buff *skb; - struct rx_buffer_head *first; - unsigned long flags; - int align,offset,free,count,size; - - EVENT("refill_pool\n",0,0); - zatm_dev = ZATM_DEV(dev); - size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 : - pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); - if (size < PAGE_SIZE) { - align = 32; /* for 32 byte alignment */ - offset = sizeof(struct rx_buffer_head); - } - else { - align = 4096; - offset = zatm_dev->pool_info[pool].offset+ - sizeof(struct rx_buffer_head); - } - size += align; - spin_lock_irqsave(&zatm_dev->lock, flags); - free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & - uPD98401_RXFP_REMAIN; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - if (free >= zatm_dev->pool_info[pool].low_water) return; - EVENT("starting ... 
POOL: 0x%x, 0x%x\n", - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); - EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); - count = 0; - first = NULL; - while (free < zatm_dev->pool_info[pool].high_water) { - struct rx_buffer_head *head; - - skb = alloc_skb(size,GFP_ATOMIC); - if (!skb) { - printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new " - "skb (%d) with %d free\n",dev->number,size,free); - break; - } - skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+ - align+offset-1) & ~(unsigned long) (align-1))-offset)- - skb->data); - head = (struct rx_buffer_head *) skb->data; - skb_reserve(skb,sizeof(struct rx_buffer_head)); - if (!first) first = head; - count++; - head->buffer = virt_to_bus(skb->data); - head->link = 0; - head->skb = skb; - EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb, - (unsigned long) head); - spin_lock_irqsave(&zatm_dev->lock, flags); - if (zatm_dev->last_free[pool]) - ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> - data))[-1].link = virt_to_bus(head); - zatm_dev->last_free[pool] = skb; - skb_queue_tail(&zatm_dev->pool[pool],skb); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - free++; - } - if (first) { - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(virt_to_bus(first),CER); - zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, - CMR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - EVENT ("POOL: 0x%x, 0x%x\n", - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); - EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); - } -} - - -static void drain_free(struct atm_dev *dev,int pool) -{ - skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); -} - - -static int pool_index(int max_pdu) -{ - int i; - - if (max_pdu % ATM_CELL_PAYLOAD) - printk(KERN_ERR DEV_LABEL ": driver error in pool_index: " - "max_pdu is %d\n",max_pdu); - if (max_pdu > 65536) return -1; - for (i = 0; (64 << i) < max_pdu; i++); - return i+ZATM_AAL5_POOL_BASE; -} - - -/* use_pool isn't reentrant */ - - -static void use_pool(struct atm_dev *dev,int pool) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - int size; - - zatm_dev = ZATM_DEV(dev); - if (!(zatm_dev->pool_info[pool].ref_count++)) { - skb_queue_head_init(&zatm_dev->pool[pool]); - size = pool-ZATM_AAL5_POOL_BASE; - if (size < 0) size = 0; /* 64B... */ - else if (size > 10) size = 10; /* ... 
64kB */ - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << - uPD98401_RXFP_ALERT_SHIFT) | - (1 << uPD98401_RXFP_BTSZ_SHIFT) | - (size << uPD98401_RXFP_BFSZ_SHIFT), - zatm_dev->pool_base+pool*2); - zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+ - pool*2+1); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->last_free[pool] = NULL; - refill_pool(dev,pool); - } - DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); -} - - -static void unuse_pool(struct atm_dev *dev,int pool) -{ - if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) - drain_free(dev,pool); -} - -/*----------------------------------- RX ------------------------------------*/ - - -#if 0 -static void exception(struct atm_vcc *vcc) -{ - static int count = 0; - struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev); - struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc); - unsigned long *qrp; - int i; - - if (count++ > 2) return; - for (i = 0; i < 8; i++) - printk("TX%d: 0x%08lx\n",i, - zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i)); - for (i = 0; i < 5; i++) - printk("SH%d: 0x%08lx\n",i, - zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i)); - qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ - uPD98401_TXVC_QRP); - printk("qrp=0x%08lx\n",(unsigned long) qrp); - for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]); -} -#endif - - -static const char *err_txt[] = { - "No error", - "RX buf underflow", - "RX FIFO overrun", - "Maximum len violation", - "CRC error", - "User abort", - "Length violation", - "T1 error", - "Deactivated", - "???", - "???", - "???", - "???", - "???", - "???", - "???" -}; - - -static void poll_rx(struct atm_dev *dev,int mbx) -{ - struct zatm_dev *zatm_dev; - unsigned long pos; - u32 x; - int error; - - EVENT("poll_rx\n",0,0); - zatm_dev = ZATM_DEV(dev); - pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); - while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { - u32 *here; - struct sk_buff *skb; - struct atm_vcc *vcc; - int cells,size,chan; - - EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); - here = (u32 *) pos; - if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx]) - pos = zatm_dev->mbx_start[mbx]; - cells = here[0] & uPD98401_AAL5_SIZE; -#if 0 -printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]); -{ -unsigned long *x; - printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev, - zatm_dev->pool_base), - zpeekl(zatm_dev,zatm_dev->pool_base+1)); - x = (unsigned long *) here[2]; - printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n", - x[0],x[1],x[2],x[3]); -} -#endif - error = 0; - if (here[3] & uPD98401_AAL5_ERR) { - error = (here[3] & uPD98401_AAL5_ES) >> - uPD98401_AAL5_ES_SHIFT; - if (error == uPD98401_AAL5_ES_DEACT || - error == uPD98401_AAL5_ES_FREE) continue; - } -EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> - uPD98401_AAL5_ES_SHIFT,error); - skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; - __net_timestamp(skb); -#if 0 -printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], - ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], - ((unsigned *) skb->data)[0]); -#endif - EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb, - (unsigned long) here); -#if 0 -printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); -#endif - size = error ? 
0 : ntohs(((__be16 *) skb->data)[cells* - ATM_CELL_PAYLOAD/sizeof(u16)-3]); - EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size); - chan = (here[3] & uPD98401_AAL5_CHAN) >> - uPD98401_AAL5_CHAN_SHIFT; - if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { - int pos; - vcc = zatm_dev->rx_map[chan]; - pos = ZATM_VCC(vcc)->pool; - if (skb == zatm_dev->last_free[pos]) - zatm_dev->last_free[pos] = NULL; - skb_unlink(skb, zatm_dev->pool + pos); - } - else { - printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " - "for non-existing channel\n",dev->number); - size = 0; - vcc = NULL; - event_dump(); - } - if (error) { - static unsigned long silence = 0; - static int last_error = 0; - - if (error != last_error || - time_after(jiffies, silence) || silence == 0){ - printk(KERN_WARNING DEV_LABEL "(itf %d): " - "chan %d error %s\n",dev->number,chan, - err_txt[error]); - last_error = error; - silence = (jiffies+2*HZ)|1; - } - size = 0; - } - if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER || - size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) { - printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d " - "cells\n",dev->number,size,cells); - size = 0; - event_dump(); - } - if (size > ATM_MAX_AAL5_PDU) { - printk(KERN_ERR DEV_LABEL "(itf %d): size too big " - "(%d)\n",dev->number,size); - size = 0; - event_dump(); - } - if (!size) { - dev_kfree_skb_irq(skb); - if (vcc) atomic_inc(&vcc->stats->rx_err); - continue; - } - if (!atm_charge(vcc,skb->truesize)) { - dev_kfree_skb_irq(skb); - continue; - } - skb->len = size; - ATM_SKB(skb)->vcc = vcc; - vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); - } - zout(pos & 0xffff,MTA(mbx)); -#if 0 /* probably a stupid idea */ - refill_pool(dev,zatm_vcc->pool); - /* maybe this saves us a few interrupts */ -#endif -} - - -static int open_rx_first(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - unsigned short chan; - int cells; - - DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053)); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - zatm_vcc->rx_chan = 0; - if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0; - if (vcc->qos.aal == ATM_AAL5) { - if (vcc->qos.rxtp.max_sdu > 65464) - vcc->qos.rxtp.max_sdu = 65464; - /* fix this - we may want to receive 64kB SDUs - later */ - cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, - ATM_CELL_PAYLOAD); - zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); - } - else { - cells = 1; - zatm_vcc->pool = ZATM_AAL0_POOL; - } - if (zatm_vcc->pool < 0) return -EMSGSIZE; - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(uPD98401_OPEN_CHAN,CMR); - zwait(); - DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); - chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - DPRINTK("chan is %d\n",chan); - if (!chan) return -EAGAIN; - use_pool(vcc->dev,zatm_vcc->pool); - DPRINTK("pool %d\n",zatm_vcc->pool); - /* set up VC descriptor */ - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, - chan*VC_SIZE/4); - zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? 
- uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1); - zpokel(zatm_dev,0,chan*VC_SIZE/4+2); - zatm_vcc->rx_chan = chan; - zatm_dev->rx_map[chan] = vcc; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return 0; -} - - -static int open_rx_second(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - int pos,shift; - - DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053)); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - if (!zatm_vcc->rx_chan) return 0; - spin_lock_irqsave(&zatm_dev->lock, flags); - /* should also handle VPI @@@ */ - pos = vcc->vci >> 1; - shift = (1-(vcc->vci & 1)) << 4; - zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) | - ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return 0; -} - - -static void close_rx(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - int pos,shift; - - zatm_vcc = ZATM_VCC(vcc); - zatm_dev = ZATM_DEV(vcc->dev); - if (!zatm_vcc->rx_chan) return; - DPRINTK("close_rx\n"); - /* disable receiver */ - if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { - spin_lock_irqsave(&zatm_dev->lock, flags); - pos = vcc->vci >> 1; - shift = (1-(vcc->vci & 1)) << 4; - zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); - zwait(); - zout(uPD98401_NOP,CMR); - zwait(); - zout(uPD98401_NOP,CMR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - } - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << - uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait(); - udelay(10); /* why oh why ... ? */ - zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << - uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait(); - if (!(zin(CMR) & uPD98401_CHAN_ADDR)) - printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " - "%d\n",vcc->dev->number,zatm_vcc->rx_chan); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL; - zatm_vcc->rx_chan = 0; - unuse_pool(vcc->dev,zatm_vcc->pool); -} - - -static int start_rx(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev; - int i; - - DPRINTK("start_rx\n"); - zatm_dev = ZATM_DEV(dev); - zatm_dev->rx_map = kcalloc(zatm_dev->chans, - sizeof(*zatm_dev->rx_map), - GFP_KERNEL); - if (!zatm_dev->rx_map) return -ENOMEM; - /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ - zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); - /* prepare free buffer pools */ - for (i = 0; i <= ZATM_LAST_POOL; i++) { - zatm_dev->pool_info[i].ref_count = 0; - zatm_dev->pool_info[i].rqa_count = 0; - zatm_dev->pool_info[i].rqu_count = 0; - zatm_dev->pool_info[i].low_water = LOW_MARK; - zatm_dev->pool_info[i].high_water = HIGH_MARK; - zatm_dev->pool_info[i].offset = 0; - zatm_dev->pool_info[i].next_off = 0; - zatm_dev->pool_info[i].next_cnt = 0; - zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES; - } - return 0; -} - - -/*----------------------------------- TX ------------------------------------*/ - - -static int do_tx(struct sk_buff *skb) -{ - struct atm_vcc *vcc; - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - u32 *dsc; - unsigned long flags; - - EVENT("do_tx\n",0,0); - DPRINTK("sending skb %p\n",skb); - vcc = ATM_SKB(skb)->vcc; - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0); - spin_lock_irqsave(&zatm_dev->lock, flags); - if (!skb_shinfo(skb)->nr_frags) 
{ - if (zatm_vcc->txing == RING_ENTRIES-1) { - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return RING_BUSY; - } - zatm_vcc->txing++; - dsc = zatm_vcc->ring+zatm_vcc->ring_curr; - zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) & - (RING_ENTRIES*RING_WORDS-1); - dsc[1] = 0; - dsc[2] = skb->len; - dsc[3] = virt_to_bus(skb->data); - mb(); - dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM - | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | - (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? - uPD98401_CLPM_1 : uPD98401_CLPM_0)); - EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0); - } - else { -printk("NONONONOO!!!!\n"); - dsc = NULL; -#if 0 - u32 *put; - int i; - - dsc = kmalloc(uPD98401_TXPD_SIZE * 2 + - uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC); - if (!dsc) { - if (vcc->pop) - vcc->pop(vcc, skb); - else - dev_kfree_skb_irq(skb); - return -EAGAIN; - } - /* @@@ should check alignment */ - put = dsc+8; - dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | - (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | - (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? - uPD98401_CLPM_1 : uPD98401_CLPM_0)); - dsc[1] = 0; - dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE; - dsc[3] = virt_to_bus(put); - for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) { - *put++ = ((struct iovec *) skb->data)[i].iov_len; - *put++ = virt_to_bus(((struct iovec *) - skb->data)[i].iov_base); - } - put[-2] |= uPD98401_TXBD_LAST; -#endif - } - ZATM_PRV_DSC(skb) = dsc; - skb_queue_tail(&zatm_vcc->tx_queue,skb); - DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ - uPD98401_TXVC_QRP)); - zwait(); - zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << - uPD98401_CHAN_ADDR_SHIFT),CMR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - EVENT("done\n",0,0); - return 0; -} - - -static inline void dequeue_tx(struct atm_vcc *vcc) -{ - struct zatm_vcc *zatm_vcc; - struct sk_buff *skb; - - EVENT("dequeue_tx\n",0,0); - zatm_vcc = ZATM_VCC(vcc); - skb = skb_dequeue(&zatm_vcc->tx_queue); - if (!skb) { - printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not " - "txing\n",vcc->dev->number); - return; - } -#if 0 /* @@@ would fail on CLP */ -if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | - uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n", - *ZATM_PRV_DSC(skb)); -#endif - *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */ - zatm_vcc->txing--; - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb_irq(skb); - while ((skb = skb_dequeue(&zatm_vcc->backlog))) - if (do_tx(skb) == RING_BUSY) { - skb_queue_head(&zatm_vcc->backlog,skb); - break; - } - atomic_inc(&vcc->stats->tx); - wake_up(&zatm_vcc->tx_wait); -} - - -static void poll_tx(struct atm_dev *dev,int mbx) -{ - struct zatm_dev *zatm_dev; - unsigned long pos; - u32 x; - - EVENT("poll_tx\n",0,0); - zatm_dev = ZATM_DEV(dev); - pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); - while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { - int chan; - -#if 1 - u32 data,*addr; - - EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); - addr = (u32 *) pos; - data = *addr; - chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; - EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr, - data); - EVENT("chan = %d\n",chan,0); -#else -NO ! 
- chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN) - >> uPD98401_TXI_CONN_SHIFT; -#endif - if (chan < zatm_dev->chans && zatm_dev->tx_map[chan]) - dequeue_tx(zatm_dev->tx_map[chan]); - else { - printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication " - "for non-existing channel %d\n",dev->number,chan); - event_dump(); - } - if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx]) - pos = zatm_dev->mbx_start[mbx]; - } - zout(pos & 0xffff,MTA(mbx)); -} - - -/* - * BUG BUG BUG: Doesn't handle "new-style" rate specification yet. - */ - -static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - unsigned long i,m,c; - int shaper; - - DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max); - zatm_dev = ZATM_DEV(dev); - if (!zatm_dev->free_shapers) return -EAGAIN; - for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++); - zatm_dev->free_shapers &= ~1 << shaper; - if (ubr) { - c = 5; - i = m = 1; - zatm_dev->ubr_ref_cnt++; - zatm_dev->ubr = shaper; - *pcr = 0; - } - else { - if (min) { - if (min <= 255) { - i = min; - m = ATM_OC3_PCR; - } - else { - i = 255; - m = ATM_OC3_PCR*255/min; - } - } - else { - if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw; - if (max <= 255) { - i = max; - m = ATM_OC3_PCR; - } - else { - i = 255; - m = DIV_ROUND_UP(ATM_OC3_PCR*255, max); - } - } - if (i > m) { - printk(KERN_CRIT DEV_LABEL "shaper algorithm botched " - "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m); - m = i; - } - *pcr = i*ATM_OC3_PCR/m; - c = 20; /* @@@ should use max_cdv ! */ - if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL; - if (zatm_dev->tx_bw < *pcr) return -EAGAIN; - zatm_dev->tx_bw -= *pcr; - } - spin_lock_irqsave(&zatm_dev->lock, flags); - DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr); - zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper)); - zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); - zpokel(zatm_dev,0,uPD98401_X(shaper)); - zpokel(zatm_dev,0,uPD98401_Y(shaper)); - zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper)); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return shaper; -} - - -static void dealloc_shaper(struct atm_dev *dev,int shaper) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - - zatm_dev = ZATM_DEV(dev); - if (shaper == zatm_dev->ubr) { - if (--zatm_dev->ubr_ref_cnt) return; - zatm_dev->ubr = -1; - } - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E, - uPD98401_PS(shaper)); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->free_shapers |= 1 << shaper; -} - - -static void close_tx(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - int chan; - - zatm_vcc = ZATM_VCC(vcc); - zatm_dev = ZATM_DEV(vcc->dev); - chan = zatm_vcc->tx_chan; - if (!chan) return; - DPRINTK("close_tx\n"); - if (skb_peek(&zatm_vcc->backlog)) { - printk("waiting for backlog to drain ...\n"); - event_dump(); - wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); - } - if (skb_peek(&zatm_vcc->tx_queue)) { - printk("waiting for TX queue to drain ...\n"); - event_dump(); - wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue)); - } - spin_lock_irqsave(&zatm_dev->lock, flags); -#if 0 - zwait(); - zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); -#endif - zwait(); - zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait(); - if (!(zin(CMR) & uPD98401_CHAN_ADDR)) - 
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " - "%d\n",vcc->dev->number,chan); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_vcc->tx_chan = 0; - zatm_dev->tx_map[chan] = NULL; - if (zatm_vcc->shaper != zatm_dev->ubr) { - zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; - dealloc_shaper(vcc->dev,zatm_vcc->shaper); - } - kfree(zatm_vcc->ring); -} - - -static int open_tx_first(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - u32 *loop; - unsigned short chan; - int unlimited; - - DPRINTK("open_tx_first\n"); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - zatm_vcc->tx_chan = 0; - if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(uPD98401_OPEN_CHAN,CMR); - zwait(); - DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); - chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - DPRINTK("chan is %d\n",chan); - if (!chan) return -EAGAIN; - unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && - (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR || - vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); - if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; - else { - int pcr; - - if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; - if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, - vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) - < 0) { - close_tx(vcc); - return zatm_vcc->shaper; - } - if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR; - vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr; - } - zatm_vcc->tx_chan = chan; - skb_queue_head_init(&zatm_vcc->tx_queue); - init_waitqueue_head(&zatm_vcc->tx_wait); - /* initialize ring */ - zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL); - if (!zatm_vcc->ring) return -ENOMEM; - loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; - loop[0] = uPD98401_TXPD_V; - loop[1] = loop[2] = 0; - loop[3] = virt_to_bus(zatm_vcc->ring); - zatm_vcc->ring_curr = 0; - zatm_vcc->txing = 0; - skb_queue_head_init(&zatm_vcc->backlog); - zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring), - chan*VC_SIZE/4+uPD98401_TXVC_QRP); - return 0; -} - - -static int open_tx_second(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - - DPRINTK("open_tx_second\n"); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - if (!zatm_vcc->tx_chan) return 0; - /* set up VC descriptor */ - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4); - zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper << - uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) | - vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1); - zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc; - return 0; -} - - -static int start_tx(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev; - int i; - - DPRINTK("start_tx\n"); - zatm_dev = ZATM_DEV(dev); - zatm_dev->tx_map = kmalloc_array(zatm_dev->chans, - sizeof(*zatm_dev->tx_map), - GFP_KERNEL); - if (!zatm_dev->tx_map) return -ENOMEM; - zatm_dev->tx_bw = ATM_OC3_PCR; - zatm_dev->free_shapers = (1 << NR_SHAPERS)-1; - zatm_dev->ubr = -1; - zatm_dev->ubr_ref_cnt = 0; - /* initialize shapers */ - for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i)); - return 0; -} - - -/*------------------------------- interrupts --------------------------------*/ - - -static 
irqreturn_t zatm_int(int irq,void *dev_id) -{ - struct atm_dev *dev; - struct zatm_dev *zatm_dev; - u32 reason; - int handled = 0; - - dev = dev_id; - zatm_dev = ZATM_DEV(dev); - while ((reason = zin(GSR))) { - handled = 1; - EVENT("reason 0x%x\n",reason,0); - if (reason & uPD98401_INT_PI) { - EVENT("PHY int\n",0,0); - dev->phy->interrupt(dev); - } - if (reason & uPD98401_INT_RQA) { - unsigned long pools; - int i; - - pools = zin(RQA); - EVENT("RQA (0x%08x)\n",pools,0); - for (i = 0; pools; i++) { - if (pools & 1) { - refill_pool(dev,i); - zatm_dev->pool_info[i].rqa_count++; - } - pools >>= 1; - } - } - if (reason & uPD98401_INT_RQU) { - unsigned long pools; - int i; - pools = zin(RQU); - printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n", - dev->number,pools); - event_dump(); - for (i = 0; pools; i++) { - if (pools & 1) { - refill_pool(dev,i); - zatm_dev->pool_info[i].rqu_count++; - } - pools >>= 1; - } - } - /* don't handle RD */ - if (reason & uPD98401_INT_SPE) - printk(KERN_ALERT DEV_LABEL "(itf %d): system parity " - "error at 0x%08x\n",dev->number,zin(ADDR)); - if (reason & uPD98401_INT_CPE) - printk(KERN_ALERT DEV_LABEL "(itf %d): control memory " - "parity error at 0x%08x\n",dev->number,zin(ADDR)); - if (reason & uPD98401_INT_SBE) { - printk(KERN_ALERT DEV_LABEL "(itf %d): system bus " - "error at 0x%08x\n",dev->number,zin(ADDR)); - event_dump(); - } - /* don't handle IND */ - if (reason & uPD98401_INT_MF) { - printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full " - "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF) - >> uPD98401_INT_MF_SHIFT); - event_dump(); - /* @@@ should try to recover */ - } - if (reason & uPD98401_INT_MM) { - if (reason & 1) poll_rx(dev,0); - if (reason & 2) poll_rx(dev,1); - if (reason & 4) poll_tx(dev,2); - if (reason & 8) poll_tx(dev,3); - } - /* @@@ handle RCRn */ - } - return IRQ_RETVAL(handled); -} - - -/*----------------------------- (E)EPROM access -----------------------------*/ - - -static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value, - unsigned short cmd) -{ - int error; - - if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value))) - printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n", - error); -} - - -static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd) -{ - unsigned int value; - int error; - - if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value))) - printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n", - error); - return value; -} - - -static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data, - int bits, unsigned short cmd) -{ - unsigned long value; - int i; - - for (i = bits-1; i >= 0; i--) { - value = ZEPROM_CS | (((data >> i) & 1) ? 
ZEPROM_DI : 0); - eprom_set(zatm_dev,value,cmd); - eprom_set(zatm_dev,value | ZEPROM_SK,cmd); - eprom_set(zatm_dev,value,cmd); - } -} - - -static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte, - unsigned short cmd) -{ - int i; - - *byte = 0; - for (i = 8; i; i--) { - eprom_set(zatm_dev,ZEPROM_CS,cmd); - eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd); - *byte <<= 1; - if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1; - eprom_set(zatm_dev,ZEPROM_CS,cmd); - } -} - - -static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset, - int swap) -{ - unsigned char buf[ZEPROM_SIZE]; - struct zatm_dev *zatm_dev; - int i; - - zatm_dev = ZATM_DEV(dev); - for (i = 0; i < ZEPROM_SIZE; i += 2) { - eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */ - eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd); - eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd); - eprom_get_byte(zatm_dev,buf+i+swap,cmd); - eprom_get_byte(zatm_dev,buf+i+1-swap,cmd); - eprom_set(zatm_dev,0,cmd); /* deselect EPROM */ - } - memcpy(dev->esi,buf+offset,ESI_LEN); - return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */ -} - - -static void eprom_get_esi(struct atm_dev *dev) -{ - if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return; - (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0); -} - - -/*--------------------------------- entries ---------------------------------*/ - - -static int zatm_init(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev; - struct pci_dev *pci_dev; - unsigned short command; - int error,i,last; - unsigned long t0,t1,t2; - - DPRINTK(">zatm_init\n"); - zatm_dev = ZATM_DEV(dev); - spin_lock_init(&zatm_dev->lock); - pci_dev = zatm_dev->pci_dev; - zatm_dev->base = pci_resource_start(pci_dev, 0); - zatm_dev->irq = pci_dev->irq; - if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) { - printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n", - dev->number,error); - return -EINVAL; - } - if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, - command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) { - printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)" - "\n",dev->number,error); - return -EIO; - } - eprom_get_esi(dev); - printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,", - dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq); - /* reset uPD98401 */ - zout(0,SWR); - while (!(zin(GSR) & uPD98401_INT_IND)); - zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR); - last = MAX_CRAM_SIZE; - for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { - zpokel(zatm_dev,0x55555555,i); - if (zpeekl(zatm_dev,i) != 0x55555555) last = i; - else { - zpokel(zatm_dev,0xAAAAAAAA,i); - if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i; - else zpokel(zatm_dev,i,i); - } - } - for (i = 0; i < last; i += RAM_INCREMENT) - if (zpeekl(zatm_dev,i) != i) break; - zatm_dev->mem = i << 2; - while (i) zpokel(zatm_dev,0,--i); - /* reset again to rebuild memory pointers */ - zout(0,SWR); - while (!(zin(GSR) & uPD98401_INT_IND)); - zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 | - uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR); - /* TODO: should shrink allocation now */ - printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" : - "MMF"); - for (i = 0; i < ESI_LEN; i++) - printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? 
")\n" : "-"); - do { - unsigned long flags; - - spin_lock_irqsave(&zatm_dev->lock, flags); - t0 = zpeekl(zatm_dev,uPD98401_TSR); - udelay(10); - t1 = zpeekl(zatm_dev,uPD98401_TSR); - udelay(1010); - t2 = zpeekl(zatm_dev,uPD98401_TSR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - } - while (t0 > t1 || t1 > t2); /* loop if wrapping ... */ - zatm_dev->khz = t2-2*t1+t0; - printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d " - "MHz\n",dev->number, - (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT, - zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000); - return uPD98402_init(dev); -} - - -static int zatm_start(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev = ZATM_DEV(dev); - struct pci_dev *pdev = zatm_dev->pci_dev; - unsigned long curr; - int pools,vccs,rx; - int error, i, ld; - - DPRINTK("zatm_start\n"); - zatm_dev->rx_map = zatm_dev->tx_map = NULL; - for (i = 0; i < NR_MBX; i++) - zatm_dev->mbx_start[i] = 0; - error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev); - if (error < 0) { - printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", - dev->number,zatm_dev->irq); - goto done; - } - /* define memory regions */ - pools = NR_POOLS; - if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE) - pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE; - vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/ - (2*VC_SIZE+RX_SIZE); - ld = -1; - for (rx = 1; rx < vccs; rx <<= 1) ld++; - dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */ - dev->ci_range.vci_bits = ld; - dev->link_rate = ATM_OC3_PCR; - zatm_dev->chans = vccs; /* ??? */ - curr = rx*RX_SIZE/4; - DPRINTK("RX pool 0x%08lx\n",curr); - zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ - zatm_dev->pool_base = curr; - curr += pools*POOL_SIZE/4; - DPRINTK("Shapers 0x%08lx\n",curr); - zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */ - curr += NR_SHAPERS*SHAPER_SIZE/4; - DPRINTK("Free 0x%08lx\n",curr); - zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ - printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, " - "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx, - (zatm_dev->mem-curr*4)/VC_SIZE); - /* create mailboxes */ - for (i = 0; i < NR_MBX; i++) { - void *mbx; - dma_addr_t mbx_dma; - - if (!mbx_entries[i]) - continue; - mbx = dma_alloc_coherent(&pdev->dev, - 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL); - if (!mbx) { - error = -ENOMEM; - goto out; - } - /* - * Alignment provided by dma_alloc_coherent() isn't enough - * for this device. 
- */ - if (((unsigned long)mbx ^ mbx_dma) & 0xffff) { - printk(KERN_ERR DEV_LABEL "(itf %d): system " - "bus incompatible with driver\n", dev->number); - dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma); - error = -ENODEV; - goto out; - } - DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i)); - zatm_dev->mbx_start[i] = (unsigned long)mbx; - zatm_dev->mbx_dma[i] = mbx_dma; - zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) & - 0xffff; - zout(mbx_dma >> 16, MSH(i)); - zout(mbx_dma, MSL(i)); - zout(zatm_dev->mbx_end[i], MBA(i)); - zout((unsigned long)mbx & 0xffff, MTA(i)); - zout((unsigned long)mbx & 0xffff, MWA(i)); - } - error = start_tx(dev); - if (error) - goto out; - error = start_rx(dev); - if (error) - goto out_tx; - error = dev->phy->start(dev); - if (error) - goto out_rx; - zout(0xffffffff,IMR); /* enable interrupts */ - /* enable TX & RX */ - zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR); -done: - return error; - -out_rx: - kfree(zatm_dev->rx_map); -out_tx: - kfree(zatm_dev->tx_map); -out: - while (i-- > 0) { - dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i), - (void *)zatm_dev->mbx_start[i], - zatm_dev->mbx_dma[i]); - } - free_irq(zatm_dev->irq, dev); - goto done; -} - - -static void zatm_close(struct atm_vcc *vcc) -{ - DPRINTK(">zatm_close\n"); - if (!ZATM_VCC(vcc)) return; - clear_bit(ATM_VF_READY,&vcc->flags); - close_rx(vcc); - EVENT("close_tx\n",0,0); - close_tx(vcc); - DPRINTK("zatm_close: done waiting\n"); - /* deallocate memory */ - kfree(ZATM_VCC(vcc)); - vcc->dev_data = NULL; - clear_bit(ATM_VF_ADDR,&vcc->flags); -} - - -static int zatm_open(struct atm_vcc *vcc) -{ - struct zatm_vcc *zatm_vcc; - short vpi = vcc->vpi; - int vci = vcc->vci; - int error; - - DPRINTK(">zatm_open\n"); - if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) - vcc->dev_data = NULL; - if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) - set_bit(ATM_VF_ADDR,&vcc->flags); - if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */ - DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi, - vcc->vci); - if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { - zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL); - if (!zatm_vcc) { - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -ENOMEM; - } - vcc->dev_data = zatm_vcc; - ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */ - if ((error = open_rx_first(vcc))) { - zatm_close(vcc); - return error; - } - if ((error = open_tx_first(vcc))) { - zatm_close(vcc); - return error; - } - } - if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0; - if ((error = open_rx_second(vcc))) { - zatm_close(vcc); - return error; - } - if ((error = open_tx_second(vcc))) { - zatm_close(vcc); - return error; - } - set_bit(ATM_VF_READY,&vcc->flags); - return 0; -} - - -static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags) -{ - printk("Not yet implemented\n"); - return -ENOSYS; - /* @@@ */ -} - - -static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - - zatm_dev = ZATM_DEV(dev); - switch (cmd) { - case ZATM_GETPOOLZ: - if (!capable(CAP_NET_ADMIN)) return -EPERM; - fallthrough; - case ZATM_GETPOOL: - { - struct zatm_pool_info info; - int pool; - - if (get_user(pool, - &((struct zatm_pool_req __user *) arg)->pool_num)) - return -EFAULT; - if (pool < 0 || pool > ZATM_LAST_POOL) - return -EINVAL; - pool = array_index_nospec(pool, - ZATM_LAST_POOL + 1); - spin_lock_irqsave(&zatm_dev->lock, flags); - info = zatm_dev->pool_info[pool]; - if 
(cmd == ZATM_GETPOOLZ) { - zatm_dev->pool_info[pool].rqa_count = 0; - zatm_dev->pool_info[pool].rqu_count = 0; - } - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return copy_to_user( - &((struct zatm_pool_req __user *) arg)->info, - &info,sizeof(info)) ? -EFAULT : 0; - } - case ZATM_SETPOOL: - { - struct zatm_pool_info info; - int pool; - - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (get_user(pool, - &((struct zatm_pool_req __user *) arg)->pool_num)) - return -EFAULT; - if (pool < 0 || pool > ZATM_LAST_POOL) - return -EINVAL; - pool = array_index_nospec(pool, - ZATM_LAST_POOL + 1); - if (copy_from_user(&info, - &((struct zatm_pool_req __user *) arg)->info, - sizeof(info))) return -EFAULT; - if (!info.low_water) - info.low_water = zatm_dev-> - pool_info[pool].low_water; - if (!info.high_water) - info.high_water = zatm_dev-> - pool_info[pool].high_water; - if (!info.next_thres) - info.next_thres = zatm_dev-> - pool_info[pool].next_thres; - if (info.low_water >= info.high_water || - info.low_water < 0) - return -EINVAL; - spin_lock_irqsave(&zatm_dev->lock, flags); - zatm_dev->pool_info[pool].low_water = - info.low_water; - zatm_dev->pool_info[pool].high_water = - info.high_water; - zatm_dev->pool_info[pool].next_thres = - info.next_thres; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return 0; - } - default: - if (!dev->phy->ioctl) return -ENOIOCTLCMD; - return dev->phy->ioctl(dev,cmd,arg); - } -} - -static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) -{ - int error; - - EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0); - if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) { - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb(skb); - return -EINVAL; - } - if (!skb) { - printk(KERN_CRIT "!skb in zatm_send ?\n"); - if (vcc->pop) vcc->pop(vcc,skb); - return -EINVAL; - } - ATM_SKB(skb)->vcc = vcc; - error = do_tx(skb); - if (error != RING_BUSY) return error; - skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); - return 0; -} - - -static void zatm_phy_put(struct atm_dev *dev,unsigned char value, - unsigned long addr) -{ - struct zatm_dev *zatm_dev; - - zatm_dev = ZATM_DEV(dev); - zwait(); - zout(value,CER); - zout(uPD98401_IND_ACC | uPD98401_IA_B0 | - (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); -} - - -static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) -{ - struct zatm_dev *zatm_dev; - - zatm_dev = ZATM_DEV(dev); - zwait(); - zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | - (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait(); - return zin(CER) & 0xff; -} - - -static const struct atmdev_ops ops = { - .open = zatm_open, - .close = zatm_close, - .ioctl = zatm_ioctl, - .send = zatm_send, - .phy_put = zatm_phy_put, - .phy_get = zatm_phy_get, - .change_qos = zatm_change_qos, -}; - -static int zatm_init_one(struct pci_dev *pci_dev, - const struct pci_device_id *ent) -{ - struct atm_dev *dev; - struct zatm_dev *zatm_dev; - int ret = -ENOMEM; - - zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL); - if (!zatm_dev) { - printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); - goto out; - } - - dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); - if (!dev) - goto out_free; - - ret = pci_enable_device(pci_dev); - if (ret < 0) - goto out_deregister; - - ret = pci_request_regions(pci_dev, DEV_LABEL); - if (ret < 0) - goto out_disable; - - ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); - if (ret < 0) - goto out_release; - - zatm_dev->pci_dev = pci_dev; - dev->dev_data = 
zatm_dev; - zatm_dev->copper = (int)ent->driver_data; - if ((ret = zatm_init(dev)) || (ret = zatm_start(dev))) - goto out_release; - - pci_set_drvdata(pci_dev, dev); - zatm_dev->more = zatm_boards; - zatm_boards = dev; - ret = 0; -out: - return ret; - -out_release: - pci_release_regions(pci_dev); -out_disable: - pci_disable_device(pci_dev); -out_deregister: - atm_dev_deregister(dev); -out_free: - kfree(zatm_dev); - goto out; -} - - -MODULE_LICENSE("GPL"); - -static const struct pci_device_id zatm_pci_tbl[] = { - { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, - { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, zatm_pci_tbl); - -static struct pci_driver zatm_driver = { - .name = DEV_LABEL, - .id_table = zatm_pci_tbl, - .probe = zatm_init_one, -}; - -static int __init zatm_init_module(void) -{ - return pci_register_driver(&zatm_driver); -} - -module_init(zatm_init_module); -/* module_exit not defined so not unloadable */ diff --git a/drivers/atm/zatm.h b/drivers/atm/zatm.h deleted file mode 100644 index 8204369fe825..000000000000 --- a/drivers/atm/zatm.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* drivers/atm/zatm.h - ZeitNet ZN122x device driver declarations */ - -/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */ - - -#ifndef DRIVER_ATM_ZATM_H -#define DRIVER_ATM_ZATM_H - -#include -#include -#include -#include -#include - - -#define DEV_LABEL "zatm" - -#define MAX_AAL5_PDU 10240 /* allocate for AAL5 PDUs of this size */ -#define MAX_RX_SIZE_LD 14 /* ceil(log2((MAX_AAL5_PDU+47)/48)) */ - -#define LOW_MARK 12 /* start adding new buffers if less than 12 */ -#define HIGH_MARK 30 /* stop adding buffers after reaching 30 */ -#define OFF_CNG_THRES 5 /* threshold for offset changes */ - -#define RX_SIZE 2 /* RX lookup entry size (in bytes) */ -#define NR_POOLS 32 /* number of free buffer pointers */ -#define POOL_SIZE 8 /* buffer entry size (in bytes) */ -#define NR_SHAPERS 16 /* number of shapers */ -#define SHAPER_SIZE 4 /* shaper entry size (in bytes) */ -#define VC_SIZE 32 /* VC dsc (TX or RX) size (in bytes) */ - -#define RING_ENTRIES 32 /* ring entries (without back pointer) */ -#define RING_WORDS 4 /* ring element size */ -#define RING_SIZE (sizeof(unsigned long)*(RING_ENTRIES+1)*RING_WORDS) - -#define NR_MBX 4 /* four mailboxes */ -#define MBX_RX_0 0 /* mailbox indices */ -#define MBX_RX_1 1 -#define MBX_TX_0 2 -#define MBX_TX_1 3 - -struct zatm_vcc { - /*-------------------------------- RX part */ - int rx_chan; /* RX channel, 0 if none */ - int pool; /* free buffer pool */ - /*-------------------------------- TX part */ - int tx_chan; /* TX channel, 0 if none */ - int shaper; /* shaper, <0 if none */ - struct sk_buff_head tx_queue; /* list of buffers in transit */ - wait_queue_head_t tx_wait; /* for close */ - u32 *ring; /* transmit ring */ - int ring_curr; /* current write position */ - int txing; /* number of transmits in progress */ - struct sk_buff_head backlog; /* list of buffers waiting for ring */ -}; - -struct zatm_dev { - /*-------------------------------- TX part */ - int tx_bw; /* remaining bandwidth */ - u32 free_shapers; /* bit set */ - int ubr; /* UBR shaper; -1 if none */ - int ubr_ref_cnt; /* number of VCs using UBR shaper */ - /*-------------------------------- RX part */ - int pool_ref[NR_POOLS]; /* free buffer pool usage counters */ - volatile struct sk_buff *last_free[NR_POOLS]; - /* last entry in respective pool */ - struct sk_buff_head pool[NR_POOLS];/* free 
buffer pools */ - struct zatm_pool_info pool_info[NR_POOLS]; /* pool information */ - /*-------------------------------- maps */ - struct atm_vcc **tx_map; /* TX VCCs */ - struct atm_vcc **rx_map; /* RX VCCs */ - int chans; /* map size, must be 2^n */ - /*-------------------------------- mailboxes */ - unsigned long mbx_start[NR_MBX];/* start addresses */ - dma_addr_t mbx_dma[NR_MBX]; - u16 mbx_end[NR_MBX]; /* end offset (in bytes) */ - /*-------------------------------- other pointers */ - u32 pool_base; /* Free buffer pool dsc (word addr) */ - /*-------------------------------- ZATM links */ - struct atm_dev *more; /* other ZATM devices */ - /*-------------------------------- general information */ - int mem; /* RAM on board (in bytes) */ - int khz; /* timer clock */ - int copper; /* PHY type */ - unsigned char irq; /* IRQ */ - unsigned int base; /* IO base address */ - struct pci_dev *pci_dev; /* PCI stuff */ - spinlock_t lock; -}; - - -#define ZATM_DEV(d) ((struct zatm_dev *) (d)->dev_data) -#define ZATM_VCC(d) ((struct zatm_vcc *) (d)->dev_data) - - -struct zatm_skb_prv { - struct atm_skb_data _; /* reserved */ - u32 *dsc; /* pointer to skb's descriptor */ -}; - -#define ZATM_PRV_DSC(skb) (((struct zatm_skb_prv *) (skb)->cb)->dsc) - -#endif diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h deleted file mode 100644 index 5135027b93c1..000000000000 --- a/include/uapi/linux/atm_zatm.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by - driver-specific utilities) */ - -/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ - - -#ifndef LINUX_ATM_ZATM_H -#define LINUX_ATM_ZATM_H - -/* - * Note: non-kernel programs including this file must also include - * sys/types.h for struct timeval - */ - -#include -#include - -#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) - /* get pool statistics */ -#define ZATM_GETPOOLZ _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc) - /* get statistics and zero */ -#define ZATM_SETPOOL _IOW('a',ATMIOC_SARPRV+3,struct atmif_sioc) - /* set pool parameters */ - -struct zatm_pool_info { - int ref_count; /* free buffer pool usage counters */ - int low_water,high_water; /* refill parameters */ - int rqa_count,rqu_count; /* queue condition counters */ - int offset,next_off; /* alignment optimizations: offset */ - int next_cnt,next_thres; /* repetition counter and threshold */ -}; - -struct zatm_pool_req { - int pool_num; /* pool number */ - struct zatm_pool_info info; /* actual information */ -}; - -#define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */ -#define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */ -#define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */ -#define ZATM_LAST_POOL ZATM_AAL5_POOL_BASE+10 /* max. 64 kB */ - -#define ZATM_TIMER_HISTORY_SIZE 16 /* number of timer adjustments to - record; must be 2^n */ - -#endif -- cgit v1.2.3-59-g8ed1b From 0e0af57e0e91b304f36b7d1dba859e3c04094273 Mon Sep 17 00:00:00 2001 From: "Dr. Thomas Orgis" Date: Fri, 29 Apr 2022 14:38:03 -0700 Subject: taskstats: version 12 with thread group and exe info The task exit struct needs some crucial information to be able to provide an enhanced version of process and thread accounting. This change provides: 1. ac_tgid in addition to ac_pid 2. thread group execution walltime in ac_tgetime 3. flag AGROUP in ac_flag to indicate the last task in a thread group / process 4.
device ID and inode of task's /proc/self/exe in ac_exe_dev and ac_exe_inode 5. tools/accounting/procacct as demonstrator When a task exits, taskstats are reported to userspace including the task's pid and ppid, but without the id of the thread group this task is part of. Without the tgid, the stats of single tasks cannot be correlated to each other as a thread group (process). The taskstats documentation suggests that on process exit a data set consisting of accumulated stats for the whole group is produced. But such an additional set of stats is only produced for actually multithreaded processes, not groups that had only one thread, and also those stats only contain data about delay accounting and not the more basic information about CPU and memory resource usage. Adding the AGROUP flag, set when the last task of a group exits, enables determination of process end also for single-threaded processes. My application basically does enhanced process accounting with summed cputime, biggest maxrss, tasks per process. The data is not available with the traditional BSD process accounting (which is not designed to be extensible) and the taskstats interface allows more efficient on-the-fly grouping and summing of the stats anyway, without intermediate disk writes. Furthermore, I carry statistics on how often each exact program binary is used, with its associated resources, giving a picture of how important the various parts of a collection of installed scientific software (in different versions) are, and how well they put load on the machine. This is enabled by providing information on /proc/self/exe for each task. I assume the two 64-bit fields for device ID and inode are more appropriate than the possibly large resolved path to keep the data volume down. Add the tgid to the stats to complete task identification, the flag AGROUP to mark the last task of a group, the group wallclock time, and inode-based identification of the associated executable file. Add tools/accounting/procacct.c as a simplified fork of getdelays.c to demonstrate process and thread accounting. [thomas.orgis@uni-hamburg.de: fix version number in comment] Link: https://lkml.kernel.org/r/20220405003601.7a5f6008@plasteblaster Link: https://lkml.kernel.org/r/20220331004106.64e5616b@plasteblaster Signed-off-by: Dr. Thomas Orgis Reviewed-by: Ismael Luceno Cc: Balbir Singh Cc: Eric W. Biederman Cc: xu xin Cc: Yang Yang Signed-off-by: Andrew Morton --- include/uapi/linux/acct.h | 3 +- include/uapi/linux/taskstats.h | 24 ++- kernel/taskstats.c | 23 +++ kernel/tsacct.c | 10 +- tools/accounting/.gitignore | 1 + tools/accounting/Makefile | 2 +- tools/accounting/procacct.c | 417 +++++++++++++++++++++++++++++++++++++++++ 7 files changed, 473 insertions(+), 7 deletions(-) create mode 100644 tools/accounting/procacct.c (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h index 985b89068591..0e591152aa8a 100644 --- a/include/uapi/linux/acct.h +++ b/include/uapi/linux/acct.h @@ -103,12 +103,13 @@ struct acct_v3 /* * accounting flags */ - /* bit set when the process ... */ + /* bit set when the process/task ... */ #define AFORK 0x01 /* ... executed fork, but did not exec */ #define ASU 0x02 /* ... used super-user privileges */ #define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */ #define ACORE 0x08 /* ... dumped core */ #define AXSIG 0x10 /* ... was killed by a signal */ +#define AGROUP 0x20 /* ... was the last task of the process (task group) */ #if defined(__BYTE_ORDER) ?
__BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) #define ACCT_BYTEORDER 0x80 /* accounting file is big endian */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index 12327d32378f..736154171489 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -34,7 +34,7 @@ */ -#define TASKSTATS_VERSION 11 +#define TASKSTATS_VERSION 12 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -48,7 +48,8 @@ struct taskstats { __u32 ac_exitcode; /* Exit status */ /* The accounting flags of a task as defined in - * Defined values are AFORK, ASU, ACOMPAT, ACORE, and AXSIG. + * Defined values are AFORK, ASU, ACOMPAT, ACORE, AXSIG, and AGROUP. + * (AGROUP since version 12). */ __u8 ac_flag; /* Record flags */ __u8 ac_nice; /* task_nice */ @@ -173,9 +174,26 @@ struct taskstats { /* v10: 64-bit btime to avoid overflow */ __u64 ac_btime64; /* 64-bit begin time */ - /* Delay waiting for memory compact */ + /* v11: Delay waiting for memory compact */ __u64 compact_count; __u64 compact_delay_total; + + /* v12 begin */ + __u32 ac_tgid; /* thread group ID */ + /* Thread group walltime up to now. This is total process walltime if + * AGROUP flag is set. + */ + __u64 ac_tgetime __attribute__((aligned(8))); + /* Lightweight information to identify process binary files. + * This leaves userspace to match this to a file system path, using + * MAJOR() and MINOR() macros to identify a device and mount point, + * the inode to identify the executable file. This is /proc/self/exe + * at the end, so matching the most recent exec(). Values are zero + * for kernel threads. + */ + __u64 ac_exe_dev; /* program binary device ID */ + __u64 ac_exe_inode; /* program binary inode number */ + /* v12 end */ }; diff --git a/kernel/taskstats.c b/kernel/taskstats.c index bcac5a9043aa..72415e22342b 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -153,6 +154,23 @@ static void send_cpu_listeners(struct sk_buff *skb, up_write(&listeners->sem); } +static void exe_add_tsk(struct taskstats *stats, struct task_struct *tsk) +{ + /* No idea if I'm allowed to access that here, now. */ + struct file *exe_file = get_task_exe_file(tsk); + + if (exe_file) { + /* Following cp_new_stat64() in stat.c . 
*/ + stats->ac_exe_dev = + huge_encode_dev(exe_file->f_inode->i_sb->s_dev); + stats->ac_exe_inode = exe_file->f_inode->i_ino; + fput(exe_file); + } else { + stats->ac_exe_dev = 0; + stats->ac_exe_inode = 0; + } +} + static void fill_stats(struct user_namespace *user_ns, struct pid_namespace *pid_ns, struct task_struct *tsk, struct taskstats *stats) @@ -175,6 +193,9 @@ static void fill_stats(struct user_namespace *user_ns, /* fill in extended acct fields */ xacct_add_tsk(stats, tsk); + + /* add executable info */ + exe_add_tsk(stats, tsk); } static int fill_stats_for_pid(pid_t pid, struct taskstats *stats) @@ -620,6 +641,8 @@ void taskstats_exit(struct task_struct *tsk, int group_dead) goto err; fill_stats(&init_user_ns, &init_pid_ns, tsk, stats); + if (group_dead) + stats->ac_flag |= AGROUP; /* * Doesn't matter if tsk is the leader or the last group member leaving diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 1d261fbe367b..4252f0645b9e 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -23,15 +23,20 @@ void bacct_add_tsk(struct user_namespace *user_ns, { const struct cred *tcred; u64 utime, stime, utimescaled, stimescaled; - u64 delta; + u64 now_ns, delta; time64_t btime; BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); /* calculate task elapsed time in nsec */ - delta = ktime_get_ns() - tsk->start_time; + now_ns = ktime_get_ns(); + /* store whole group time first */ + delta = now_ns - tsk->group_leader->start_time; /* Convert to micro seconds */ do_div(delta, NSEC_PER_USEC); + stats->ac_tgetime = delta; + delta = now_ns - tsk->start_time; + do_div(delta, NSEC_PER_USEC); stats->ac_etime = delta; /* Convert to seconds for btime (note y2106 limit) */ btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC); @@ -51,6 +56,7 @@ void bacct_add_tsk(struct user_namespace *user_ns, stats->ac_nice = task_nice(tsk); stats->ac_sched = tsk->policy; stats->ac_pid = task_pid_nr_ns(tsk, pid_ns); + stats->ac_tgid = task_tgid_nr_ns(tsk, pid_ns); rcu_read_lock(); tcred = __task_cred(tsk); stats->ac_uid = from_kuid_munged(user_ns, tcred->uid); diff --git a/tools/accounting/.gitignore b/tools/accounting/.gitignore index c45fb4ed4309..522a690aaf3d 100644 --- a/tools/accounting/.gitignore +++ b/tools/accounting/.gitignore @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only getdelays +procacct diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile index 03687f19cbb1..11def1ad046c 100644 --- a/tools/accounting/Makefile +++ b/tools/accounting/Makefile @@ -2,7 +2,7 @@ CC := $(CROSS_COMPILE)gcc CFLAGS := -I../../usr/include -PROGS := getdelays +PROGS := getdelays procacct all: $(PROGS) diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c new file mode 100644 index 000000000000..8353d3237e50 --- /dev/null +++ b/tools/accounting/procacct.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 +/* procacct.c + * + * Demonstrator of fetching resource data on task exit, as a way + * to accumulate accurate program resource usage statistics, without + * prior identification of the programs. For that, the fields for + * device and inode of the program executable binary file are also + * extracted in addition to the command string. + * + * The TGID together with the PID and the AGROUP flag allow + * identification of threads in a process and single-threaded processes. + * The ac_tgetime field gives proper whole-process walltime. 
+ * + * Written (changed) by Thomas Orgis, University of Hamburg in 2022 + * + * This is a cheap derivation (inheriting the style) of getdelays.c: + * + * Utility to get per-pid and per-tgid delay accounting statistics + * Also illustrates usage of the taskstats interface + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2005 + * Copyright (C) Balbir Singh, IBM Corp. 2006 + * Copyright (c) Jay Lan, SGI. 2006 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Generic macros for dealing with netlink sockets. Might be duplicated + * elsewhere. It is recommended that commercial grade applications use + * libnl or libnetlink and use the interfaces provided by the library + */ +#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN)) +#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN) +#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN)) +#define NLA_PAYLOAD(len) (len - NLA_HDRLEN) + +#define err(code, fmt, arg...) \ + do { \ + fprintf(stderr, fmt, ##arg); \ + exit(code); \ + } while (0) + +int rcvbufsz; +char name[100]; +int dbg; +int print_delays; +int print_io_accounting; +int print_task_context_switch_counts; + +#define PRINTF(fmt, arg...) { \ + if (dbg) { \ + printf(fmt, ##arg); \ + } \ + } + +/* Maximum size of response requested or message sent */ +#define MAX_MSG_SIZE 1024 +/* Maximum number of cpus expected to be specified in a cpumask */ +#define MAX_CPUS 32 + +struct msgtemplate { + struct nlmsghdr n; + struct genlmsghdr g; + char buf[MAX_MSG_SIZE]; +}; + +char cpumask[100+6*MAX_CPUS]; + +static void usage(void) +{ + fprintf(stderr, "procacct [-v] [-w logfile] [-r bufsize] [-m cpumask]\n"); + fprintf(stderr, " -v: debug on\n"); +} + +/* + * Create a raw netlink socket and bind + */ +static int create_nl_socket(int protocol) +{ + int fd; + struct sockaddr_nl local; + + fd = socket(AF_NETLINK, SOCK_RAW, protocol); + if (fd < 0) + return -1; + + if (rcvbufsz) + if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, + &rcvbufsz, sizeof(rcvbufsz)) < 0) { + fprintf(stderr, "Unable to set socket rcv buf size to %d\n", + rcvbufsz); + goto error; + } + + memset(&local, 0, sizeof(local)); + local.nl_family = AF_NETLINK; + + if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0) + goto error; + + return fd; +error: + close(fd); + return -1; +} + + +static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid, + __u8 genl_cmd, __u16 nla_type, + void *nla_data, int nla_len) +{ + struct nlattr *na; + struct sockaddr_nl nladdr; + int r, buflen; + char *buf; + + struct msgtemplate msg; + + msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + msg.n.nlmsg_type = nlmsg_type; + msg.n.nlmsg_flags = NLM_F_REQUEST; + msg.n.nlmsg_seq = 0; + msg.n.nlmsg_pid = nlmsg_pid; + msg.g.cmd = genl_cmd; + msg.g.version = 0x1; + na = (struct nlattr *) GENLMSG_DATA(&msg); + na->nla_type = nla_type; + na->nla_len = nla_len + 1 + NLA_HDRLEN; + memcpy(NLA_DATA(na), nla_data, nla_len); + msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len); + + buf = (char *) &msg; + buflen = msg.n.nlmsg_len; + memset(&nladdr, 0, sizeof(nladdr)); + nladdr.nl_family = AF_NETLINK; + while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr, + sizeof(nladdr))) < buflen) { + if (r > 0) { + buf += r; + buflen -= r; + } else if (errno != EAGAIN) + return -1; + } + return 0; +} + + +/* + * Probe the controller in genetlink to find the family id + * for the TASKSTATS family + */ +static int 
get_family_id(int sd) +{ + struct { + struct nlmsghdr n; + struct genlmsghdr g; + char buf[256]; + } ans; + + int id = 0, rc; + struct nlattr *na; + int rep_len; + + strcpy(name, TASKSTATS_GENL_NAME); + rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY, + CTRL_ATTR_FAMILY_NAME, (void *)name, + strlen(TASKSTATS_GENL_NAME)+1); + if (rc < 0) + return 0; /* sendto() failure? */ + + rep_len = recv(sd, &ans, sizeof(ans), 0); + if (ans.n.nlmsg_type == NLMSG_ERROR || + (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len)) + return 0; + + na = (struct nlattr *) GENLMSG_DATA(&ans); + na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len)); + if (na->nla_type == CTRL_ATTR_FAMILY_ID) + id = *(__u16 *) NLA_DATA(na); + + return id; +} + +#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1)) + +static void print_procacct(struct taskstats *t) +{ + /* First letter: T is a mere thread, G the last in a group, U unknown. */ + printf( + "%c pid=%lu tgid=%lu uid=%lu wall=%llu gwall=%llu cpu=%llu vmpeak=%llu rsspeak=%llu dev=%lu:%lu inode=%llu comm=%s\n" + , t->version >= 12 ? (t->ac_flag & AGROUP ? 'P' : 'T') : '?' + , (unsigned long)t->ac_pid + , (unsigned long)(t->version >= 12 ? t->ac_tgid : 0) + , (unsigned long)t->ac_uid + , (unsigned long long)t->ac_etime + , (unsigned long long)(t->version >= 12 ? t->ac_tgetime : 0) + , (unsigned long long)(t->ac_utime+t->ac_stime) + , (unsigned long long)t->hiwater_vm + , (unsigned long long)t->hiwater_rss + , (unsigned long)(t->version >= 12 ? MAJOR(t->ac_exe_dev) : 0) + , (unsigned long)(t->version >= 12 ? MINOR(t->ac_exe_dev) : 0) + , (unsigned long long)(t->version >= 12 ? t->ac_exe_inode : 0) + , t->ac_comm + ); +} + +void handle_aggr(int mother, struct nlattr *na, int fd) +{ + int aggr_len = NLA_PAYLOAD(na->nla_len); + int len2 = 0; + pid_t rtid = 0; + + na = (struct nlattr *) NLA_DATA(na); + while (len2 < aggr_len) { + switch (na->nla_type) { + case TASKSTATS_TYPE_PID: + rtid = *(int *) NLA_DATA(na); + PRINTF("PID\t%d\n", rtid); + break; + case TASKSTATS_TYPE_TGID: + rtid = *(int *) NLA_DATA(na); + PRINTF("TGID\t%d\n", rtid); + break; + case TASKSTATS_TYPE_STATS: + if (mother == TASKSTATS_TYPE_AGGR_PID) + print_procacct((struct taskstats *) NLA_DATA(na)); + if (fd) { + if (write(fd, NLA_DATA(na), na->nla_len) < 0) + err(1, "write error\n"); + } + break; + case TASKSTATS_TYPE_NULL: + break; + default: + fprintf(stderr, "Unknown nested nla_type %d\n", + na->nla_type); + break; + } + len2 += NLA_ALIGN(na->nla_len); + na = (struct nlattr *)((char *)na + + NLA_ALIGN(na->nla_len)); + } +} + +int main(int argc, char *argv[]) +{ + int c, rc, rep_len, aggr_len, len2; + int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC; + __u16 id; + __u32 mypid; + + struct nlattr *na; + int nl_sd = -1; + int len = 0; + pid_t tid = 0; + + int fd = 0; + int write_file = 0; + int maskset = 0; + char *logfile = NULL; + int containerset = 0; + char *containerpath = NULL; + int cfd = 0; + int forking = 0; + sigset_t sigset; + + struct msgtemplate msg; + + while (!forking) { + c = getopt(argc, argv, "m:vr:"); + if (c < 0) + break; + + switch (c) { + case 'w': + logfile = strdup(optarg); + printf("write to file %s\n", logfile); + write_file = 1; + break; + case 'r': + rcvbufsz = atoi(optarg); + printf("receive buf size %d\n", rcvbufsz); + if (rcvbufsz < 0) + err(1, "Invalid rcv buf size\n"); + break; + case 'm': + strncpy(cpumask, optarg, sizeof(cpumask)); + cpumask[sizeof(cpumask) - 1] = '\0'; + maskset = 1; + break; + case 'v': + printf("debug on\n"); + dbg = 1; + break; + default: + 
usage(); + exit(-1); + } + } + if (!maskset) { + maskset = 1; + strncpy(cpumask, "1", sizeof(cpumask)); + cpumask[sizeof(cpumask) - 1] = '\0'; + } + printf("cpumask %s maskset %d\n", cpumask, maskset); + + if (write_file) { + fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if (fd == -1) { + perror("Cannot open output file\n"); + exit(1); + } + } + + nl_sd = create_nl_socket(NETLINK_GENERIC); + if (nl_sd < 0) + err(1, "error creating Netlink socket\n"); + + mypid = getpid(); + id = get_family_id(nl_sd); + if (!id) { + fprintf(stderr, "Error getting family id, errno %d\n", errno); + goto err; + } + PRINTF("family id %d\n", id); + + if (maskset) { + rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, + &cpumask, strlen(cpumask) + 1); + PRINTF("Sent register cpumask, retval %d\n", rc); + if (rc < 0) { + fprintf(stderr, "error sending register cpumask\n"); + goto err; + } + } + + do { + rep_len = recv(nl_sd, &msg, sizeof(msg), 0); + PRINTF("received %d bytes\n", rep_len); + + if (rep_len < 0) { + fprintf(stderr, "nonfatal reply error: errno %d\n", + errno); + continue; + } + if (msg.n.nlmsg_type == NLMSG_ERROR || + !NLMSG_OK((&msg.n), rep_len)) { + struct nlmsgerr *err = NLMSG_DATA(&msg); + + fprintf(stderr, "fatal reply error, errno %d\n", + err->error); + goto done; + } + + PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n", + sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len); + + + rep_len = GENLMSG_PAYLOAD(&msg.n); + + na = (struct nlattr *) GENLMSG_DATA(&msg); + len = 0; + while (len < rep_len) { + len += NLA_ALIGN(na->nla_len); + int mother = na->nla_type; + + PRINTF("mother=%i\n", mother); + switch (na->nla_type) { + case TASKSTATS_TYPE_AGGR_PID: + case TASKSTATS_TYPE_AGGR_TGID: + /* For nested attributes, na follows */ + handle_aggr(mother, na, fd); + break; + default: + fprintf(stderr, "Unexpected nla_type %d\n", + na->nla_type); + case TASKSTATS_TYPE_NULL: + break; + } + na = (struct nlattr *) (GENLMSG_DATA(&msg) + len); + } + } while (1); +done: + if (maskset) { + rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, + &cpumask, strlen(cpumask) + 1); + printf("Sent deregister mask, retval %d\n", rc); + if (rc < 0) + err(rc, "error sending deregister cpumask\n"); + } +err: + close(nl_sd); + if (fd) + close(fd); + if (cfd) + close(cfd); + return 0; +} -- cgit v1.2.3-59-g8ed1b From f548a12efd5ab97e6b1fb332e5634ce44b3d9328 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 26 Apr 2022 17:39:50 -0600 Subject: io_uring: return hint on whether more data is available after receive For now just use a CQE flag for this, with big CQE support we could return the actual number of bytes left. 
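As an illustration, here is a minimal userspace sketch, not part of this patch, of how the flag might be consumed; it assumes liburing and its io_uring_prep_recv()/io_uring_wait_cqe() helpers, and elides most error handling:

    #include <liburing.h>

    /* Hypothetical helper: keep reading while the kernel hints that the
     * socket still had queued data at completion time. Assumes the SQ
     * ring has room for one more SQE per iteration. */
    static void recv_until_empty(struct io_uring *ring, int sockfd,
                                 void *buf, size_t len)
    {
            unsigned more;

            do {
                    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                    struct io_uring_cqe *cqe;

                    io_uring_prep_recv(sqe, sockfd, buf, len, 0);
                    io_uring_submit(ring);
                    if (io_uring_wait_cqe(ring, &cqe) < 0)
                            break;
                    /* copy the hint before releasing the CQE slot */
                    more = cqe->res > 0 &&
                           (cqe->flags & IORING_CQE_F_SOCK_NONEMPTY);
                    io_uring_cqe_seen(ring, cqe);
            } while (more);
    }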
Signed-off-by: Jens Axboe --- fs/io_uring.c | 19 +++++++++++++---- include/uapi/linux/io_uring.h | 2 ++ 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 5a0388bac42c..20c5d29e5b6c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5948,6 +5948,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) struct io_sr_msg *sr = &req->sr_msg; struct socket *sock; struct io_buffer *kbuf; + unsigned int cflags; unsigned flags; int ret, min_ret = 0; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; @@ -5981,6 +5982,8 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) if (flags & MSG_WAITALL) min_ret = iov_iter_count(&kmsg->msg.msg_iter); + kmsg->msg.msg_get_inq = 1; + ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, kmsg->uaddr, flags); if (ret < min_ret) { @@ -6006,7 +6009,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) ret += sr->done_io; else if (sr->done_io) ret = sr->done_io; - __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags)); + cflags = io_put_kbuf(req, issue_flags); + if (kmsg->msg.msg_inq) + cflags |= IORING_CQE_F_SOCK_NONEMPTY; + __io_req_complete(req, issue_flags, ret, cflags); return 0; } @@ -6018,6 +6024,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) void __user *buf = sr->buf; struct socket *sock; struct iovec iov; + unsigned int cflags; unsigned flags; int ret, min_ret = 0; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; @@ -6038,11 +6045,12 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) goto out_free; msg.msg_name = NULL; + msg.msg_namelen = 0; msg.msg_control = NULL; + msg.msg_get_inq = 1; + msg.msg_flags = 0; msg.msg_controllen = 0; - msg.msg_namelen = 0; msg.msg_iocb = NULL; - msg.msg_flags = 0; flags = req->sr_msg.msg_flags; if (force_nonblock) @@ -6073,7 +6081,10 @@ out_free: ret += sr->done_io; else if (sr->done_io) ret = sr->done_io; - __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags)); + cflags = io_put_kbuf(req, issue_flags); + if (msg.msg_inq) + cflags |= IORING_CQE_F_SOCK_NONEMPTY; + __io_req_complete(req, issue_flags, ret, cflags); return 0; } diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 49d1f3994f8d..92d1799892b2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -220,9 +220,11 @@ struct io_uring_cqe { * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries + * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv */ #define IORING_CQE_F_BUFFER (1U << 0) #define IORING_CQE_F_MORE (1U << 1) +#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2) enum { IORING_CQE_BUFFER_SHIFT = 16, -- cgit v1.2.3-59-g8ed1b From e1169f06d5bbdbc2b22ae4e3083a4bf75ae5ecee Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Apr 2022 19:49:03 -0600 Subject: io_uring: use TWA_SIGNAL_NO_IPI if IORING_SETUP_COOP_TASKRUN is used If this is set, io_uring will never use an IPI to deliver a task_work notification. This can be used in the common case where a single task or thread communicates with the ring, and doesn't rely on io_uring_peek_cqe(). This provides a noticeable win in performance, both from eliminating the IPI itself and from avoiding interrupting the submitting task unnecessarily.
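As an illustration, a hedged liburing-based sketch, not part of this patch, of opting in at ring setup time; the fallback path is an assumption for kernels that reject unknown setup flags:

    #include <liburing.h>

    /* Sketch: prefer cooperative task_work delivery when available.
     * The flag must not be combined with IORING_SETUP_SQPOLL, and older
     * kernels return -EINVAL for setup flags they do not know about. */
    static int ring_init_coop(struct io_uring *ring, unsigned entries)
    {
            int ret = io_uring_queue_init(entries, ring,
                                          IORING_SETUP_COOP_TASKRUN);
            if (ret == -EINVAL)     /* older kernel: run without it */
                    ret = io_uring_queue_init(entries, ring, 0);
            return ret;
    }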
Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20220426014904.60384-6-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 17 +++++++++++++---- include/uapi/linux/io_uring.h | 8 ++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 3c669d8f5e57..0b9ae3615911 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -11327,12 +11327,20 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, ctx->user = get_uid(current_user()); /* - * For SQPOLL, we just need a wakeup, always. + * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if + * COOP_TASKRUN is set, then IPIs are never needed by the app. */ - if (ctx->flags & IORING_SETUP_SQPOLL) + ret = -EINVAL; + if (ctx->flags & IORING_SETUP_SQPOLL) { + /* IPI related flags don't make sense with SQPOLL */ + if (ctx->flags & IORING_SETUP_COOP_TASKRUN) + goto err; ctx->notify_method = TWA_SIGNAL_NO_IPI; - else + } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { + ctx->notify_method = TWA_SIGNAL_NO_IPI; + } else { ctx->notify_method = TWA_SIGNAL; + } /* * This is just grabbed for accounting purposes. When a process exits, @@ -11431,7 +11439,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | - IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL)) + IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | + IORING_SETUP_COOP_TASKRUN)) return -EINVAL; return io_uring_create(entries, &p, params); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 980d82eb196e..a84f29d657c3 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -102,6 +102,14 @@ enum { #define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */ #define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ #define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ +/* + * Cooperative task running. When requests complete, they often require + * forcing the submitter to transition to the kernel to complete. If this + * flag is set, work will be done when the task transitions anyway, rather + * than force an inter-processor interrupt reschedule. This avoids interrupting + * a task running in userspace, and saves an IPI. + */ +#define IORING_SETUP_COOP_TASKRUN (1U << 8) enum { IORING_OP_NOP, -- cgit v1.2.3-59-g8ed1b From ef060ea9e4fd3b763e7060a3af0a258d2d5d7c0d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Apr 2022 19:49:04 -0600 Subject: io_uring: add IORING_SETUP_TASKRUN_FLAG If IORING_SETUP_COOP_TASKRUN is set to use cooperative scheduling for running task_work, then IORING_SETUP_TASKRUN_FLAG can be set so the application can tell if task_work is pending in the kernel for this ring. This allows use cases like io_uring_peek_cqe() to still function appropriately, or for the task to know when it would be useful to call io_uring_wait_cqe() to run pending events. 
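An illustrative sketch, assuming liburing's struct io_uring layout (its sq.kflags member maps the SQ ring flags word): with both flags requested at setup, an application can poll IORING_SQ_TASKRUN before deciding whether a kernel transition is worthwhile.

#include <liburing.h>
#include <stdbool.h>

/*
 * Sketch: true when the kernel has pending task_work for this ring,
 * i.e. calling io_uring_wait_cqe() (or any io_uring_enter()) now would
 * let it run. Requires a ring created with
 * IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG.
 */
static inline bool ring_has_pending_taskrun(struct io_uring *ring)
{
	return *(volatile unsigned int *)ring->sq.kflags & IORING_SQ_TASKRUN;
}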
Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20220426014904.60384-7-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 14 +++++++++++--- include/uapi/linux/io_uring.h | 7 +++++++ 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0b9ae3615911..72cb2d50125c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2506,6 +2506,8 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) { if (!ctx) return; + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) + atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); if (*locked) { io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); @@ -2646,6 +2648,9 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority) if (running) return; + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) + atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); + if (likely(!task_work_add(tsk, &tctx->task_work, ctx->notify_method))) return; @@ -11333,12 +11338,15 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, ret = -EINVAL; if (ctx->flags & IORING_SETUP_SQPOLL) { /* IPI related flags don't make sense with SQPOLL */ - if (ctx->flags & IORING_SETUP_COOP_TASKRUN) + if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | + IORING_SETUP_TASKRUN_FLAG)) goto err; ctx->notify_method = TWA_SIGNAL_NO_IPI; } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { ctx->notify_method = TWA_SIGNAL_NO_IPI; } else { + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) + goto err; ctx->notify_method = TWA_SIGNAL; } @@ -11440,10 +11448,10 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | - IORING_SETUP_COOP_TASKRUN)) + IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG)) return -EINVAL; - return io_uring_create(entries, &p, params); + return io_uring_create(entries, &p, params); } SYSCALL_DEFINE2(io_uring_setup, u32, entries, diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index a84f29d657c3..fad63564678a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -110,6 +110,12 @@ enum { * a task running in userspace, and saves an IPI. */ #define IORING_SETUP_COOP_TASKRUN (1U << 8) +/* + * If COOP_TASKRUN is set, get notified if task work is available for + * running and a kernel transition would be needed to run it. This sets + * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. + */ +#define IORING_SETUP_TASKRUN_FLAG (1U << 9) enum { IORING_OP_NOP, @@ -256,6 +262,7 @@ struct io_sqring_offsets { */ #define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */ #define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */ +#define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */ struct io_cqring_offsets { __u32 head; -- cgit v1.2.3-59-g8ed1b From 3254e0b9eb5649ffaa48717ebc9c593adc4ee6a9 Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:31 +0300 Subject: ethtool: Add 10base-T1L link mode entry Add entry for the 10base-T1L full duplex mode. Reviewed-by: Andrew Lunn Reviewed-by: Oleksij Rempel Signed-off-by: Alexandru Tachici Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-core.c | 3 ++- drivers/net/phy/phy_device.c | 3 ++- drivers/net/phy/phylink.c | 4 +++- include/linux/phy.h | 2 +- include/uapi/linux/ethtool.h | 1 + net/ethtool/common.c | 3 +++ 6 files changed, 12 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 2001f3329133..1f2531a1a876 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -13,7 +13,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -176,6 +176,7 @@ static const struct phy_setting settings[] = { /* 10M */ PHY_SETTING( 10, FULL, 10baseT_Full ), PHY_SETTING( 10, HALF, 10baseT_Half ), + PHY_SETTING( 10, FULL, 10baseT1L_Full ), }; #undef PHY_SETTING diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f867042b2eb4..1369daeded14 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -90,8 +90,9 @@ const int phy_10_100_features_array[4] = { }; EXPORT_SYMBOL_GPL(phy_10_100_features_array); -const int phy_basic_t1_features_array[2] = { +const int phy_basic_t1_features_array[3] = { ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, ETHTOOL_LINK_MODE_100baseT1_Full_BIT, }; EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 33c285252584..d707604d1d5a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -168,8 +168,10 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes, if (caps & MAC_10HD) __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); - if (caps & MAC_10FD) + if (caps & MAC_10FD) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes); + } if (caps & MAC_100HD) { __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes); diff --git a/include/linux/phy.h b/include/linux/phy.h index 36ca2b5c2253..b12af9e2f389 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -65,7 +65,7 @@ extern const int phy_basic_ports_array[3]; extern const int phy_fibre_port_array[1]; extern const int phy_all_ports_features_array[7]; extern const int phy_10_100_features_array[4]; -extern const int phy_basic_t1_features_array[2]; +extern const int phy_basic_t1_features_array[3]; extern const int phy_gbit_features_array[2]; extern const int phy_10gbit_features_array[1]; diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 7bc4b8def12c..e0f0ee9bc89e 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1691,6 +1691,7 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 0c5210015911..566adf85e658 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -201,6 +201,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(400000, CR4, Full), __DEFINE_LINK_MODE_NAME(100, FX, Half), __DEFINE_LINK_MODE_NAME(100, FX, Full), + 
__DEFINE_LINK_MODE_NAME(10, T1L, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -236,6 +237,7 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_T1 1 #define __LINK_MODE_LANES_X 1 #define __LINK_MODE_LANES_FX 1 +#define __LINK_MODE_LANES_T1L 1 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ [ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ @@ -349,6 +351,7 @@ const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(100, FX, Half), __DEFINE_LINK_MODE_PARAMS(100, FX, Full), + __DEFINE_LINK_MODE_PARAMS(10, T1L, Full), }; static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS); -- cgit v1.2.3-59-g8ed1b From 909b4f2bf764a903e9183111368f1509f9b40e6d Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:32 +0300 Subject: net: phy: Add 10-BaseT1L registers The 802.3cg specification defines the 10-BaseT1L link mode for Ethernet traffic on a twisted wire pair. The PMA status register can be used to detect if the PHY supports the 2.4 V TX level and the PCS control register can be used to enable/disable PCS level loopback. Reviewed-by: Andrew Lunn Signed-off-by: Alexandru Tachici Signed-off-by: David S. Miller --- include/uapi/linux/mdio.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index c54e6eae5366..0b2eba36dd7c 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -67,6 +67,9 @@ #define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ #define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ #define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ +#define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */ +#define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */ +#define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */ /* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ #define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ @@ -268,6 +271,28 @@ #define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ #define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ +/* 10BASE-T1L PMA control */ +#define MDIO_PMA_10T1L_CTRL_LB_EN 0x0001 /* Enable loopback mode */ +#define MDIO_PMA_10T1L_CTRL_EEE_EN 0x0400 /* Enable EEE mode */ +#define MDIO_PMA_10T1L_CTRL_LOW_POWER 0x0800 /* Low-power mode */ +#define MDIO_PMA_10T1L_CTRL_2V4_EN 0x1000 /* Enable 2.4 Vpp operating mode */ +#define MDIO_PMA_10T1L_CTRL_TX_DIS 0x4000 /* Transmit disable */ +#define MDIO_PMA_10T1L_CTRL_PMA_RST 0x8000 /* PMA reset */ + +/* 10BASE-T1L PMA status register. */ +#define MDIO_PMA_10T1L_STAT_LINK 0x0001 /* PMA receive link up */ +#define MDIO_PMA_10T1L_STAT_FAULT 0x0002 /* Fault condition detected */ +#define MDIO_PMA_10T1L_STAT_POLARITY 0x0004 /* Receive polarity is reversed */ +#define MDIO_PMA_10T1L_STAT_RECV_FAULT 0x0200 /* Able to detect fault on receive path */ +#define MDIO_PMA_10T1L_STAT_EEE 0x0400 /* PHY has EEE ability */ +#define MDIO_PMA_10T1L_STAT_LOW_POWER 0x0800 /* PMA has low-power ability */ +#define MDIO_PMA_10T1L_STAT_2V4_ABLE 0x1000 /* PHY has 2.4 Vpp operating mode ability */ +#define MDIO_PMA_10T1L_STAT_LB_ABLE 0x2000 /* PHY has loopback ability */ + +/* 10BASE-T1L PCS control register.
*/ +#define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */ +#define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */ + /* EEE Supported/Advertisement/LP Advertisement registers. * * EEE capability Register (3.20), Advertisement (7.60) and -- cgit v1.2.3-59-g8ed1b From 1b020e448e0fb67bcb04ee0f778d413045f965d3 Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:33 +0300 Subject: net: phy: Add BaseT1 auto-negotiation registers Added BASE-T1 AN advertisement register (Registers 7.514, 7.515, and 7.516) and BASE-T1 AN LP Base Page ability register (Registers 7.517, 7.518, and 7.519). Reviewed-by: Andrew Lunn Signed-off-by: Alexandru Tachici Signed-off-by: David S. Miller --- include/uapi/linux/mdio.h | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 0b2eba36dd7c..fa3515257f54 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -70,6 +70,14 @@ #define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */ #define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */ #define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */ +#define MDIO_AN_T1_CTRL 512 /* BASE-T1 AN control */ +#define MDIO_AN_T1_STAT 513 /* BASE-T1 AN status */ +#define MDIO_AN_T1_ADV_L 514 /* BASE-T1 AN advertisement register [15:0] */ +#define MDIO_AN_T1_ADV_M 515 /* BASE-T1 AN advertisement register [31:16] */ +#define MDIO_AN_T1_ADV_H 516 /* BASE-T1 AN advertisement register [47:32] */ +#define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */ +#define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */ +#define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */ /* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. 
*/ #define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ @@ -293,6 +301,38 @@ #define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */ #define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */ +/* BASE-T1 auto-negotiation advertisement register [15:0] */ +#define MDIO_AN_T1_ADV_L_PAUSE_CAP ADVERTISE_PAUSE_CAP +#define MDIO_AN_T1_ADV_L_PAUSE_ASYM ADVERTISE_PAUSE_ASYM +#define MDIO_AN_T1_ADV_L_FORCE_MS 0x1000 /* Force Master/slave Configuration */ +#define MDIO_AN_T1_ADV_L_REMOTE_FAULT ADVERTISE_RFAULT +#define MDIO_AN_T1_ADV_L_ACK ADVERTISE_LPACK +#define MDIO_AN_T1_ADV_L_NEXT_PAGE_REQ ADVERTISE_NPAGE + +/* BASE-T1 auto-negotiation advertisement register [31:16] */ +#define MDIO_AN_T1_ADV_M_B10L 0x4000 /* device is compatible with 10BASE-T1L */ +#define MDIO_AN_T1_ADV_M_MST 0x0010 /* advertise master preference */ + +/* BASE-T1 auto-negotiation advertisement register [47:32] */ +#define MDIO_AN_T1_ADV_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level Transmit Request */ +#define MDIO_AN_T1_ADV_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level Transmit Ability */ + +/* BASE-T1 AN LP Base Page ability register [15:0] */ +#define MDIO_AN_T1_LP_L_PAUSE_CAP LPA_PAUSE_CAP +#define MDIO_AN_T1_LP_L_PAUSE_ASYM LPA_PAUSE_ASYM +#define MDIO_AN_T1_LP_L_FORCE_MS 0x1000 /* LP Force Master/slave Configuration */ +#define MDIO_AN_T1_LP_L_REMOTE_FAULT LPA_RFAULT +#define MDIO_AN_T1_LP_L_ACK LPA_LPACK +#define MDIO_AN_T1_LP_L_NEXT_PAGE_REQ LPA_NPAGE + +/* BASE-T1 AN LP Base Page ability register [31:16] */ +#define MDIO_AN_T1_LP_M_MST 0x0010 /* LP master preference */ +#define MDIO_AN_T1_LP_M_B10L 0x4000 /* LP is compatible with 10BASE-T1L */ + +/* BASE-T1 AN LP Base Page ability register [47:32] */ +#define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */ +#define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */ + /* EEE Supported/Advertisement/LP Advertisement registers. * * EEE capability Register (3.20), Advertisement (7.60) and -- cgit v1.2.3-59-g8ed1b From 3da8ffd8545f62fec85a48a3c637b2f427974f11 Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:34 +0300 Subject: net: phy: Add 10BASE-T1L support in phy-c45 This patch is needed because the BASE-T1 uses different registers for status, control and advertisement to those already employed in the existing phy-c45 functions. Where required, genphy_c45 functions will now check whether the device supports BASE-T1 and use the specific registers instead: 45.2.7.19 BASE-T1 AN control register, 45.2.7.20 BASE-T1 AN status, 45.2.7.21 BASE-T1 AN advertisement register, 45.2.7.22 BASE-T1 AN LP Base Page ability register, 45.2.1.185 BASE-T1 PMA/PMD control register. Tested-by: Oleksij Rempel Signed-off-by: Alexandru Tachici Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-c45.c | 257 ++++++++++++++++++++++++++++++++++++++++++- drivers/net/phy/phy_device.c | 1 + include/linux/mdio.h | 70 ++++++++++++ include/linux/phy.h | 3 + include/uapi/linux/mdio.h | 10 ++ 5 files changed, 336 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index db709d30bf84..eefdd67d5556 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -8,6 +8,25 @@ #include #include +/** + * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities + * @phydev: target phy_device struct + */ +static bool genphy_c45_baset1_able(struct phy_device *phydev) +{ + int val; + + if (phydev->pma_extable == -ENODATA) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); + if (val < 0) + return false; + + phydev->pma_extable = val; + } + + return !!(phydev->pma_extable & MDIO_PMA_EXTABLE_BT1); +} + /** * genphy_c45_pma_can_sleep - checks if the PMA have sleep support * @phydev: target phy_device struct @@ -80,7 +99,10 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) switch (phydev->speed) { case SPEED_10: - ctrl2 |= MDIO_PMA_CTRL2_10BT; + if (genphy_c45_baset1_able(phydev)) + ctrl2 |= MDIO_PMA_CTRL2_BASET1; + else + ctrl2 |= MDIO_PMA_CTRL2_10BT; break; case SPEED_100: ctrl1 |= MDIO_PMA_CTRL1_SPEED100; @@ -118,10 +140,95 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) if (ret < 0) return ret; + if (genphy_c45_baset1_able(phydev)) { + int ctl = 0; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + break; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl); + if (ret < 0) + return ret; + } + return genphy_c45_an_disable_aneg(phydev); } EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); +/* Sets master/slave preference and supported technologies. + * The preference is set in the BIT(4) of BASE-T1 AN + * advertisement register 7.515 and whether the status + * is forced or not, it is set in the BIT(12) of BASE-T1 + * AN advertisement register 7.514. + * Sets 10BASE-T1L Ability BIT(14) in BASE-T1 autonegotiation + * advertisement register [31:16] if supported. 
+ */ +static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev) +{ + int changed = 0; + u16 adv_l = 0; + u16 adv_m = 0; + int ret; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_SLAVE_FORCE: + adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS; + break; + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + default: + break; + } + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + adv_m |= MDIO_AN_T1_ADV_M_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + default: + break; + } + + adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L, + (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP + | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M, + MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + return changed; +} + /** * genphy_c45_an_config_aneg - configure advertisement registers * @phydev: target phy_device struct @@ -141,6 +248,9 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev) changed = genphy_config_eee_advert(phydev); + if (genphy_c45_baset1_able(phydev)) + return genphy_c45_baset1_an_config_aneg(phydev); + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, @@ -178,8 +288,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg); */ int genphy_c45_an_disable_aneg(struct phy_device *phydev) { + u16 reg = MDIO_CTRL1; - return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + + return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); @@ -194,7 +308,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); */ int genphy_c45_restart_aneg(struct phy_device *phydev) { - return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + u16 reg = MDIO_CTRL1; + + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, reg, MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); @@ -210,11 +329,15 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); */ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) { + u16 reg = MDIO_CTRL1; int ret; + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + if (!restart) { /* Configure and restart aneg if it wasn't set before */ - ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, reg); if (ret < 0) return ret; @@ -242,7 +365,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg); */ int genphy_c45_aneg_done(struct phy_device *phydev) { - int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + int reg = MDIO_STAT1; + int val; + + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_STAT; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, reg); return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 
1 : 0; } @@ -307,6 +436,49 @@ int genphy_c45_read_link(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_link); +/* Read the Clause 45 defined BASE-T1 AN (7.513) status register to check + * if autoneg is complete. If so read the BASE-T1 Autonegotiation + * Advertisement registers filling in the link partner advertisement, + * pause and asym_pause members in phydev. + */ +static int genphy_c45_baset1_read_lpa(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + if (!(val & MDIO_AN_STAT1_COMPLETE)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising); + mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0); + mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0); + + phydev->pause = 0; + phydev->asym_pause = 0; + + return 0; + } + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, 1); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_L); + if (val < 0) + return val; + + mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val); + phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0; + phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M); + if (val < 0) + return val; + + mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, val); + + return 0; +} + /** * genphy_c45_read_lpa - read the link partner advertisement and pause * @phydev: target phy_device struct @@ -321,6 +493,9 @@ int genphy_c45_read_lpa(struct phy_device *phydev) { int val; + if (genphy_c45_baset1_able(phydev)) + return genphy_c45_baset1_read_lpa(phydev); + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; @@ -399,6 +574,17 @@ int genphy_c45_read_pma(struct phy_device *phydev) phydev->duplex = DUPLEX_FULL; + if (genphy_c45_baset1_able(phydev)) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL); + if (val < 0) + return val; + + if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + else + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + } + return 0; } EXPORT_SYMBOL_GPL(genphy_c45_read_pma); @@ -530,12 +716,67 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) phydev->supported, val & MDIO_PMA_NG_EXTABLE_5GBT); } + + if (val & MDIO_PMA_EXTABLE_BT1) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + phydev->supported, + val & MDIO_PMA_PMD_BT1_B10L_ABLE); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported, + val & MDIO_AN_STAT1_ABLE); + } } return 0; } EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities); +/* Read master/slave preference from registers. + * The preference is read from the BIT(4) of BASE-T1 AN + * advertisement register 7.515 and whether the preference + * is forced or not, it is read from BASE-T1 AN advertisement + * register 7.514.
+ */ +static int genphy_c45_baset1_read_status(struct phy_device *phydev) +{ + int ret; + int cfg; + + phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L); + if (ret < 0) + return ret; + + cfg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M); + if (cfg < 0) + return cfg; + + if (ret & MDIO_AN_T1_ADV_L_FORCE_MS) { + if (cfg & MDIO_AN_T1_ADV_M_MST) + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE; + else + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE; + } else { + if (cfg & MDIO_AN_T1_ADV_M_MST) + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_PREFERRED; + else + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_PREFERRED; + } + + return 0; +} + /** * genphy_c45_read_status - read PHY status * @phydev: target phy_device struct @@ -560,6 +801,12 @@ int genphy_c45_read_status(struct phy_device *phydev) if (ret) return ret; + if (genphy_c45_baset1_able(phydev)) { + ret = genphy_c45_baset1_read_status(phydev); + if (ret < 0) + return ret; + } + phy_resolve_aneg_linkmode(phydev); } else { ret = genphy_c45_read_pma(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1369daeded14..431a8719c635 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -600,6 +600,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, dev->autoneg = AUTONEG_ENABLE; + dev->pma_extable = -ENODATA; dev->is_c45 = is_c45; dev->phy_id = phy_id; if (c45_ids) diff --git a/include/linux/mdio.h b/include/linux/mdio.h index ecac96d52e01..00177567cfef 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -340,6 +340,76 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising, advertising, lpa & MDIO_AN_10GBT_STAT_LP10G); } +/** + * mii_t1_adv_l_mod_linkmode_t + * @advertising: target the linkmode advertisement settings + * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register + * + * A small helper function that translates BASE-T1 Autonegotiation + * Advertisement [15:0] Register bits to linkmode advertisement settings. + * Other bits in advertising aren't changed. + */ +static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, + lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising, + lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM); +} + +/** + * mii_t1_adv_m_mod_linkmode_t + * @advertising: target the linkmode advertisement settings + * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register + * + * A small helper function that translates BASE-T1 Autonegotiation + * Advertisement [31:16] Register bits to linkmode advertisement settings. + * Other bits in advertising aren't changed. + */ +static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + advertising, lpa & MDIO_AN_T1_ADV_M_B10L); +} + +/** + * linkmode_adv_to_mii_t1_adv_l_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * BASE-T1 Autonegotiation Advertisement [15:0] Register. 
+ */ +static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) + result |= MDIO_AN_T1_ADV_L_PAUSE_CAP; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) + result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM; + + return result; +} + +/** + * linkmode_adv_to_mii_t1_adv_m_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * BASE-T1 Autonegotiation Advertisement [31:16] Register. + */ +static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising)) + result |= MDIO_AN_T1_ADV_M_B10L; + + return result; +} + int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, diff --git a/include/linux/phy.h b/include/linux/phy.h index b12af9e2f389..2d12054932ba 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -570,6 +570,7 @@ struct macsec_ops; * @autoneg_complete: Flag auto negotiation of the link has completed * @mdix: Current crossover * @mdix_ctrl: User setting of crossover + * @pma_extable: Cached value of PMA/PMD Extended Abilities Register * @interrupts: Flag interrupts have been enabled * @interface: enum phy_interface_t value * @skb: Netlink message for cable diagnostics @@ -698,6 +699,8 @@ struct phy_device { u8 mdix; u8 mdix_ctrl; + int pma_extable; + void (*phy_link_change)(struct phy_device *phydev, bool up); void (*adjust_link)(struct net_device *dev); diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index fa3515257f54..75b7257a51e1 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -70,6 +70,7 @@ #define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */ #define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */ #define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */ +#define MDIO_PMA_PMD_BT1 18 /* BASE-T1 PMA/PMD extended ability */ #define MDIO_AN_T1_CTRL 512 /* BASE-T1 AN control */ #define MDIO_AN_T1_STAT 513 /* BASE-T1 AN status */ #define MDIO_AN_T1_ADV_L 514 /* BASE-T1 AN advertisement register [15:0] */ @@ -78,6 +79,7 @@ #define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */ #define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */ #define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */ +#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */ /* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. 
*/ #define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ @@ -170,6 +172,7 @@ #define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */ #define MDIO_PMA_CTRL2_2_5GBT 0x0030 /* 2.5GBaseT type */ #define MDIO_PMA_CTRL2_5GBT 0x0031 /* 5GBaseT type */ +#define MDIO_PMA_CTRL2_BASET1 0x003D /* BASE-T1 type */ #define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */ #define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */ #define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */ @@ -223,6 +226,7 @@ #define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */ #define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */ #define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */ +#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */ #define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */ /* PHY XGXS lane state register. */ @@ -301,6 +305,9 @@ #define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */ #define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */ +/* BASE-T1 PMA/PMD extended ability register. */ +#define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */ + /* BASE-T1 auto-negotiation advertisement register [15:0] */ #define MDIO_AN_T1_ADV_L_PAUSE_CAP ADVERTISE_PAUSE_CAP #define MDIO_AN_T1_ADV_L_PAUSE_ASYM ADVERTISE_PAUSE_ASYM @@ -333,6 +340,9 @@ #define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */ #define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */ +/* BASE-T1 PMA/PMD control register */ +#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */ + /* EEE Supported/Advertisement/LP Advertisement registers. * * EEE capability Register (3.20), Advertisement (7.60) and -- cgit v1.2.3-59-g8ed1b From c2aa2dfef243efe213a480a1ee8566507a5152f4 Mon Sep 17 00:00:00 2001 From: Sargun Dhillon Date: Tue, 3 May 2022 01:09:56 -0700 Subject: seccomp: Add wait_killable semantic to seccomp user notifier This introduces a per-filter flag (SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV) that makes it so that when notifications are received by the supervisor the notifying process will transition to wait killable semantics. Although wait killable isn't a set of semantics formally exposed to userspace, the concept is searchable. If the notifying process is signaled prior to the notification being received by the userspace agent, it will be handled as normal. One quirk about how this is handled is that the notifying process only switches to TASK_KILLABLE if it receives a wakeup from either an addfd or a signal. This is to avoid an unnecessary wakeup of the notifying task. The reasons behind switching into wait_killable only after userspace receives the notification are: * Avoiding unnecessary work - Often, workloads will perform work that they may abort (request racing comes to mind). This allows for syscalls to be aborted safely prior to the notification being received by the supervisor. This way, the supervisor doesn't end up doing work that the workload does not want to complete anyway. * Avoiding side effects - We don't want the syscall to be interruptible once the supervisor starts doing work because it may not be trivial to reverse the operation. For example, unmounting a file system may take a long time, and it's hard to roll back or to treat as reentrant. * Avoid breaking runtimes - Various runtimes do not GC while they are in a syscall (or while running native code that subsequently calls a syscall).
If many notifications are blocked, and not picked up by the supervisor, this can get the application into a bad state. Signed-off-by: Sargun Dhillon Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220503080958.20220-2-sargun@sargun.me --- Documentation/userspace-api/seccomp_filter.rst | 10 ++++++ include/linux/seccomp.h | 3 +- include/uapi/linux/seccomp.h | 2 ++ kernel/seccomp.c | 42 ++++++++++++++++++++++++-- 4 files changed, 54 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst index 539e9d4a4860..d1e2b9193f09 100644 --- a/Documentation/userspace-api/seccomp_filter.rst +++ b/Documentation/userspace-api/seccomp_filter.rst @@ -271,6 +271,16 @@ notifying process it will be replaced. The supervisor can also add an FD, and respond atomically by using the ``SECCOMP_ADDFD_FLAG_SEND`` flag and the return value will be the injected file descriptor number. +The notifying process can be preempted, resulting in the notification being +aborted. This can be problematic when trying to take actions on behalf of the +notifying process that are long-running and typically retryable (mounting a +filesystem). Alternatively, at filter installation time, the +``SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV`` flag can be set. This flag makes it +such that when a user notification is received by the supervisor, the notifying +process will ignore non-fatal signals until the response is sent. Signals that +are sent prior to the notification being received by userspace are handled +normally. + It is worth noting that ``struct seccomp_data`` contains the values of register arguments to the syscall, but does not contain pointers to memory. The task's memory is accessible to suitably privileged tracers via ``ptrace()`` or diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 0c564e5d40ff..d31d76be4982 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -8,7 +8,8 @@ SECCOMP_FILTER_FLAG_LOG | \ SECCOMP_FILTER_FLAG_SPEC_ALLOW | \ SECCOMP_FILTER_FLAG_NEW_LISTENER | \ - SECCOMP_FILTER_FLAG_TSYNC_ESRCH) + SECCOMP_FILTER_FLAG_TSYNC_ESRCH | \ + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV) /* sizeof() the first published struct seccomp_notif_addfd */ #define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24 diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 78074254ab98..0fdc6ef02b94 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -23,6 +23,8 @@ #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) #define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3) #define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4) +/* Received notifications wait in killable state (only respond to fatal signals) */ +#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5) /* * All BPF programs must return a 32-bit value. diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 3caa0fe85235..e9852d1b4a5e 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -200,6 +200,8 @@ static inline void seccomp_cache_prepare(struct seccomp_filter *sfilter) * the filter can be freed. * @cache: cache of arch/syscall mappings to actions * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged + * @wait_killable_recv: Put notifying process in killable state once the + * notification is received by the userspace listener.
* @prev: points to a previously installed, or inherited, filter * @prog: the BPF program to evaluate * @notif: the struct that holds all notification related information @@ -220,6 +222,7 @@ struct seccomp_filter { refcount_t refs; refcount_t users; bool log; + bool wait_killable_recv; struct action_cache cache; struct seccomp_filter *prev; struct bpf_prog *prog; @@ -893,6 +896,10 @@ static long seccomp_attach_filter(unsigned int flags, if (flags & SECCOMP_FILTER_FLAG_LOG) filter->log = true; + /* Set wait killable flag, if present. */ + if (flags & SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV) + filter->wait_killable_recv = true; + /* * If there is an existing filter, make it the prev and don't drop its * task reference. @@ -1080,6 +1087,12 @@ static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_kn complete(&addfd->completion); } +static bool should_sleep_killable(struct seccomp_filter *match, + struct seccomp_knotif *n) +{ + return match->wait_killable_recv && n->state == SECCOMP_NOTIFY_SENT; +} + static int seccomp_do_user_notification(int this_syscall, struct seccomp_filter *match, const struct seccomp_data *sd) @@ -1110,11 +1123,25 @@ static int seccomp_do_user_notification(int this_syscall, * This is where we wait for a reply from userspace. */ do { + bool wait_killable = should_sleep_killable(match, &n); + mutex_unlock(&match->notify_lock); - err = wait_for_completion_interruptible(&n.ready); + if (wait_killable) + err = wait_for_completion_killable(&n.ready); + else + err = wait_for_completion_interruptible(&n.ready); mutex_lock(&match->notify_lock); - if (err != 0) + + if (err != 0) { + /* + * Check to see if the notification got picked up and + * whether we should switch to wait killable. + */ + if (!wait_killable && should_sleep_killable(match, &n)) + continue; + goto interrupted; + } addfd = list_first_entry_or_null(&n.addfd, struct seccomp_kaddfd, list); @@ -1484,6 +1511,9 @@ out: mutex_lock(&filter->notify_lock); knotif = find_notification(filter, unotif.id); if (knotif) { + /* Reset the process to make sure it's not stuck */ + if (should_sleep_killable(filter, knotif)) + complete(&knotif->ready); knotif->state = SECCOMP_NOTIFY_INIT; up(&filter->notif->request); } @@ -1829,6 +1859,14 @@ static long seccomp_set_mode_filter(unsigned int flags, ((flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH) == 0)) return -EINVAL; + /* + * The SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV flag doesn't make sense + * without the SECCOMP_FILTER_FLAG_NEW_LISTENER flag. + */ + if ((flags & SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV) && + ((flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) == 0)) + return -EINVAL; + /* Prepare the new filter before holding any locks. */ prepared = seccomp_prepare_user_filter(filter); if (IS_ERR(prepared)) -- cgit v1.2.3-59-g8ed1b From 41b3c69bf9414375319290c59f198ff5c71d273f Mon Sep 17 00:00:00 2001 From: Kishen Maloor Date: Mon, 2 May 2022 13:52:36 -0700 Subject: mptcp: expose server_side attribute in MPTCP netlink events This change records the 'server_side' attribute of MPTCP_EVENT_CREATED and MPTCP_EVENT_ESTABLISHED events to inform their recipient about the Client/Server role of the running MPTCP application.
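For illustration (not part of this patch): a libmnl-based event listener might read the new attribute as below; the tb[] table is assumed to have been filled by mnl_attr_parse() from an MPTCP_EVENT_CREATED or MPTCP_EVENT_ESTABLISHED message.

#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/mptcp.h>

/* Sketch: report which end of the connection this event describes */
static void print_role(const struct nlattr **tb)
{
	if (tb[MPTCP_ATTR_SERVER_SIDE])
		printf("server_side=%u\n",
		       mnl_attr_get_u8(tb[MPTCP_ATTR_SERVER_SIDE]));
}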
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/246 Acked-by: Paolo Abeni Signed-off-by: Kishen Maloor Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- include/uapi/linux/mptcp.h | 1 + net/mptcp/pm_netlink.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 9690efedb5fa..e41ea01a94bb 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -188,6 +188,7 @@ enum mptcp_event_attr { MPTCP_ATTR_IF_IDX, /* s32 */ MPTCP_ATTR_RESET_REASON,/* u32 */ MPTCP_ATTR_RESET_FLAGS, /* u32 */ + MPTCP_ATTR_SERVER_SIDE, /* u8 */ __MPTCP_ATTR_AFTER_LAST }; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index eeaa96bcae6c..a4430c576ce9 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1985,6 +1985,9 @@ static int mptcp_event_created(struct sk_buff *skb, if (err) return err; + if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side))) + return -EMSGSIZE; + return mptcp_event_add_subflow(skb, ssk); } -- cgit v1.2.3-59-g8ed1b From 7b33a09d036ffd9a04506122840629c7e870cf08 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 4 May 2022 03:24:40 +0000 Subject: KVM: arm64: Add support for userspace to suspend a vCPU Introduce a new MP state, KVM_MP_STATE_SUSPENDED, which indicates a vCPU is in a suspended state. In the suspended state the vCPU will block until a wakeup event (pending interrupt) is recognized. Add a new system event type, KVM_SYSTEM_EVENT_WAKEUP, to indicate to userspace that KVM has recognized one such wakeup event. It is the responsibility of userspace to then make the vCPU runnable, or leave it suspended until the next wakeup event. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220504032446.4133305-7-oupton@google.com --- Documentation/virt/kvm/api.rst | 37 ++++++++++++++++++++++++++-- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 51 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 ++ 4 files changed, 89 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 4a900cdbc62e..46ca84600dca 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1476,14 +1476,43 @@ Possible values are: [s390] KVM_MP_STATE_LOAD the vcpu is in a special load/startup state [s390] + KVM_MP_STATE_SUSPENDED the vcpu is in a suspend state and is waiting + for a wakeup event [arm64] ========================== =============================================== On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel irqchip, the multiprocessing state must be maintained by userspace on these architectures. -For arm64/riscv: -^^^^^^^^^^^^^^^^ +For arm64: +^^^^^^^^^^ + +If a vCPU is in the KVM_MP_STATE_SUSPENDED state, KVM will emulate the +architectural execution of a WFI instruction. + +If a wakeup event is recognized, KVM will exit to userspace with a +KVM_SYSTEM_EVENT exit, where the event type is KVM_SYSTEM_EVENT_WAKEUP. If +userspace wants to honor the wakeup, it must set the vCPU's MP state to +KVM_MP_STATE_RUNNABLE. If it does not, KVM will continue to await a wakeup +event in subsequent calls to KVM_RUN. + +.. warning:: + + If userspace intends to keep the vCPU in a SUSPENDED state, it is + strongly recommended that userspace take action to suppress the + wakeup event (such as masking an interrupt). 
Otherwise, subsequent + calls to KVM_RUN will immediately exit with a KVM_SYSTEM_EVENT_WAKEUP + event and inadvertently waste CPU cycles. + + Additionally, if userspace takes action to suppress a wakeup event, + it is strongly recommended that it also restores the vCPU to its + original state when the vCPU is made RUNNABLE again. For example, + if userspace masked a pending interrupt to suppress the wakeup, + the interrupt should be unmasked before returning control to the + guest. + +For riscv: +^^^^^^^^^^ The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. @@ -5985,6 +6014,7 @@ should put the acknowledged interrupt vector into the 'epr' field. #define KVM_SYSTEM_EVENT_SHUTDOWN 1 #define KVM_SYSTEM_EVENT_RESET 2 #define KVM_SYSTEM_EVENT_CRASH 3 + #define KVM_SYSTEM_EVENT_WAKEUP 4 __u32 type; __u32 ndata; __u64 data[16]; @@ -6009,6 +6039,9 @@ Valid values for 'type' are: has requested a crash condition maintenance. Userspace can choose to ignore the request, or to gather VM memory core dump and/or reset/shutdown of the VM. + - KVM_SYSTEM_EVENT_WAKEUP -- the exiting vCPU is in a suspended state and + KVM has recognized a wakeup event. Userspace may honor this event by + marking the exiting vCPU as runnable, or deny it and call KVM_RUN again. If KVM_CAP_SYSTEM_EVENT_DATA is present, the 'data' field can contain architecture specific information for the system-level event. Only diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f3f93d48e21a..46027b9b80ca 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -46,6 +46,7 @@ #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) +#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index efe54aba5cce..abd32a84ed7a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -444,6 +444,18 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu) return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED; } +static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED; + kvm_make_request(KVM_REQ_SUSPEND, vcpu); + kvm_vcpu_kick(vcpu); +} + +static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED; +} + int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { @@ -464,6 +476,9 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, case KVM_MP_STATE_STOPPED: kvm_arm_vcpu_power_off(vcpu); break; + case KVM_MP_STATE_SUSPENDED: + kvm_arm_vcpu_suspend(vcpu); + break; default: ret = -EINVAL; } @@ -648,6 +663,39 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) preempt_enable(); } +static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu) +{ + if (!kvm_arm_vcpu_suspended(vcpu)) + return 1; + + kvm_vcpu_wfi(vcpu); + + /* + * The suspend state is sticky; we do not leave it until userspace + * explicitly marks the vCPU as runnable. Request that we suspend again + * later. + */ + kvm_make_request(KVM_REQ_SUSPEND, vcpu); + + /* + * Check to make sure the vCPU is actually runnable. If so, exit to + * userspace informing it of the wakeup condition. 
+ */ + if (kvm_arch_vcpu_runnable(vcpu)) { + memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + return 0; + } + + /* + * Otherwise, we were unblocked to process a different event, such as a + * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to + * process the event. + */ + return 1; +} + /** * check_vcpu_requests - check and handle pending vCPU requests * @vcpu: the VCPU pointer @@ -686,6 +734,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu)) kvm_pmu_handle_pmcr(vcpu, __vcpu_sys_reg(vcpu, PMCR_EL0)); + + if (kvm_check_request(KVM_REQ_SUSPEND, vcpu)) + return kvm_vcpu_suspend(vcpu); } return 1; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6a184d260c7f..7f72fb7b05f2 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -444,6 +444,7 @@ struct kvm_run { #define KVM_SYSTEM_EVENT_SHUTDOWN 1 #define KVM_SYSTEM_EVENT_RESET 2 #define KVM_SYSTEM_EVENT_CRASH 3 +#define KVM_SYSTEM_EVENT_WAKEUP 4 __u32 type; __u32 ndata; union { @@ -646,6 +647,7 @@ struct kvm_vapic_addr { #define KVM_MP_STATE_OPERATING 7 #define KVM_MP_STATE_LOAD 8 #define KVM_MP_STATE_AP_RESET_HOLD 9 +#define KVM_MP_STATE_SUSPENDED 10 struct kvm_mp_state { __u32 mp_state; -- cgit v1.2.3-59-g8ed1b From bfbab44568779e1682bc6f63688bb9c965f0e74a Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 4 May 2022 03:24:41 +0000 Subject: KVM: arm64: Implement PSCI SYSTEM_SUSPEND ARM DEN0022D.b 5.19 "SYSTEM_SUSPEND" describes a PSCI call that allows software to request that a system be placed in the deepest possible low-power state. Effectively, software can use this to suspend itself to RAM. Unfortunately, there really is no good way to implement a system-wide PSCI call in KVM. Any precondition checks done in the kernel will need to be repeated by userspace since there is no good way to protect a critical section that spans an exit to userspace. SYSTEM_RESET and SYSTEM_OFF are equally plagued by this issue, although no users have seemingly cared for the relatively long time these calls have been supported. The solution is to just make the whole implementation userspace's problem. Introduce a new system event, KVM_SYSTEM_EVENT_SUSPEND, that indicates to userspace a calling vCPU has invoked PSCI SYSTEM_SUSPEND. Additionally, add a CAP to get buy-in from userspace for this new exit type. Only advertise the SYSTEM_SUSPEND PSCI call if userspace has opted in. If a vCPU calls SYSTEM_SUSPEND, punt straight to userspace. Provide explicit documentation of userspace's responsibilites for the exit and point to the PSCI specification to describe the actual PSCI call. Reviewed-by: Reiji Watanabe Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220504032446.4133305-8-oupton@google.com --- Documentation/virt/kvm/api.rst | 39 +++++++++++++++++++++++++++++++++++++++ arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/arm.c | 5 +++++ arch/arm64/kvm/psci.c | 29 +++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 ++ 5 files changed, 77 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 46ca84600dca..d8d7859fc556 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6015,6 +6015,7 @@ should put the acknowledged interrupt vector into the 'epr' field. 
#define KVM_SYSTEM_EVENT_RESET 2 #define KVM_SYSTEM_EVENT_CRASH 3 #define KVM_SYSTEM_EVENT_WAKEUP 4 + #define KVM_SYSTEM_EVENT_SUSPEND 5 __u32 type; __u32 ndata; __u64 data[16]; @@ -6042,6 +6043,34 @@ Valid values for 'type' are: - KVM_SYSTEM_EVENT_WAKEUP -- the exiting vCPU is in a suspended state and KVM has recognized a wakeup event. Userspace may honor this event by marking the exiting vCPU as runnable, or deny it and call KVM_RUN again. + - KVM_SYSTEM_EVENT_SUSPEND -- the guest has requested a suspension of + the VM. + +For arm/arm64: +^^^^^^^^^^^^^^ + + KVM_SYSTEM_EVENT_SUSPEND exits are enabled with the + KVM_CAP_ARM_SYSTEM_SUSPEND VM capability. If a guest invokes the PSCI + SYSTEM_SUSPEND function, KVM will exit to userspace with this event + type. + + It is the sole responsibility of userspace to implement the PSCI + SYSTEM_SUSPEND call according to ARM DEN0022D.b 5.19 "SYSTEM_SUSPEND". + KVM does not change the vCPU's state before exiting to userspace, so + the call parameters are left in-place in the vCPU registers. + + Userspace is _required_ to take action for such an exit. It must + either: + + - Honor the guest request to suspend the VM. Userspace can request + in-kernel emulation of suspension by setting the calling vCPU's + state to KVM_MP_STATE_SUSPENDED. Userspace must configure the vCPU's + state according to the parameters passed to the PSCI function when + the calling vCPU is resumed. See ARM DEN0022D.b 5.19.1 "Intended use" + for details on the function parameters. + + - Deny the guest request to suspend the VM. See ARM DEN0022D.b 5.19.2 + "Caller responsibilities" for possible return values. If KVM_CAP_SYSTEM_EVENT_DATA is present, the 'data' field can contain architecture specific information for the system-level event. Only @@ -7767,6 +7796,16 @@ At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting this capability will disable PMU virtualization for that VM. Usermode should adjust CPUID leaf 0xA to reflect that the PMU is disabled. +8.36 KVM_CAP_ARM_SYSTEM_SUSPEND +------------------------------- + +:Capability: KVM_CAP_ARM_SYSTEM_SUSPEND +:Architectures: arm64 +:Type: vm + +When enabled, KVM will exit to userspace with KVM_EXIT_SYSTEM_EVENT of +type KVM_SYSTEM_EVENT_SUSPEND to process the guest suspend request. + 9. 
Known KVM API problems ========================= diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 46027b9b80ca..d9df81949f76 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -137,6 +137,8 @@ struct kvm_arch { */ #define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3 #define KVM_ARCH_FLAG_EL1_32BIT 4 + /* PSCI SYSTEM_SUSPEND enabled for the guest */ +#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5 unsigned long flags; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index abd32a84ed7a..f8a89ae52710 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -97,6 +97,10 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->lock); break; + case KVM_CAP_ARM_SYSTEM_SUSPEND: + r = 0; + set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags); + break; default: r = -EINVAL; break; @@ -210,6 +214,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: case KVM_CAP_PTP_KVM: + case KVM_CAP_ARM_SYSTEM_SUSPEND: r = 1; break; case KVM_CAP_SET_GUEST_DEBUG2: diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 2e6f060214a7..5de30e72ad40 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -195,6 +195,15 @@ static void kvm_psci_system_reset2(struct kvm_vcpu *vcpu) KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2); } +static void kvm_psci_system_suspend(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + memset(&run->system_event, 0, sizeof(vcpu->run->system_event)); + run->system_event.type = KVM_SYSTEM_EVENT_SUSPEND; + run->exit_reason = KVM_EXIT_SYSTEM_EVENT; +} + static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu) { int i; @@ -300,6 +309,7 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) { unsigned long val = PSCI_RET_NOT_SUPPORTED; u32 psci_fn = smccc_get_function(vcpu); + struct kvm *kvm = vcpu->kvm; u32 arg; int ret = 1; @@ -331,6 +341,11 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) case ARM_SMCCC_VERSION_FUNC_ID: val = 0; break; + case PSCI_1_0_FN_SYSTEM_SUSPEND: + case PSCI_1_0_FN64_SYSTEM_SUSPEND: + if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags)) + val = 0; + break; case PSCI_1_1_FN_SYSTEM_RESET2: case PSCI_1_1_FN64_SYSTEM_RESET2: if (minor >= 1) @@ -338,6 +353,20 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) break; } break; + case PSCI_1_0_FN_SYSTEM_SUSPEND: + kvm_psci_narrow_to_32bit(vcpu); + fallthrough; + case PSCI_1_0_FN64_SYSTEM_SUSPEND: + /* + * Return directly to userspace without changing the vCPU's + * registers. Userspace depends on reading the SMCCC parameters + * to implement SYSTEM_SUSPEND. 
+ */ + if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags)) { + kvm_psci_system_suspend(vcpu); + return 0; + } + break; case PSCI_1_1_FN_SYSTEM_RESET2: kvm_psci_narrow_to_32bit(vcpu); fallthrough; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7f72fb7b05f2..32c56384fd08 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -445,6 +445,7 @@ struct kvm_run { #define KVM_SYSTEM_EVENT_RESET 2 #define KVM_SYSTEM_EVENT_CRASH 3 #define KVM_SYSTEM_EVENT_WAKEUP 4 +#define KVM_SYSTEM_EVENT_SUSPEND 5 __u32 type; __u32 ndata; union { @@ -1154,6 +1155,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_DISABLE_QUIRKS2 213 /* #define KVM_CAP_VM_TSC_CONTROL 214 */ #define KVM_CAP_SYSTEM_EVENT_DATA 215 +#define KVM_CAP_ARM_SYSTEM_SUSPEND 216 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-59-g8ed1b From 2068339a6c35147847ba433fd0da67b313779059 Mon Sep 17 00:00:00 2001 From: Dipen Patel Date: Fri, 22 Apr 2022 13:52:18 -0700 Subject: gpiolib: cdev: Add hardware timestamp clock type This patch adds new clock type for the GPIO controller which can timestamp gpio lines in using hardware means. To expose such functionalities to the userspace, code has been added where during line create or set config API calls, it checks for new clock type and if requested, calls HTE API. During line change event, the HTE subsystem pushes timestamp data to userspace through gpiolib-cdev. Signed-off-by: Dipen Patel Acked-by: Linus Walleij Reported-by: kernel test robot Reported-by: Dan Carpenter Signed-off-by: Thierry Reding --- drivers/gpio/gpiolib-cdev.c | 252 ++++++++++++++++++++++++++++++++++++++------ include/uapi/linux/gpio.h | 3 + 2 files changed, 222 insertions(+), 33 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index ffa0256cad5a..d8dba8c9d2cf 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "gpiolib.h" @@ -464,6 +465,25 @@ struct line { * stale value. */ unsigned int level; + /* + * -- hte specific fields -- + */ + struct hte_ts_desc hdesc; + /* + * HTE provider sets line level at the time of event. The valid + * value is 0 or 1 and negative value for an error. + */ + int raw_level; + /* + * when sw_debounce is set on HTE enabled line, this is running + * counter of the discarded events. + */ + u32 total_discard_seq; + /* + * when sw_debounce is set on HTE enabled line, this variable records + * last sequence number before debounce period expires. 
+ */ + u32 last_seqno; }; /** @@ -518,6 +538,7 @@ struct linereq { GPIO_V2_LINE_DRIVE_FLAGS | \ GPIO_V2_LINE_EDGE_FLAGS | \ GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \ + GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \ GPIO_V2_LINE_BIAS_FLAGS) static void linereq_put_event(struct linereq *lr, @@ -542,10 +563,98 @@ static u64 line_event_timestamp(struct line *line) { if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags)) return ktime_get_real_ns(); + else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) + return line->timestamp_ns; return ktime_get_ns(); } +static enum hte_return process_hw_ts_thread(void *p) +{ + struct line *line; + struct linereq *lr; + struct gpio_v2_line_event le; + int level; + u64 eflags; + + if (!p) + return HTE_CB_HANDLED; + + line = p; + lr = line->req; + + memset(&le, 0, sizeof(le)); + + le.timestamp_ns = line->timestamp_ns; + eflags = READ_ONCE(line->eflags); + + if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) { + if (line->raw_level >= 0) { + if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags)) + level = !line->raw_level; + else + level = line->raw_level; + } else { + level = gpiod_get_value_cansleep(line->desc); + } + + if (level) + le.id = GPIO_V2_LINE_EVENT_RISING_EDGE; + else + le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE; + } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) { + /* Emit low-to-high event */ + le.id = GPIO_V2_LINE_EVENT_RISING_EDGE; + } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) { + /* Emit high-to-low event */ + le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE; + } else { + return HTE_CB_HANDLED; + } + le.line_seqno = line->line_seqno; + le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno; + le.offset = gpio_chip_hwgpio(line->desc); + + linereq_put_event(lr, &le); + + return HTE_CB_HANDLED; +} + +static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p) +{ + struct line *line; + struct linereq *lr; + int diff_seqno = 0; + + if (!ts || !p) + return HTE_CB_HANDLED; + + line = p; + line->timestamp_ns = ts->tsc; + line->raw_level = ts->raw_level; + lr = line->req; + + if (READ_ONCE(line->sw_debounced)) { + line->total_discard_seq++; + line->last_seqno = ts->seq; + mod_delayed_work(system_wq, &line->work, + usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us))); + } else { + if (unlikely(ts->seq < line->line_seqno)) + return HTE_CB_HANDLED; + + diff_seqno = ts->seq - line->line_seqno; + line->line_seqno = ts->seq; + if (lr->num_lines != 1) + line->req_seqno = atomic_add_return(diff_seqno, + &lr->seqno); + + return HTE_RUN_SECOND_CB; + } + + return HTE_CB_HANDLED; +} + static irqreturn_t edge_irq_thread(int irq, void *p) { struct line *line = p; @@ -651,10 +760,16 @@ static void debounce_work_func(struct work_struct *work) struct gpio_v2_line_event le; struct line *line = container_of(work, struct line, work.work); struct linereq *lr; - int level; + int level, diff_seqno; u64 eflags; - level = gpiod_get_raw_value_cansleep(line->desc); + if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) { + level = line->raw_level; + if (level < 0) + level = gpiod_get_raw_value_cansleep(line->desc); + } else { + level = gpiod_get_raw_value_cansleep(line->desc); + } if (level < 0) { pr_debug_ratelimited("debouncer failed to read line value\n"); return; @@ -685,10 +800,21 @@ static void debounce_work_func(struct work_struct *work) lr = line->req; le.timestamp_ns = line_event_timestamp(line); le.offset = gpio_chip_hwgpio(line->desc); - line->line_seqno++; - le.line_seqno = line->line_seqno; - le.seqno = (lr->num_lines == 1) ? 
- le.line_seqno : atomic_inc_return(&lr->seqno); + if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) { + /* discard events except the last one */ + line->total_discard_seq -= 1; + diff_seqno = line->last_seqno - line->total_discard_seq - + line->line_seqno; + line->line_seqno = line->last_seqno - line->total_discard_seq; + le.line_seqno = line->line_seqno; + le.seqno = (lr->num_lines == 1) ? + le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno); + } else { + line->line_seqno++; + le.line_seqno = line->line_seqno; + le.seqno = (lr->num_lines == 1) ? + le.line_seqno : atomic_inc_return(&lr->seqno); + } if (level) /* Emit low-to-high event */ @@ -700,8 +826,34 @@ static void debounce_work_func(struct work_struct *work) linereq_put_event(lr, &le); } +static int hte_edge_setup(struct line *line, u64 eflags) +{ + int ret; + unsigned long flags = 0; + struct hte_ts_desc *hdesc = &line->hdesc; + + if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING) + flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? + HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS; + if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING) + flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? + HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS; + + line->total_discard_seq = 0; + + hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, + NULL, line->desc); + + ret = hte_ts_get(NULL, hdesc, 0); + if (ret) + return ret; + + return hte_request_ts_ns(hdesc, process_hw_ts, + process_hw_ts_thread, line); +} + static int debounce_setup(struct line *line, - unsigned int debounce_period_us) + unsigned int debounce_period_us, bool hte_req) { unsigned long irqflags; int ret, level, irq; @@ -721,19 +873,27 @@ static int debounce_setup(struct line *line, if (level < 0) return level; - irq = gpiod_to_irq(line->desc); - if (irq < 0) - return -ENXIO; + if (!hte_req) { + irq = gpiod_to_irq(line->desc); + if (irq < 0) + return -ENXIO; - WRITE_ONCE(line->level, level); - irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING; - ret = request_irq(irq, debounce_irq_handler, irqflags, - line->req->label, line); - if (ret) - return ret; + irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING; + ret = request_irq(irq, debounce_irq_handler, irqflags, + line->req->label, line); + if (ret) + return ret; + line->irq = irq; + } else { + ret = hte_edge_setup(line, + GPIO_V2_LINE_FLAG_EDGE_RISING | + GPIO_V2_LINE_FLAG_EDGE_FALLING); + if (ret) + return ret; + } + WRITE_ONCE(line->level, level); WRITE_ONCE(line->sw_debounced, 1); - line->irq = irq; } return 0; } @@ -766,13 +926,16 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc, return 0; } -static void edge_detector_stop(struct line *line) +static void edge_detector_stop(struct line *line, bool hte_en) { - if (line->irq) { + if (line->irq && !hte_en) { free_irq(line->irq, line); line->irq = 0; } + if (hte_en) + hte_ts_put(&line->hdesc); + cancel_delayed_work_sync(&line->work); WRITE_ONCE(line->sw_debounced, 0); WRITE_ONCE(line->eflags, 0); @@ -784,7 +947,7 @@ static void edge_detector_stop(struct line *line) static int edge_detector_setup(struct line *line, struct gpio_v2_line_config *lc, unsigned int line_idx, - u64 eflags) + u64 eflags, bool hte_req) { u32 debounce_period_us; unsigned long irqflags = 0; @@ -799,7 +962,7 @@ static int edge_detector_setup(struct line *line, WRITE_ONCE(line->eflags, eflags); if (gpio_v2_line_config_debounced(lc, line_idx)) { debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx); - ret = debounce_setup(line, debounce_period_us); + ret = 
debounce_setup(line, debounce_period_us, hte_req); if (ret) return ret; WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us); @@ -809,6 +972,9 @@ static int edge_detector_setup(struct line *line, if (!eflags || READ_ONCE(line->sw_debounced)) return 0; + if (hte_req) + return hte_edge_setup(line, eflags); + irq = gpiod_to_irq(line->desc); if (irq < 0) return -ENXIO; @@ -834,13 +1000,18 @@ static int edge_detector_setup(struct line *line, static int edge_detector_update(struct line *line, struct gpio_v2_line_config *lc, unsigned int line_idx, - u64 eflags, bool polarity_change) + u64 flags, bool polarity_change, + bool prev_hte_flag) { + u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS; unsigned int debounce_period_us = - gpio_v2_line_config_debounce_period(lc, line_idx); + gpio_v2_line_config_debounce_period(lc, line_idx); + bool hte_change = (prev_hte_flag != + ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0)); if ((READ_ONCE(line->eflags) == eflags) && !polarity_change && - (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)) + (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us) + && !hte_change) return 0; /* sw debounced and still will be...*/ @@ -851,11 +1022,12 @@ static int edge_detector_update(struct line *line, } /* reconfiguring edge detection or sw debounce being disabled */ - if ((line->irq && !READ_ONCE(line->sw_debounced)) || + if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag || (!debounce_period_us && READ_ONCE(line->sw_debounced))) - edge_detector_stop(line); + edge_detector_stop(line, prev_hte_flag); - return edge_detector_setup(line, lc, line_idx, eflags); + return edge_detector_setup(line, lc, line_idx, eflags, + flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE); } static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc, @@ -891,7 +1063,6 @@ static int gpio_v2_line_flags_validate(u64 flags) /* Return an error if an unknown flag is set */ if (flags & ~GPIO_V2_LINE_VALID_FLAGS) return -EINVAL; - /* * Do not allow both INPUT and OUTPUT flags to be set as they are * contradictory. @@ -900,6 +1071,11 @@ static int gpio_v2_line_flags_validate(u64 flags) (flags & GPIO_V2_LINE_FLAG_OUTPUT)) return -EINVAL; + /* Only allow one event clock source */ + if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) && + (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) + return -EINVAL; + /* Edge detection requires explicit input. 
*/ if ((flags & GPIO_V2_LINE_EDGE_FLAGS) && !(flags & GPIO_V2_LINE_FLAG_INPUT)) @@ -992,6 +1168,8 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags, assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp, flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME); + assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp, + flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE); } static long linereq_get_values(struct linereq *lr, void __user *ip) @@ -1121,6 +1299,7 @@ static long linereq_set_config_unlocked(struct linereq *lr, unsigned int i; u64 flags; bool polarity_change; + bool prev_hte_flag; int ret; for (i = 0; i < lr->num_lines; i++) { @@ -1130,6 +1309,8 @@ static long linereq_set_config_unlocked(struct linereq *lr, (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) != ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0)); + prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags); + gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); /* * Lines have to be requested explicitly for input @@ -1138,7 +1319,7 @@ static long linereq_set_config_unlocked(struct linereq *lr, if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { int val = gpio_v2_line_config_output_value(lc, i); - edge_detector_stop(&lr->lines[i]); + edge_detector_stop(&lr->lines[i], prev_hte_flag); ret = gpiod_direction_output(desc, val); if (ret) return ret; @@ -1148,8 +1329,7 @@ static long linereq_set_config_unlocked(struct linereq *lr, return ret; ret = edge_detector_update(&lr->lines[i], lc, i, - flags & GPIO_V2_LINE_EDGE_FLAGS, - polarity_change); + flags, polarity_change, prev_hte_flag); if (ret) return ret; } @@ -1278,9 +1458,12 @@ static ssize_t linereq_read(struct file *file, static void linereq_free(struct linereq *lr) { unsigned int i; + bool hte; for (i = 0; i < lr->num_lines; i++) { - edge_detector_stop(&lr->lines[i]); + hte = !!test_bit(FLAG_EVENT_CLOCK_HTE, + &lr->lines[i].desc->flags); + edge_detector_stop(&lr->lines[i], hte); if (lr->lines[i].desc) gpiod_free(lr->lines[i].desc); } @@ -1406,7 +1589,8 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip) goto out_free_linereq; ret = edge_detector_setup(&lr->lines[i], lc, i, - flags & GPIO_V2_LINE_EDGE_FLAGS); + flags & GPIO_V2_LINE_EDGE_FLAGS, + flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE); if (ret) goto out_free_linereq; } @@ -1959,6 +2143,8 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc, if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags)) info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME; + else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags)) + info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE; debounce_period_us = READ_ONCE(desc->debounce_period_us); if (debounce_period_us) { diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h index eaaea3d8e6b4..cb9966d49a16 100644 --- a/include/uapi/linux/gpio.h +++ b/include/uapi/linux/gpio.h @@ -66,6 +66,8 @@ struct gpiochip_info { * @GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN: line has pull-down bias enabled * @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps + * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE: line events contain timestamps from + * hardware timestamp engine */ enum gpio_v2_line_flag { GPIO_V2_LINE_FLAG_USED = _BITULL(0), @@ -80,6 +82,7 @@ enum gpio_v2_line_flag { GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = _BITULL(9), GPIO_V2_LINE_FLAG_BIAS_DISABLED = _BITULL(10), GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = _BITULL(11), + GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE = _BITULL(12), }; /** -- cgit v1.2.3-59-g8ed1b From 
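For reference, a hedged userspace sketch of selecting the new HTE event
clock through the v2 line ioctl (chip_fd, the line offset and the consumer
label are placeholders; the structures and other flags are the existing
<linux/gpio.h> uAPI plus the GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE bit added
above):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/gpio.h>

	static int request_hte_line(int chip_fd)
	{
		struct gpio_v2_line_request req;

		memset(&req, 0, sizeof(req));
		req.num_lines = 1;
		req.offsets[0] = 10;		/* placeholder line offset */
		strcpy(req.consumer, "hte-demo");
		req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
				   GPIO_V2_LINE_FLAG_EDGE_RISING |
				   GPIO_V2_LINE_FLAG_EDGE_FALLING |
				   GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;

		/* On success, req.fd delivers struct gpio_v2_line_event
		 * records whose timestamp_ns comes from the hardware
		 * timestamp engine rather than CLOCK_MONOTONIC. */
		if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) < 0)
			return -1;
		return req.fd;
	}

Note that the flag validation added above permits only one event clock
source, so combining this flag with GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME
is rejected with -EINVAL.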
9ab4807c84a4aacfc9b4f79cc81254035e0ec361 Mon Sep 17 00:00:00 2001 From: Kishen Maloor Date: Tue, 3 May 2022 19:38:52 -0700 Subject: mptcp: netlink: Add MPTCP_PM_CMD_ANNOUNCE This change adds a MPTCP netlink interface for issuing ADD_ADDR advertisements over the chosen MPTCP connection from a userspace path manager. The command requires the following parameters: { token, { loc_id, family, daddr4 | daddr6 [, dport] } [, if_idx], flags[signal] }. Acked-by: Paolo Abeni Signed-off-by: Kishen Maloor Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- include/uapi/linux/mptcp.h | 2 ++ net/mptcp/pm_netlink.c | 16 ++++++++---- net/mptcp/pm_userspace.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++ net/mptcp/protocol.h | 7 ++++++ 4 files changed, 81 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index e41ea01a94bb..ac66c1263f02 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -55,6 +55,7 @@ enum { MPTCP_PM_ATTR_ADDR, /* nested address */ MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */ MPTCP_PM_ATTR_SUBFLOWS, /* u32 */ + MPTCP_PM_ATTR_TOKEN, /* u32 */ __MPTCP_PM_ATTR_MAX }; @@ -93,6 +94,7 @@ enum { MPTCP_PM_CMD_SET_LIMITS, MPTCP_PM_CMD_GET_LIMITS, MPTCP_PM_CMD_SET_FLAGS, + MPTCP_PM_CMD_ANNOUNCE, __MPTCP_PM_CMD_AFTER_LAST }; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 7d9bed536966..dbe5ccd95ac5 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -352,8 +352,8 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk, return entry; } -static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, - const struct mptcp_pm_addr_entry *entry) +bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + const struct mptcp_pm_addr_entry *entry) { struct mptcp_pm_add_entry *add_entry = NULL; struct sock *sk = (struct sock *)msk; @@ -1094,6 +1094,7 @@ static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = { NLA_POLICY_NESTED(mptcp_pm_addr_policy), [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, }, [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, }, + [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, }, }; void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) @@ -1203,9 +1204,9 @@ static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[], return err; } -static int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, - bool require_family, - struct mptcp_pm_addr_entry *entry) +int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, + bool require_family, + struct mptcp_pm_addr_entry *entry) { struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; int err; @@ -2198,6 +2199,11 @@ static const struct genl_small_ops mptcp_pm_ops[] = { .doit = mptcp_nl_cmd_set_flags, .flags = GENL_ADMIN_PERM, }, + { + .cmd = MPTCP_PM_CMD_ANNOUNCE, + .doit = mptcp_nl_cmd_announce, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_family mptcp_genl_family __ro_after_init = { diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index 910116b0f5b9..347184a9157b 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -119,3 +119,64 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry); } + +int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; + struct nlattr *addr = info->attrs[MPTCP_PM_ATTR_ADDR]; + struct mptcp_pm_addr_entry addr_val; + struct 
mptcp_sock *msk; + int err = -EINVAL; + u32 token_val; + + if (!addr || !token) { + GENL_SET_ERR_MSG(info, "missing required inputs"); + return err; + } + + token_val = nla_get_u32(token); + + msk = mptcp_token_get_sock(sock_net(skb->sk), token_val); + if (!msk) { + NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token"); + return err; + } + + if (!mptcp_pm_is_userspace(msk)) { + GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); + goto announce_err; + } + + err = mptcp_pm_parse_entry(addr, info, true, &addr_val); + if (err < 0) { + GENL_SET_ERR_MSG(info, "error parsing local address"); + goto announce_err; + } + + if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { + GENL_SET_ERR_MSG(info, "invalid addr id or flags"); + goto announce_err; + } + + err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val); + if (err < 0) { + GENL_SET_ERR_MSG(info, "did not match address and id"); + goto announce_err; + } + + lock_sock((struct sock *)msk); + spin_lock_bh(&msk->pm.lock); + + if (mptcp_pm_alloc_anno_list(msk, &addr_val)) { + mptcp_pm_announce_addr(msk, &addr_val.addr, false); + mptcp_pm_nl_addr_send_ack(msk); + } + + spin_unlock_bh(&msk->pm.lock); + release_sock((struct sock *)msk); + + err = 0; + announce_err: + sock_put((struct sock *)msk); + return err; +} diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 7257dc7aed43..de645efbc806 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -11,6 +11,7 @@ #include #include #include +#include #define MPTCP_SUPPORTED_VERSION 1 @@ -755,6 +756,9 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum); void __init mptcp_pm_init(void); void mptcp_pm_data_init(struct mptcp_sock *msk); void mptcp_pm_data_reset(struct mptcp_sock *msk); +int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, + bool require_family, + struct mptcp_pm_addr_entry *entry); void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side); @@ -775,6 +779,8 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup); void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq); +bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + const struct mptcp_pm_addr_entry *entry); void mptcp_pm_free_anno_list(struct mptcp_sock *msk); bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk); struct mptcp_pm_add_entry * @@ -798,6 +804,7 @@ int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list * int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk, struct mptcp_pm_addr_entry *entry); void mptcp_free_local_addr_list(struct mptcp_sock *msk); +int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info); void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp); -- cgit v1.2.3-59-g8ed1b From d9a4594edabf125dc17dfd52acc722c3de1cb44c Mon Sep 17 00:00:00 2001 From: Kishen Maloor Date: Tue, 3 May 2022 19:38:54 -0700 Subject: mptcp: netlink: Add MPTCP_PM_CMD_REMOVE This change adds a MPTCP netlink command for issuing a REMOVE_ADDR signal for an address over the chosen MPTCP connection from a userspace path manager. The command requires the following parameters: {token, loc_id}. 
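Both the ANNOUNCE command implemented above and the REMOVE command described
here are plain generic-netlink requests. A hedged libnl-3 sketch for the
REMOVE case follows (token and id are assumed inputs, error handling and the
ack round-trip are elided, and GENL_ADMIN_PERM means the caller needs
CAP_NET_ADMIN); ANNOUNCE is driven the same way with a nested
MPTCP_PM_ATTR_ADDR instead of the bare id, as are the subflow commands added
later in this series:

	#include <stdint.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/mptcp.h>

	static int pm_remove(uint32_t token, uint8_t id)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg = nlmsg_alloc();
		int fam;

		genl_connect(sk);
		fam = genl_ctrl_resolve(sk, MPTCP_PM_NAME);
		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
			    MPTCP_PM_CMD_REMOVE, MPTCP_PM_VER);
		nla_put_u32(msg, MPTCP_PM_ATTR_TOKEN, token);
		nla_put_u8(msg, MPTCP_PM_ATTR_LOC_ID, id);
		return nl_send_auto(sk, msg);	/* kernel replies with ack/err */
	}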
Acked-by: Paolo Abeni Signed-off-by: Kishen Maloor Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- include/uapi/linux/mptcp.h | 2 ++ net/mptcp/pm_netlink.c | 10 ++++++-- net/mptcp/pm_userspace.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++ net/mptcp/protocol.h | 4 +++ 4 files changed, 76 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index ac66c1263f02..11f9fa001a3c 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -56,6 +56,7 @@ enum { MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */ MPTCP_PM_ATTR_SUBFLOWS, /* u32 */ MPTCP_PM_ATTR_TOKEN, /* u32 */ + MPTCP_PM_ATTR_LOC_ID, /* u8 */ __MPTCP_PM_ATTR_MAX }; @@ -95,6 +96,7 @@ enum { MPTCP_PM_CMD_GET_LIMITS, MPTCP_PM_CMD_SET_FLAGS, MPTCP_PM_CMD_ANNOUNCE, + MPTCP_PM_CMD_REMOVE, __MPTCP_PM_CMD_AFTER_LAST }; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index dbe5ccd95ac5..a26750f19f65 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1095,6 +1095,7 @@ static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = { [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, }, [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, }, [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, }, + [MPTCP_PM_ATTR_LOC_ID] = { .type = NLA_U8, }, }; void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) @@ -1504,8 +1505,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info) return ret; } -static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, - struct list_head *rm_list) +void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + struct list_head *rm_list) { struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; struct mptcp_pm_addr_entry *entry; @@ -2204,6 +2205,11 @@ static const struct genl_small_ops mptcp_pm_ops[] = { .doit = mptcp_nl_cmd_announce, .flags = GENL_ADMIN_PERM, }, + { + .cmd = MPTCP_PM_CMD_REMOVE, + .doit = mptcp_nl_cmd_remove, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_family mptcp_genl_family __ro_after_init = { diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index 347184a9157b..3a42c9e66126 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -180,3 +180,65 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) sock_put((struct sock *)msk); return err; } + +int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; + struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID]; + struct mptcp_pm_addr_entry *match = NULL; + struct mptcp_pm_addr_entry *entry; + struct mptcp_sock *msk; + LIST_HEAD(free_list); + int err = -EINVAL; + u32 token_val; + u8 id_val; + + if (!id || !token) { + GENL_SET_ERR_MSG(info, "missing required inputs"); + return err; + } + + id_val = nla_get_u8(id); + token_val = nla_get_u32(token); + + msk = mptcp_token_get_sock(sock_net(skb->sk), token_val); + if (!msk) { + NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token"); + return err; + } + + if (!mptcp_pm_is_userspace(msk)) { + GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); + goto remove_err; + } + + lock_sock((struct sock *)msk); + + list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { + if (entry->addr.id == id_val) { + match = entry; + break; + } + } + + if (!match) { + GENL_SET_ERR_MSG(info, "address with specified id not found"); + release_sock((struct sock 
*)msk); + goto remove_err; + } + + list_move(&match->list, &free_list); + + mptcp_pm_remove_addrs_and_subflows(msk, &free_list); + + release_sock((struct sock *)msk); + + list_for_each_entry_safe(match, entry, &free_list, list) { + sock_kfree_s((struct sock *)msk, match, sizeof(*match)); + } + + err = 0; + remove_err: + sock_put((struct sock *)msk); + return err; +} diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index de645efbc806..4026aa3df7f4 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -801,10 +801,14 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, bool echo); int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); +void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + struct list_head *rm_list); + int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk, struct mptcp_pm_addr_entry *entry); void mptcp_free_local_addr_list(struct mptcp_sock *msk); int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info); +int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info); void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp); -- cgit v1.2.3-59-g8ed1b From 702c2f646d42cfd9e31133d68a8283fea48fd810 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 May 2022 19:38:56 -0700 Subject: mptcp: netlink: allow userspace-driven subflow establishment This allows userspace to tell kernel to add a new subflow to an existing mptcp connection. Userspace provides the token to identify the mptcp-level connection that needs a change in active subflows and the local and remote addresses of the new or the to-be-removed subflow. MPTCP_PM_CMD_SUBFLOW_CREATE requires the following parameters: { token, { loc_id, family, loc_addr4 | loc_addr6 }, { family, rem_addr4 | rem_addr6, rem_port } MPTCP_PM_CMD_SUBFLOW_DESTROY requires the following parameters: { token, { family, loc_addr4 | loc_addr6, loc_port }, { family, rem_addr4 | rem_addr6, rem_port } Acked-by: Paolo Abeni Co-developed-by: Kishen Maloor Signed-off-by: Kishen Maloor Signed-off-by: Florian Westphal Signed-off-by: Mat Martineau Signed-off-by: David S. 
Miller --- include/uapi/linux/mptcp.h | 3 + net/mptcp/pm_netlink.c | 22 ++++++ net/mptcp/pm_userspace.c | 185 +++++++++++++++++++++++++++++++++++++++++++++ net/mptcp/protocol.h | 4 + 4 files changed, 214 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 11f9fa001a3c..921963589904 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -57,6 +57,7 @@ enum { MPTCP_PM_ATTR_SUBFLOWS, /* u32 */ MPTCP_PM_ATTR_TOKEN, /* u32 */ MPTCP_PM_ATTR_LOC_ID, /* u8 */ + MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */ __MPTCP_PM_ATTR_MAX }; @@ -97,6 +98,8 @@ enum { MPTCP_PM_CMD_SET_FLAGS, MPTCP_PM_CMD_ANNOUNCE, MPTCP_PM_CMD_REMOVE, + MPTCP_PM_CMD_SUBFLOW_CREATE, + MPTCP_PM_CMD_SUBFLOW_DESTROY, __MPTCP_PM_CMD_AFTER_LAST }; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index a26750f19f65..e099f2a12504 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1096,6 +1096,8 @@ static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = { [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, }, [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, }, [MPTCP_PM_ATTR_LOC_ID] = { .type = NLA_U8, }, + [MPTCP_PM_ATTR_ADDR_REMOTE] = + NLA_POLICY_NESTED(mptcp_pm_addr_policy), }; void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) @@ -1205,6 +1207,16 @@ static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[], return err; } +int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, + struct mptcp_addr_info *addr) +{ + struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; + + memset(addr, 0, sizeof(*addr)); + + return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true); +} + int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, bool require_family, struct mptcp_pm_addr_entry *entry) @@ -2210,6 +2222,16 @@ static const struct genl_small_ops mptcp_pm_ops[] = { .doit = mptcp_nl_cmd_remove, .flags = GENL_ADMIN_PERM, }, + { + .cmd = MPTCP_PM_CMD_SUBFLOW_CREATE, + .doit = mptcp_nl_cmd_sf_create, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MPTCP_PM_CMD_SUBFLOW_DESTROY, + .doit = mptcp_nl_cmd_sf_destroy, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_family mptcp_genl_family __ro_after_init = { diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index 3a42c9e66126..f56378e4f597 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -242,3 +242,188 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info) sock_put((struct sock *)msk); return err; } + +int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; + struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; + struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; + struct mptcp_addr_info addr_r; + struct mptcp_addr_info addr_l; + struct mptcp_sock *msk; + int err = -EINVAL; + struct sock *sk; + u32 token_val; + + if (!laddr || !raddr || !token) { + GENL_SET_ERR_MSG(info, "missing required inputs"); + return err; + } + + token_val = nla_get_u32(token); + + msk = mptcp_token_get_sock(genl_info_net(info), token_val); + if (!msk) { + NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token"); + return err; + } + + if (!mptcp_pm_is_userspace(msk)) { + GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); + goto create_err; + } + + err = mptcp_pm_parse_addr(laddr, info, &addr_l); + if (err < 0) { + NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local 
addr"); + goto create_err; + } + + if (addr_l.id == 0) { + NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id"); + goto create_err; + } + + err = mptcp_pm_parse_addr(raddr, info, &addr_r); + if (err < 0) { + NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr"); + goto create_err; + } + + sk = &msk->sk.icsk_inet.sk; + lock_sock(sk); + + err = __mptcp_subflow_connect(sk, &addr_l, &addr_r); + + release_sock(sk); + + create_err: + sock_put((struct sock *)msk); + return err; +} + +static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk, + const struct mptcp_addr_info *local, + const struct mptcp_addr_info *remote) +{ + struct sock *sk = &msk->sk.icsk_inet.sk; + struct mptcp_subflow_context *subflow; + struct sock *found = NULL; + + if (local->family != remote->family) + return NULL; + + lock_sock(sk); + + mptcp_for_each_subflow(msk, subflow) { + const struct inet_sock *issk; + struct sock *ssk; + + ssk = mptcp_subflow_tcp_sock(subflow); + + if (local->family != ssk->sk_family) + continue; + + issk = inet_sk(ssk); + + switch (ssk->sk_family) { + case AF_INET: + if (issk->inet_saddr != local->addr.s_addr || + issk->inet_daddr != remote->addr.s_addr) + continue; + break; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + case AF_INET6: { + const struct ipv6_pinfo *pinfo = inet6_sk(ssk); + + if (!ipv6_addr_equal(&local->addr6, &pinfo->saddr) || + !ipv6_addr_equal(&remote->addr6, &ssk->sk_v6_daddr)) + continue; + break; + } +#endif + default: + continue; + } + + if (issk->inet_sport == local->port && + issk->inet_dport == remote->port) { + found = ssk; + goto found; + } + } + +found: + release_sock(sk); + + return found; +} + +int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; + struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; + struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; + struct mptcp_addr_info addr_l; + struct mptcp_addr_info addr_r; + struct mptcp_sock *msk; + struct sock *sk, *ssk; + int err = -EINVAL; + u32 token_val; + + if (!laddr || !raddr || !token) { + GENL_SET_ERR_MSG(info, "missing required inputs"); + return err; + } + + token_val = nla_get_u32(token); + + msk = mptcp_token_get_sock(genl_info_net(info), token_val); + if (!msk) { + NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token"); + return err; + } + + if (!mptcp_pm_is_userspace(msk)) { + GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); + goto destroy_err; + } + + err = mptcp_pm_parse_addr(laddr, info, &addr_l); + if (err < 0) { + NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr"); + goto destroy_err; + } + + err = mptcp_pm_parse_addr(raddr, info, &addr_r); + if (err < 0) { + NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr"); + goto destroy_err; + } + + if (addr_l.family != addr_r.family) { + GENL_SET_ERR_MSG(info, "address families do not match"); + goto destroy_err; + } + + if (!addr_l.port || !addr_r.port) { + GENL_SET_ERR_MSG(info, "missing local or remote port"); + goto destroy_err; + } + + sk = &msk->sk.icsk_inet.sk; + ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r); + if (ssk) { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); + + mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN); + mptcp_close_ssk(sk, ssk, subflow); + err = 0; + } else { + err = -ESRCH; + } + + destroy_err: + sock_put((struct sock *)msk); + return err; +} diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 
4026aa3df7f4..f542aeaa5b09 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -756,6 +756,8 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum); void __init mptcp_pm_init(void); void mptcp_pm_data_init(struct mptcp_sock *msk); void mptcp_pm_data_reset(struct mptcp_sock *msk); +int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, + struct mptcp_addr_info *addr); int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, bool require_family, struct mptcp_pm_addr_entry *entry); @@ -809,6 +811,8 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk, void mptcp_free_local_addr_list(struct mptcp_sock *msk); int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info); int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info); +int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info); +int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info); void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp); -- cgit v1.2.3-59-g8ed1b From 36f8423597000bd7d5e48b7b306e1d0958e72359 Mon Sep 17 00:00:00 2001 From: Muna Sinada Date: Wed, 23 Mar 2022 15:46:35 -0700 Subject: cfg80211: support disabling EHT mode Allow userspace to disable EHT mode during association. Signed-off-by: Muna Sinada Signed-off-by: Aloka Dixit Link: https://lore.kernel.org/r/20220323224636.20211-1-quic_alokad@quicinc.com Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 2 ++ net/wireless/nl80211.c | 7 +++++++ 3 files changed, 11 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index cd1212113901..6a3e3f0a8615 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2735,6 +2735,7 @@ struct cfg80211_auth_request { * userspace if this flag is set. Only applicable for cfg80211_connect() * request (connect callback). 
* @ASSOC_REQ_DISABLE_HE: Disable HE + * @ASSOC_REQ_DISABLE_EHT: Disable EHT */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), @@ -2742,6 +2743,7 @@ enum cfg80211_assoc_req_flags { ASSOC_REQ_USE_RRM = BIT(2), CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3), ASSOC_REQ_DISABLE_HE = BIT(4), + ASSOC_REQ_DISABLE_EHT = BIT(5), }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 0568a79097b8..d9490e3062a7 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3175,6 +3175,8 @@ enum nl80211_attrs { NL80211_ATTR_EHT_CAPABILITY, + NL80211_ATTR_DISABLE_EHT, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 174f254ee947..2c64baae9863 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -791,6 +791,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { NLA_POLICY_RANGE(NLA_BINARY, NL80211_EHT_MIN_CAPABILITY_LEN, NL80211_EHT_MAX_CAPABILITY_LEN), + [NL80211_ATTR_DISABLE_EHT] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -10378,6 +10379,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE])) req.flags |= ASSOC_REQ_DISABLE_HE; + if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_EHT])) + req.flags |= ASSOC_REQ_DISABLE_EHT; + if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&req.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), @@ -11166,6 +11170,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE])) connect.flags |= ASSOC_REQ_DISABLE_HE; + if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_EHT])) + connect.flags |= ASSOC_REQ_DISABLE_EHT; + if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&connect.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), -- cgit v1.2.3-59-g8ed1b From c4a67a21a6d255ddcbaa076c0412aad73c7e0c02 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 08:40:37 -0700 Subject: Revert "Merge branch 'mlxsw-line-card-model'" This reverts commit 5e927a9f4b9f29d78a7c7d66ea717bb5c8bbad8e, reversing changes made to cfc1d91a7d78cf9de25b043d81efcc16966d55b3. The discussion is still ongoing so let's remove the uAPI until the discussion settles. Link: https://lore.kernel.org/all/20220425090021.32e9a98f@kernel.org/ Reviewed-by: Ido Schimmel Link: https://lore.kernel.org/r/20220504154037.539442-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- .../networking/devlink/devlink-linecard.rst | 4 - Documentation/networking/devlink/mlxsw.rst | 33 --- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 - .../net/ethernet/mellanox/mlxsw/core_linecards.c | 237 +--------------- drivers/net/ethernet/mellanox/mlxsw/reg.h | 87 +----- include/net/devlink.h | 18 +- include/uapi/linux/devlink.h | 5 - net/core/devlink.c | 303 +-------------------- .../drivers/net/mlxsw/devlink_linecard.sh | 61 ----- 9 files changed, 10 insertions(+), 739 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/devlink/devlink-linecard.rst b/Documentation/networking/devlink/devlink-linecard.rst index a98b468ad479..6c0b8928bc13 100644 --- a/Documentation/networking/devlink/devlink-linecard.rst +++ b/Documentation/networking/devlink/devlink-linecard.rst @@ -14,7 +14,6 @@ system. Following operations are provided: * Get a list of supported line card types. 
* Provision of a slot with specific line card type. * Get and monitor of line card state and its change. - * Get information about line card versions and devices. Line card according to the type may contain one or more gearboxes to mux the lanes with certain speed to multiple ports with lanes @@ -121,6 +120,3 @@ Example usage # Set slot 8 to be unprovisioned: $ devlink lc set pci/0000:01:00.0 lc 8 notype - - # Set info for slot 8: - $ devlink lc info pci/0000:01:00.0 lc 8 diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst index 0af345680510..cf857cb4ba8f 100644 --- a/Documentation/networking/devlink/mlxsw.rst +++ b/Documentation/networking/devlink/mlxsw.rst @@ -58,39 +58,6 @@ The ``mlxsw`` driver reports the following versions - running - Three digit firmware version -Line card info versions -======================= - -The ``mlxsw`` driver reports the following versions for line cards - -.. list-table:: devlink line card info versions implemented - :widths: 5 5 90 - - * - Name - - Type - - Description - * - ``hw.revision`` - - fixed - - The hardware revision for this line card - * - ``ini.version`` - - running - - Version of line card INI loaded - -Line card device info versions -============================== - -The ``mlxsw`` driver reports the following versions for line card devices - -.. list-table:: devlink line card device info versions implemented - :widths: 5 5 90 - - * - Name - - Type - - Description - * - ``fw.version`` - - running - - Three digit firmware version - Driver-specific Traps ===================== diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index d008282d7f2e..c2a891287047 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -581,7 +581,6 @@ struct mlxsw_linecard { active:1; u16 hw_revision; u16 ini_version; - struct list_head device_list; }; struct mlxsw_linecard_types_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 2abd31a62776..5c9869dcf674 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -87,191 +87,11 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) return linecard->name; } -struct mlxsw_linecard_device_info { - u16 fw_major; - u16 fw_minor; - u16 fw_sub_minor; -}; - -struct mlxsw_linecard_device { - struct list_head list; - u8 index; - struct mlxsw_linecard *linecard; - struct devlink_linecard_device *devlink_device; - struct mlxsw_linecard_device_info info; -}; - -static struct mlxsw_linecard_device * -mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index) -{ - struct mlxsw_linecard_device *device; - - list_for_each_entry(device, &linecard->device_list, list) - if (device->index == index) - return device; - return NULL; -} - -static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core, - struct mlxsw_linecard *linecard, - u8 device_index, bool flash_owner) -{ - struct mlxsw_linecard_device *device; - int err; - - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) - return -ENOMEM; - device->index = device_index; - device->linecard = linecard; - - device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard, - device_index, device); - if (IS_ERR(device->devlink_device)) { - err = PTR_ERR(device->devlink_device); - goto 
err_devlink_linecard_device_attach; - } - - list_add_tail(&device->list, &linecard->device_list); - return 0; - -err_devlink_linecard_device_attach: - kfree(device); - return err; -} - -static void mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core, - struct mlxsw_linecard *linecard, - struct mlxsw_linecard_device *device) -{ - list_del(&device->list); - devlink_linecard_device_destroy(linecard->devlink_linecard, - device->devlink_device); - kfree(device); -} - -static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard) -{ - struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; - struct mlxsw_linecard_device *device, *tmp; - - list_for_each_entry_safe(device, tmp, &linecard->device_list, list) - mlxsw_linecard_device_detach(mlxsw_core, linecard, device); -} - -static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard) -{ - struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; - u8 msg_seq = 0; - int err; - - do { - char mddq_pl[MLXSW_REG_MDDQ_LEN]; - bool flash_owner; - bool data_valid; - u8 device_index; - - mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, - msg_seq); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); - if (err) - return err; - mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, - &data_valid, &flash_owner, - &device_index, NULL, - NULL, NULL); - if (!data_valid) - break; - err = mlxsw_linecard_device_attach(mlxsw_core, linecard, - device_index, flash_owner); - if (err) - goto rollback; - } while (msg_seq); - - return 0; - -rollback: - mlxsw_linecard_devices_detach(linecard); - return err; -} - -static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard, - u8 device_index, - struct mlxsw_linecard_device_info *info) -{ - struct mlxsw_linecard_device *device; - - device = mlxsw_linecard_device_lookup(linecard, device_index); - if (!device) - return; - device->info = *info; -} - -static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard) -{ - struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; - u8 msg_seq = 0; - - do { - struct mlxsw_linecard_device_info info; - char mddq_pl[MLXSW_REG_MDDQ_LEN]; - bool data_valid; - u8 device_index; - int err; - - mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, - msg_seq); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); - if (err) - return err; - mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, - &data_valid, NULL, - &device_index, - &info.fw_major, - &info.fw_minor, - &info.fw_sub_minor); - if (!data_valid) - break; - mlxsw_linecard_device_update(linecard, device_index, &info); - } while (msg_seq); - - return 0; -} - -static int -mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device, - void *priv, struct devlink_info_req *req, - struct netlink_ext_ack *extack) -{ - struct mlxsw_linecard_device *device = priv; - struct mlxsw_linecard_device_info *info; - struct mlxsw_linecard *linecard; - char buf[32]; - - linecard = device->linecard; - mutex_lock(&linecard->lock); - if (!linecard->active) { - mutex_unlock(&linecard->lock); - return 0; - } - - info = &device->info; - - sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor, - info->fw_sub_minor); - mutex_unlock(&linecard->lock); - - return devlink_info_version_running_put(req, - DEVLINK_INFO_VERSION_GENERIC_FW, - buf); -} - static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) { linecard->provisioned = false; linecard->ready = false; linecard->active = false; - 
mlxsw_linecard_devices_detach(linecard); devlink_linecard_provision_fail(linecard->devlink_linecard); } @@ -412,7 +232,6 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, { struct mlxsw_linecards *linecards = linecard->linecards; const char *type; - int err; type = mlxsw_linecard_types_lookup(linecards, card_type); mlxsw_linecard_status_event_done(linecard, @@ -430,11 +249,6 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, return PTR_ERR(type); } } - err = mlxsw_linecard_devices_attach(linecard); - if (err) { - mlxsw_linecard_provision_fail(linecard); - return err; - } linecard->provisioned = true; linecard->hw_revision = hw_revision; linecard->ini_version = ini_version; @@ -447,7 +261,6 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard) mlxsw_linecard_status_event_done(linecard, MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); linecard->provisioned = false; - mlxsw_linecard_devices_detach(linecard); devlink_linecard_provision_clear(linecard->devlink_linecard); } @@ -479,18 +292,11 @@ static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard) return 0; } -static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) +static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) { - int err; - - err = mlxsw_linecard_devices_update(linecard); - if (err) - return err; - mlxsw_linecard_active_ops_call(linecard); linecard->active = true; devlink_linecard_activate(linecard->devlink_linecard); - return 0; } static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard) @@ -539,11 +345,8 @@ static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, goto out; } - if (active && linecard->active != active) { - err = mlxsw_linecard_active_set(linecard); - if (err) - goto out; - } + if (active && linecard->active != active) + mlxsw_linecard_active_set(linecard); if (!active && linecard->active != active) mlxsw_linecard_active_clear(linecard); @@ -934,44 +737,12 @@ static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard, *type_priv = ini_file; } -static int -mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv, - struct devlink_info_req *req, - struct netlink_ext_ack *extack) -{ - struct mlxsw_linecard *linecard = priv; - char buf[32]; - int err; - - mutex_lock(&linecard->lock); - if (!linecard->provisioned) { - err = 0; - goto unlock; - } - - sprintf(buf, "%d", linecard->hw_revision); - err = devlink_info_version_fixed_put(req, "hw.revision", buf); - if (err) - goto unlock; - - sprintf(buf, "%d", linecard->ini_version); - err = devlink_info_version_running_put(req, "ini.version", buf); - if (err) - goto unlock; - -unlock: - mutex_unlock(&linecard->lock); - return err; -} - static const struct devlink_linecard_ops mlxsw_linecard_ops = { .provision = mlxsw_linecard_provision, .unprovision = mlxsw_linecard_unprovision, .same_provision = mlxsw_linecard_same_provision, .types_count = mlxsw_linecard_types_count, .types_get = mlxsw_linecard_types_get, - .info_get = mlxsw_linecard_info_get, - .device_info_get = mlxsw_linecard_device_info_get, }; struct mlxsw_linecard_status_event { @@ -1069,7 +840,6 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core, linecard->slot_index = slot_index; linecard->linecards = linecards; mutex_init(&linecard->lock); - INIT_LIST_HEAD(&linecard->device_list); devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core), slot_index, &mlxsw_linecard_ops, @@ -1115,7 +885,6 @@ 
static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core, mlxsw_core_flush_owq(); if (linecard->active) mlxsw_linecard_active_clear(linecard); - mlxsw_linecard_devices_detach(linecard); devlink_linecard_destroy(linecard->devlink_linecard); mutex_destroy(&linecard->lock); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 078e3aa04383..93af6c974ece 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11643,11 +11643,7 @@ MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1); enum mlxsw_reg_mddq_query_type { MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1, - MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices - * on the slot, data_valid - * will be '0'. - */ - MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME, + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3, }; /* reg_mddq_query_type @@ -11661,28 +11657,6 @@ MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8); */ MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4); -/* reg_mddq_response_msg_seq - * Response message sequential number. For a specific request, the response - * message sequential number is the following one. In addition, the last - * message should be 0. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8); - -/* reg_mddq_request_msg_seq - * Request message sequential number. - * The first message number should be 0. - * Access: Index - */ -MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8); - -/* reg_mddq_data_valid - * If set, the data in the data field is valid and contain the information - * for the queried index. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1); - /* reg_mddq_slot_info_provisioned * If set, the INI file is applied and the card is provisioned. * Access: RO @@ -11769,65 +11743,6 @@ mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index, *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload); } -/* reg_mddq_device_info_flash_owner - * If set, the device is the flash owner. Otherwise, a shared flash - * is used by this device (another device is the flash owner). - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1); - -/* reg_mddq_device_info_device_index - * Device index. The first device should number 0. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8); - -/* reg_mddq_device_info_fw_major - * Major FW version number. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16); - -/* reg_mddq_device_info_fw_minor - * Minor FW version number. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16); - -/* reg_mddq_device_info_fw_sub_minor - * Sub-minor FW version number. 
- * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16); - -static inline void -mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index, - u8 request_msg_seq) -{ - __mlxsw_reg_mddq_pack(payload, slot_index, - MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO); - mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq); -} - -static inline void -mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq, - bool *p_data_valid, bool *p_flash_owner, - u8 *p_device_index, u16 *p_fw_major, - u16 *p_fw_minor, u16 *p_fw_sub_minor) -{ - *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload); - *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload); - if (p_flash_owner) - *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload); - *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload); - if (p_fw_major) - *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload); - if (p_fw_minor) - *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload); - if (p_fw_sub_minor) - *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload); -} - #define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20 /* reg_mddq_slot_ascii_name diff --git a/include/net/devlink.h b/include/net/devlink.h index 062895973656..2a2a2a0c93f7 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -150,9 +150,6 @@ struct devlink_port_new_attrs { sfnum_valid:1; }; -struct devlink_info_req; -struct devlink_linecard_device; - /** * struct devlink_linecard_ops - Linecard operations * @provision: callback to provision the linecard slot with certain @@ -171,8 +168,6 @@ struct devlink_linecard_device; * provisioned. * @types_count: callback to get number of supported types * @types_get: callback to get next type in list - * @info_get: callback to get linecard info - * @device_info_get: callback to get linecard device info */ struct devlink_linecard_ops { int (*provision)(struct devlink_linecard *linecard, void *priv, @@ -187,12 +182,6 @@ struct devlink_linecard_ops { void (*types_get)(struct devlink_linecard *linecard, void *priv, unsigned int index, const char **type, const void **type_priv); - int (*info_get)(struct devlink_linecard *linecard, void *priv, - struct devlink_info_req *req, - struct netlink_ext_ack *extack); - int (*device_info_get)(struct devlink_linecard_device *device, - void *priv, struct devlink_info_req *req, - struct netlink_ext_ack *extack); }; struct devlink_sb_pool_info { @@ -639,6 +628,7 @@ struct devlink_flash_update_params { #define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1) struct devlink_region; +struct devlink_info_req; /** * struct devlink_region_ops - Region operations @@ -1588,12 +1578,6 @@ struct devlink_linecard * devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, const struct devlink_linecard_ops *ops, void *priv); void devlink_linecard_destroy(struct devlink_linecard *linecard); -struct devlink_linecard_device * -devlink_linecard_device_create(struct devlink_linecard *linecard, - unsigned int device_index, void *priv); -void -devlink_linecard_device_destroy(struct devlink_linecard *linecard, - struct devlink_linecard_device *linecard_device); void devlink_linecard_provision_set(struct devlink_linecard *linecard, const char *type); void devlink_linecard_provision_clear(struct devlink_linecard *linecard); diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index fb8c3864457f..b3d40a5d72ff 100644 --- a/include/uapi/linux/devlink.h +++ 
b/include/uapi/linux/devlink.h @@ -136,8 +136,6 @@ enum devlink_command { DEVLINK_CMD_LINECARD_NEW, DEVLINK_CMD_LINECARD_DEL, - DEVLINK_CMD_LINECARD_INFO_GET, /* can dump */ - /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -577,9 +575,6 @@ enum devlink_attr { DEVLINK_ATTR_LINECARD_STATE, /* u8 */ DEVLINK_ATTR_LINECARD_TYPE, /* string */ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */ - DEVLINK_ATTR_LINECARD_DEVICE_LIST, /* nested */ - DEVLINK_ATTR_LINECARD_DEVICE, /* nested */ - DEVLINK_ATTR_LINECARD_DEVICE_INDEX, /* u32 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 5f441a0e34f4..5cc88490f18f 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -83,11 +83,10 @@ struct devlink_linecard { const struct devlink_linecard_ops *ops; void *priv; enum devlink_linecard_state state; - struct mutex state_lock; /* Protects state and device_list */ + struct mutex state_lock; /* Protects state */ const char *type; struct devlink_linecard_type *types; unsigned int types_count; - struct list_head device_list; }; /** @@ -2059,56 +2058,6 @@ struct devlink_linecard_type { const void *priv; }; -struct devlink_linecard_device { - struct list_head list; - unsigned int index; - void *priv; -}; - -static int -devlink_nl_linecard_device_fill(struct sk_buff *msg, - struct devlink_linecard_device *linecard_device) -{ - struct nlattr *attr; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE); - if (!attr) - return -EMSGSIZE; - if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX, - linecard_device->index)) { - nla_nest_cancel(msg, attr); - return -EMSGSIZE; - } - nla_nest_end(msg, attr); - - return 0; -} - -static int devlink_nl_linecard_devices_fill(struct sk_buff *msg, - struct devlink_linecard *linecard) -{ - struct devlink_linecard_device *linecard_device; - struct nlattr *attr; - int err; - - if (list_empty(&linecard->device_list)) - return 0; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST); - if (!attr) - return -EMSGSIZE; - list_for_each_entry(linecard_device, &linecard->device_list, list) { - err = devlink_nl_linecard_device_fill(msg, linecard_device); - if (err) { - nla_nest_cancel(msg, attr); - return err; - } - } - nla_nest_end(msg, attr); - - return 0; -} - static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_linecard *linecard, @@ -2119,7 +2068,6 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink_linecard_type *linecard_type; struct nlattr *attr; void *hdr; - int err; int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); @@ -2152,10 +2100,6 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, nla_nest_end(msg, attr); } - err = devlink_nl_linecard_devices_fill(msg, linecard); - if (err) - goto nla_put_failure; - genlmsg_end(msg, hdr); return 0; @@ -2425,191 +2369,6 @@ static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb, return 0; } -struct devlink_info_req { - struct sk_buff *msg; -}; - -static int -devlink_nl_linecard_device_info_fill(struct sk_buff *msg, - struct devlink_linecard *linecard, - struct devlink_linecard_device *linecard_device, - struct netlink_ext_ack *extack) -{ - struct nlattr *attr; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE); - if (!attr) - return -EMSGSIZE; - if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX, - linecard_device->index)) { - nla_nest_cancel(msg, attr); - return 
-EMSGSIZE; - } - if (linecard->ops->device_info_get) { - struct devlink_info_req req; - int err; - - req.msg = msg; - err = linecard->ops->device_info_get(linecard_device, - linecard_device->priv, - &req, extack); - if (err) { - nla_nest_cancel(msg, attr); - return err; - } - } - nla_nest_end(msg, attr); - - return 0; -} - -static int devlink_nl_linecard_devices_info_fill(struct sk_buff *msg, - struct devlink_linecard *linecard, - struct netlink_ext_ack *extack) -{ - struct devlink_linecard_device *linecard_device; - struct nlattr *attr; - int err; - - if (list_empty(&linecard->device_list)) - return 0; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST); - if (!attr) - return -EMSGSIZE; - list_for_each_entry(linecard_device, &linecard->device_list, list) { - err = devlink_nl_linecard_device_info_fill(msg, linecard, - linecard_device, - extack); - if (err) { - nla_nest_cancel(msg, attr); - return err; - } - } - nla_nest_end(msg, attr); - - return 0; -} - -static int -devlink_nl_linecard_info_fill(struct sk_buff *msg, struct devlink *devlink, - struct devlink_linecard *linecard, - enum devlink_command cmd, u32 portid, - u32 seq, int flags, struct netlink_ext_ack *extack) -{ - struct devlink_info_req req; - void *hdr; - int err; - - hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); - if (!hdr) - return -EMSGSIZE; - - err = -EMSGSIZE; - if (devlink_nl_put_handle(msg, devlink)) - goto nla_put_failure; - if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index)) - goto nla_put_failure; - - req.msg = msg; - err = linecard->ops->info_get(linecard, linecard->priv, &req, extack); - if (err) - goto nla_put_failure; - - err = devlink_nl_linecard_devices_info_fill(msg, linecard, extack); - if (err) - goto nla_put_failure; - - genlmsg_end(msg, hdr); - return 0; - -nla_put_failure: - genlmsg_cancel(msg, hdr); - return err; -} - -static int devlink_nl_cmd_linecard_info_get_doit(struct sk_buff *skb, - struct genl_info *info) -{ - struct devlink_linecard *linecard = info->user_ptr[1]; - struct devlink *devlink = linecard->devlink; - struct sk_buff *msg; - int err; - - if (!linecard->ops->info_get) - return -EOPNOTSUPP; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - mutex_lock(&linecard->state_lock); - err = devlink_nl_linecard_info_fill(msg, devlink, linecard, - DEVLINK_CMD_LINECARD_INFO_GET, - info->snd_portid, info->snd_seq, 0, - info->extack); - mutex_unlock(&linecard->state_lock); - if (err) { - nlmsg_free(msg); - return err; - } - - return genlmsg_reply(msg, info); -} - -static int devlink_nl_cmd_linecard_info_get_dumpit(struct sk_buff *msg, - struct netlink_callback *cb) -{ - struct devlink_linecard *linecard; - struct devlink *devlink; - int start = cb->args[0]; - unsigned long index; - int idx = 0; - int err = 0; - - mutex_lock(&devlink_mutex); - xa_for_each_marked(&devlinks, index, devlink, DEVLINK_REGISTERED) { - if (!devlink_try_get(devlink)) - continue; - - if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) - goto retry; - - mutex_lock(&devlink->linecards_lock); - list_for_each_entry(linecard, &devlink->linecard_list, list) { - if (idx < start || !linecard->ops->info_get) { - idx++; - continue; - } - mutex_lock(&linecard->state_lock); - err = devlink_nl_linecard_info_fill(msg, devlink, linecard, - DEVLINK_CMD_LINECARD_INFO_GET, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, - cb->extack); - mutex_unlock(&linecard->state_lock); - if (err) { - mutex_unlock(&devlink->linecards_lock); - 
devlink_put(devlink); - goto out; - } - idx++; - } - mutex_unlock(&devlink->linecards_lock); -retry: - devlink_put(devlink); - } -out: - mutex_unlock(&devlink_mutex); - - if (err != -EMSGSIZE) - return err; - - cb->args[0] = idx; - return msg->len; -} - static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, enum devlink_command cmd, u32 portid, @@ -6602,6 +6361,10 @@ out_dev: return err; } +struct devlink_info_req { + struct sk_buff *msg; +}; + int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name) { return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name); @@ -9321,13 +9084,6 @@ static const struct genl_small_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, }, - { - .cmd = DEVLINK_CMD_LINECARD_INFO_GET, - .doit = devlink_nl_cmd_linecard_info_get_doit, - .dumpit = devlink_nl_cmd_linecard_info_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_SB_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@ -10508,7 +10264,6 @@ devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, linecard->priv = priv; linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; mutex_init(&linecard->state_lock); - INIT_LIST_HEAD(&linecard->device_list); err = devlink_linecard_types_init(linecard); if (err) { @@ -10536,7 +10291,6 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) struct devlink *devlink = linecard->devlink; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); - WARN_ON(!list_empty(&linecard->device_list)); mutex_lock(&devlink->linecards_lock); list_del(&linecard->list); devlink_linecard_types_fini(linecard); @@ -10545,52 +10299,6 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) } EXPORT_SYMBOL_GPL(devlink_linecard_destroy); -/** - * devlink_linecard_device_create - Create a device on linecard - * - * @linecard: devlink linecard - * @device_index: index of the linecard device - * @priv: user priv pointer - * - * Return: Line card device structure or an ERR_PTR() encoded error code. 
- */ -struct devlink_linecard_device * -devlink_linecard_device_create(struct devlink_linecard *linecard, - unsigned int device_index, void *priv) -{ - struct devlink_linecard_device *linecard_device; - - linecard_device = kzalloc(sizeof(*linecard_device), GFP_KERNEL); - if (!linecard_device) - return ERR_PTR(-ENOMEM); - linecard_device->index = device_index; - linecard_device->priv = priv; - mutex_lock(&linecard->state_lock); - list_add_tail(&linecard_device->list, &linecard->device_list); - devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); - mutex_unlock(&linecard->state_lock); - return linecard_device; -} -EXPORT_SYMBOL_GPL(devlink_linecard_device_create); - -/** - * devlink_linecard_device_destroy - Destroy device on linecard - * - * @linecard: devlink linecard - * @linecard_device: devlink linecard device - */ -void -devlink_linecard_device_destroy(struct devlink_linecard *linecard, - struct devlink_linecard_device *linecard_device) -{ - mutex_lock(&linecard->state_lock); - list_del(&linecard_device->list); - devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); - mutex_unlock(&linecard->state_lock); - kfree(linecard_device); -} -EXPORT_SYMBOL_GPL(devlink_linecard_device_destroy); - /** * devlink_linecard_provision_set - Set provisioning on linecard * @@ -10623,7 +10331,6 @@ EXPORT_SYMBOL_GPL(devlink_linecard_provision_set); void devlink_linecard_provision_clear(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); - WARN_ON(!list_empty(&linecard->device_list)); linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh index 53a65f416770..08a922d8b86a 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh @@ -152,7 +152,6 @@ unprovision_test() LC_16X100G_TYPE="16x100G" LC_16X100G_PORT_COUNT=16 -LC_16X100G_DEVICE_COUNT=4 supported_types_check() { @@ -178,42 +177,6 @@ supported_types_check() check_err $? "16X100G not found between supported types of linecard $lc" } -lc_info_check() -{ - local lc=$1 - local fixed_hw_revision - local running_ini_version - - fixed_hw_revision=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \ - jq -e -r '.[][][].versions.fixed."hw.revision"') - check_err $? "Failed to get linecard $lc fixed.hw.revision" - log_info "Linecard $lc fixed.hw.revision: \"$fixed_hw_revision\"" - running_ini_version=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \ - jq -e -r '.[][][].versions.running."ini.version"') - check_err $? "Failed to get linecard $lc running.ini.version" - log_info "Linecard $lc running.ini.version: \"$running_ini_version\"" -} - -lc_devices_check() -{ - local lc=$1 - local expected_device_count=$2 - local device_count - local device - - device_count=$(devlink lc show $DEVLINK_DEV lc $lc -j | \ - jq -e -r ".[][][].devices |length") - check_err $? "Failed to get linecard $lc device count" - [ $device_count != 0 ] - check_err $? "No device found on linecard $lc" - [ $device_count == $expected_device_count ] - check_err $? 
"Unexpected device count on linecard $lc (got $expected_device_count, expected $device_count)" - for (( device=0; device Date: Tue, 26 Apr 2022 12:11:33 -0600 Subject: io_uring: add POLL_FIRST support for send/sendmsg and recv/recvmsg If IORING_RECVSEND_POLL_FIRST is set for recv/recvmsg or send/sendmsg, then we arm poll first rather than attempt a receive or send upfront. This can be useful if we expect there to be no data (or space) available for the request, as we can then avoid wasting time on the initial issue attempt. Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 27 +++++++++++++++++++++++++-- include/uapi/linux/io_uring.h | 10 ++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index efe4e92ad8ad..6db9ab8d4d15 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -637,6 +637,7 @@ struct io_sr_msg { int bgid; size_t len; size_t done_io; + unsigned int flags; }; struct io_open { @@ -5272,11 +5273,14 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = &req->sr_msg; - if (unlikely(sqe->addr2 || sqe->file_index)) + if (unlikely(sqe->file_index)) return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); + sr->flags = READ_ONCE(sqe->addr2); + if (sr->flags & ~IORING_RECVSEND_POLL_FIRST) + return -EINVAL; sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; if (sr->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; @@ -5311,6 +5315,10 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg = &iomsg; } + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return io_setup_async_msg(req, kmsg); + flags = req->sr_msg.msg_flags; if (issue_flags & IO_URING_F_NONBLOCK) flags |= MSG_DONTWAIT; @@ -5353,6 +5361,10 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags) int min_ret = 0; int ret; + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return -EAGAIN; + sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; @@ -5505,11 +5517,14 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = &req->sr_msg; - if (unlikely(sqe->addr2 || sqe->file_index)) + if (unlikely(sqe->file_index)) return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); + sr->flags = READ_ONCE(sqe->addr2); + if (sr->flags & ~IORING_RECVSEND_POLL_FIRST) + return -EINVAL; sr->bgid = READ_ONCE(sqe->buf_group); sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; if (sr->msg_flags & MSG_DONTWAIT) @@ -5546,6 +5561,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg = &iomsg; } + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return io_setup_async_msg(req, kmsg); + if (req->flags & REQ_F_BUFFER_SELECT) { kbuf = io_recv_buffer_select(req, issue_flags); if (IS_ERR(kbuf)) @@ -5603,6 +5622,10 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) int ret, min_ret = 0; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return -EAGAIN; + sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index fad63564678a..06621a278cb6 100644 --- a/include/uapi/linux/io_uring.h +++ 
b/include/uapi/linux/io_uring.h @@ -213,6 +213,16 @@ enum { #define IORING_ASYNC_CANCEL_FD (1U << 1) #define IORING_ASYNC_CANCEL_ANY (1U << 2) +/* + * send/sendmsg and recv/recvmsg flags (sqe->addr2) + * + * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send + * or receive and arm poll if that yields an + * -EAGAIN result, arm poll upfront and skip + * the initial transfer attempt. + */ +#define IORING_RECVSEND_POLL_FIRST (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3-59-g8ed1b From 6cc2df8e3a3967e7c13a424f87f6efb1d4a62d80 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 6 May 2022 18:05:07 +0200 Subject: landlock: Add clang-format exceptions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation for a following commit, add clang-format on and clang-format off stanzas around constant definitions. This makes it possible to keep the values aligned, which is much more readable than packed definitions. Link: https://lore.kernel.org/r/20220506160513.523257-2-mic@digikod.net Cc: stable@vger.kernel.org Signed-off-by: Mickaël Salaün --- include/uapi/linux/landlock.h | 4 ++++ security/landlock/fs.c | 2 ++ security/landlock/limits.h | 4 ++++ 3 files changed, 10 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index b3d952067f59..15c31abb0d76 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -33,7 +33,9 @@ struct landlock_ruleset_attr { * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI * version. */ +/* clang-format off */ #define LANDLOCK_CREATE_RULESET_VERSION (1U << 0) +/* clang-format on */ /** * enum landlock_rule_type - Landlock rule type @@ -120,6 +122,7 @@ struct landlock_path_beneath_attr { * :manpage:`access(2)`. * Future Landlock evolutions will enable to restrict them. */ +/* clang-format off */ #define LANDLOCK_ACCESS_FS_EXECUTE (1ULL << 0) #define LANDLOCK_ACCESS_FS_WRITE_FILE (1ULL << 1) #define LANDLOCK_ACCESS_FS_READ_FILE (1ULL << 2) @@ -133,5 +136,6 @@ struct landlock_path_beneath_attr { #define LANDLOCK_ACCESS_FS_MAKE_FIFO (1ULL << 10) #define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11) #define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12) +/* clang-format on */ #endif /* _UAPI_LINUX_LANDLOCK_H */ diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 97b8e421f617..4195a6be60b2 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -141,10 +141,12 @@ retry: } /* All access rights that can be tied to files. */ +/* clang-format off */ #define ACCESS_FILE ( \ LANDLOCK_ACCESS_FS_EXECUTE | \ LANDLOCK_ACCESS_FS_WRITE_FILE | \ LANDLOCK_ACCESS_FS_READ_FILE) +/* clang-format on */ /* * @path: Should have been checked by get_path_from_fd().
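As an aside on the mechanics: the stanzas are ordinary comments that clang-format recognizes, so any hand-aligned block can be fenced the same way. A minimal illustration outside the kernel tree (the EXAMPLE_* names are invented for this sketch):

/* clang-format off */
#define EXAMPLE_ACCESS_READ    (1ULL << 0)
#define EXAMPLE_ACCESS_WRITE   (1ULL << 1)
#define EXAMPLE_ACCESS_EXECUTE (1ULL << 2)
/* clang-format on */

Without the fence, a default clang-format run would typically repack the values to a single space after each name; with it, the column alignment survives reformatting.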
diff --git a/security/landlock/limits.h b/security/landlock/limits.h index 2a0a1095ee27..a274ae6b5570 100644 --- a/security/landlock/limits.h +++ b/security/landlock/limits.h @@ -12,10 +12,14 @@ #include #include +/* clang-format off */ + #define LANDLOCK_MAX_NUM_LAYERS 64 #define LANDLOCK_MAX_NUM_RULES U32_MAX #define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_MAKE_SYM #define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1) +/* clang-format on */ + #endif /* _SECURITY_LANDLOCK_LIMITS_H */ -- cgit v1.2.3-59-g8ed1b From a36e07dfe6ee71e209383ea9288cd8d1617e14f9 Mon Sep 17 00:00:00 2001 From: Gleb Fotengauer-Malinovskiy Date: Fri, 6 May 2022 17:24:54 +0000 Subject: rfkill: uapi: fix RFKILL_IOCTL_MAX_SIZE ioctl request definition The definition of RFKILL_IOCTL_MAX_SIZE introduced by commit 54f586a91532 ("rfkill: make new event layout opt-in") is unusable since it is based on RFKILL_IOC_EXT_SIZE which has not been defined. Fix that by replacing the undefined constant with the constant which is intended to be used in this definition. Fixes: 54f586a91532 ("rfkill: make new event layout opt-in") Cc: stable@vger.kernel.org # 5.11+ Signed-off-by: Gleb Fotengauer-Malinovskiy Signed-off-by: Dmitry V. Levin Link: https://lore.kernel.org/r/20220506172454.120319-1-glebfm@altlinux.org [add commit message provided later by Dmitry] Signed-off-by: Johannes Berg --- include/uapi/linux/rfkill.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 283c5a7b3f2c..db6c8588c1d0 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -184,7 +184,7 @@ struct rfkill_event_ext { #define RFKILL_IOC_NOINPUT 1 #define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT) #define RFKILL_IOC_MAX_SIZE 2 -#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_EXT_SIZE, __u32) +#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32) /* and that's all userspace gets */ -- cgit v1.2.3-59-g8ed1b From ebdeb7c01d025cb059f05dc26b9dc914e46dd43f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 31 Mar 2022 19:27:52 -0600 Subject: io_uring: add support for 128-byte SQEs Normal SQEs are 64-bytes in length, which is fine for all the commands we support. However, in preparation for supporting passthrough IO, provide an option for setting up a ring with 128-byte SQEs. We continue to use the same type for io_uring_sqe, it's marked and commented with a zero sized array pad at the end. This provides up to 80 bytes of data for a passthrough command - 64 bytes for the extra added data, and 16 bytes available at the end of the existing SQE. Signed-off-by: Jens Axboe --- fs/io_uring.c | 14 +++++++++++--- include/uapi/linux/io_uring.h | 8 ++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 53e54fc05488..106a0db56ddb 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8519,8 +8519,12 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) * though the application is the one updating it. 
*/ head = READ_ONCE(ctx->sq_array[sq_idx]); - if (likely(head < ctx->sq_entries)) + if (likely(head < ctx->sq_entries)) { + /* double index for 128-byte SQEs, twice as long */ + if (ctx->flags & IORING_SETUP_SQE128) + head <<= 1; return &ctx->sq_sqes[head]; + } /* drop invalid entries */ ctx->cq_extra--; @@ -11689,7 +11693,10 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, rings->sq_ring_entries = p->sq_entries; rings->cq_ring_entries = p->cq_entries; - size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); + if (p->flags & IORING_SETUP_SQE128) + size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); + else + size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { io_mem_free(ctx->rings); ctx->rings = NULL; @@ -11933,7 +11940,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | - IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG)) + IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG | + IORING_SETUP_SQE128)) return -EINVAL; return io_uring_create(entries, &p, params); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 31e719f38615..ee84132cadad 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -63,6 +63,12 @@ struct io_uring_sqe { }; __u64 addr3; __u64 __pad2[1]; + + /* + * If the ring is initialized with IORING_SETUP_SQE128, then this field + * contains 64-bytes of padding, doubling the size of the SQE. + */ + __u64 __big_sqe_pad[0]; }; enum { @@ -119,6 +125,8 @@ enum { */ #define IORING_SETUP_TASKRUN_FLAG (1U << 9) +#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */ + enum io_uring_op { IORING_OP_NOP, IORING_OP_READV, -- cgit v1.2.3-59-g8ed1b From 7a51e5b44b92686eebd3e1b46b86e1eb4db975db Mon Sep 17 00:00:00 2001 From: Stefan Roesch Date: Tue, 26 Apr 2022 11:21:23 -0700 Subject: io_uring: support CQE32 in io_uring_cqe This adds the big_cqe array to the struct io_uring_cqe to support large CQE's. Co-developed-by: Jens Axboe Signed-off-by: Stefan Roesch Reviewed-by: Kanchan Joshi Link: https://lore.kernel.org/r/20220426182134.136504-2-shr@fb.com Signed-off-by: Jens Axboe --- include/uapi/linux/io_uring.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index ee84132cadad..ac2d90d669c3 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -126,6 +126,7 @@ enum { #define IORING_SETUP_TASKRUN_FLAG (1U << 9) #define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */ +#define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */ enum io_uring_op { IORING_OP_NOP, @@ -245,6 +246,12 @@ struct io_uring_cqe { __u64 user_data; /* sqe->data submission passed back */ __s32 res; /* result code for this event */ __u32 flags; + + /* + * If the ring is initialized with IORING_SETUP_CQE32, then this field + * contains 16-bytes of padding, doubling the size of the CQE. + */ + __u64 big_cqe[]; }; /* -- cgit v1.2.3-59-g8ed1b From bd32889e841c12533d09a1bd02bba932baa9ed8f Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Fri, 29 Apr 2022 23:56:41 +0000 Subject: binder: add BINDER_GET_EXTENDED_ERROR ioctl Provide a userspace mechanism to pull precise error information upon failed operations. 
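As a rough sketch of the intended usage (the ioctl number and struct layout come from the uapi hunk below; the device path, error handling and field comments here are illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* local mirror of the uapi additions below; real users would pull
 * these from <linux/android/binder.h> once this patch is applied */
struct binder_extended_error {
	uint32_t id;      /* identifier for the failed operation */
	uint32_t command; /* BR_* return command */
	int32_t param;    /* negative errno detail */
};
#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)

int main(void)
{
	struct binder_extended_error ee;
	int fd = open("/dev/binder", O_RDWR); /* illustrative path */

	if (fd < 0)
		return 1;
	/* after an operation fails, pull the details; the driver resets
	 * them to BR_OK once they have been read */
	if (ioctl(fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
		printf("id=%u command=%u param=%d\n", ee.id, ee.command, ee.param);
	return 0;
}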
Extending the current error codes returned by the interfaces allows userspace to better determine the course of action. This could be for instance, retrying a failed transaction at a later point and thus offloading the error handling from the driver. Acked-by: Christian Brauner (Microsoft) Acked-by: Todd Kjos Signed-off-by: Carlos Llamas Link: https://lore.kernel.org/r/20220429235644.697372-3-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 60 +++++++++++++++++++++++++++++++++++++ drivers/android/binder_internal.h | 3 ++ include/uapi/linux/android/binder.h | 16 ++++++++++ 3 files changed, 79 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index f0690d46caa1..4c2caf38e056 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -147,6 +147,13 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error, binder_stop_on_user_error = 2; \ } while (0) +#define binder_set_extended_error(ee, _id, _command, _param) \ + do { \ + (ee)->id = _id; \ + (ee)->command = _command; \ + (ee)->param = _param; \ + } while (0) + #define to_flat_binder_object(hdr) \ container_of(hdr, struct flat_binder_object, hdr) @@ -2708,6 +2715,24 @@ static struct binder_node *binder_get_node_refs_for_txn( return target_node; } +static void binder_set_txn_from_error(struct binder_transaction *t, int id, + uint32_t command, int32_t param) +{ + struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); + + if (!from) { + /* annotation for sparse */ + __release(&from->proc->inner_lock); + return; + } + + /* don't override existing errors */ + if (from->ee.command == BR_OK) + binder_set_extended_error(&from->ee, id, command, param); + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -2753,6 +2778,10 @@ static void binder_transaction(struct binder_proc *proc, e->offsets_size = tr->offsets_size; strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); + binder_inner_proc_unlock(proc); + if (reply) { binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; @@ -3498,10 +3527,16 @@ err_invalid_target_handle: BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_set_txn_from_error(in_reply_to, t_debug_id, + return_error, return_error_param); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, + return_error, return_error_param); + binder_inner_proc_unlock(proc); thread->return_error.cmd = return_error; binder_enqueue_thread_work(thread, &thread->return_error.work); } @@ -4628,6 +4663,7 @@ static struct binder_thread *binder_get_thread_ilocked( thread->return_error.cmd = BR_OK; thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; thread->reply_error.cmd = BR_OK; + thread->ee.command = BR_OK; INIT_LIST_HEAD(&new_thread->waiting_thread_node); return thread; } @@ -5066,6 +5102,25 @@ static int binder_ioctl_get_freezer_info( return 0; } +static int binder_ioctl_get_extended_error(struct binder_thread *thread, + void __user *ubuf) +{ + struct binder_extended_error *ee = &thread->ee; + + 
binder_inner_proc_lock(thread->proc); + if (copy_to_user(ubuf, ee, sizeof(*ee))) { + binder_inner_proc_unlock(thread->proc); + return -EFAULT; + } + + ee->id = 0; + ee->command = BR_OK; + ee->param = 0; + binder_inner_proc_unlock(thread->proc); + + return 0; +} + static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; @@ -5274,6 +5329,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } + case BINDER_GET_EXTENDED_ERROR: + ret = binder_ioctl_get_extended_error(thread, ubuf); + if (ret < 0) + goto err; + break; default: ret = -EINVAL; goto err; diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index cf70a104594d..8dc0bccf8513 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -480,6 +480,8 @@ struct binder_proc { * (only accessed by this thread) * @reply_error: transaction errors reported by target thread * (protected by @proc->inner_lock) + * @ee: extended error information from this thread + * (protected by @proc->inner_lock) * @wait: wait queue for thread work * @stats: per-thread statistics * (atomics, no lock needed) @@ -504,6 +506,7 @@ struct binder_thread { bool process_todo; struct binder_error return_error; struct binder_error reply_error; + struct binder_extended_error ee; wait_queue_head_t wait; struct binder_stats stats; atomic_t tmp_ref; diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 11157fae8a8e..e6ee8cae303b 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -236,6 +236,21 @@ struct binder_frozen_status_info { __u32 async_recv; }; +/* struct binder_extened_error - extended error information + * @id: identifier for the failed operation + * @command: command as defined by binder_driver_return_protocol + * @param: parameter holding a negative errno value + * + * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information + * returned by the driver upon a failed operation. Userspace can pull this + * data to properly handle specific error scenarios. + */ +struct binder_extended_error { + __u32 id; + __u32 command; + __s32 param; +}; + #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) @@ -249,6 +264,7 @@ struct binder_frozen_status_info { #define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info) #define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info) #define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32) +#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error) /* * NOTE: Two special error codes you should check for when calling -- cgit v1.2.3-59-g8ed1b From 7ff960a6fe399fdcbca6159063684671ae57eee9 Mon Sep 17 00:00:00 2001 From: Shunsuke Mie Date: Tue, 10 May 2022 19:27:23 +0900 Subject: virtio: fix virtio transitional ids This commit fixes the transitional PCI device ID. Fixes: d61914ea6ada ("virtio: update virtio id table, add transitional ids") Signed-off-by: Shunsuke Mie Link: https://lore.kernel.org/r/20220510102723.87666-1-mie@igel.co.jp Signed-off-by: Michael S. 
Tsirkin --- include/uapi/linux/virtio_ids.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 80d76b75bccd..7aa2eb766205 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -73,12 +73,12 @@ * Virtio Transitional IDs */ -#define VIRTIO_TRANS_ID_NET 1000 /* transitional virtio net */ -#define VIRTIO_TRANS_ID_BLOCK 1001 /* transitional virtio block */ -#define VIRTIO_TRANS_ID_BALLOON 1002 /* transitional virtio balloon */ -#define VIRTIO_TRANS_ID_CONSOLE 1003 /* transitional virtio console */ -#define VIRTIO_TRANS_ID_SCSI 1004 /* transitional virtio SCSI */ -#define VIRTIO_TRANS_ID_RNG 1005 /* transitional virtio rng */ -#define VIRTIO_TRANS_ID_9P 1009 /* transitional virtio 9p console */ +#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */ +#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */ +#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */ +#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */ +#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */ +#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */ +#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */ #endif /* _LINUX_VIRTIO_IDS_H */ -- cgit v1.2.3-59-g8ed1b From c23d47abee3a54e4991ed3993340596d04aabd6a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 19 Apr 2022 08:33:03 +0200 Subject: loop: remove most the top-of-file boilerplate comment from the UAPI header Just leave the SPDX marker and the copyright notice and remove the irrelevant rest. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220419063303.583106-5-hch@lst.de Signed-off-by: Jens Axboe --- include/uapi/linux/loop.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 98e60801195e..6f63527dd2ed 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -1,11 +1,6 @@ /* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */ /* - * include/linux/loop.h - * - * Written by Theodore Ts'o, 3/29/93. - * - * Copyright 1993 by Theodore Ts'o. Redistribution of this file is - * permitted under the GNU General Public License. + * Copyright 1993 by Theodore Ts'o. */ #ifndef _UAPI_LINUX_LOOP_H #define _UAPI_LINUX_LOOP_H -- cgit v1.2.3-59-g8ed1b From 26101f5ab6bdf30ac25c8e578e0b4873e7849e0c Mon Sep 17 00:00:00 2001 From: Kaixi Fan Date: Sat, 30 Apr 2022 15:48:42 +0800 Subject: bpf: Add source ip in "struct bpf_tunnel_key" Add tunnel source ip field in "struct bpf_tunnel_key". Add related code to set and get tunnel source field. Signed-off-by: Kaixi Fan Link: https://lore.kernel.org/r/20220430074844.69214-2-fankaixi.li@bytedance.com Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 4 ++++ net/core/filter.c | 9 +++++++++ tools/include/uapi/linux/bpf.h | 4 ++++ 3 files changed, 17 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 444fe6f1cf35..95a3d1ff6255 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5604,6 +5604,10 @@ struct bpf_tunnel_key { __u8 tunnel_ttl; __u16 tunnel_ext; /* Padding, future use. */ __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; }; /* user accessible mirror of in-kernel xfrm_state. 
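A minimal consumer sketch (assuming a tc classifier program built with libbpf against headers that already contain this patch; the address and verdict policy are illustrative):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int tunnel_local_ip_check(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	/* sizeof(key) now covers the local_* union; kernels without
	 * this patch reject the larger size with -EINVAL */
	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_OK; /* no tunnel metadata attached */

	/* on receive, local_ipv4 is the outer destination address in
	 * host byte order, matching the remote_ipv4 conventions */
	if (key.local_ipv4 != 0x0a000001) /* 10.0.0.1, illustrative */
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";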
diff --git a/net/core/filter.c b/net/core/filter.c index b741b9f7e6a9..fe0da529d00f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4498,6 +4498,7 @@ BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key if (unlikely(size != sizeof(struct bpf_tunnel_key))) { err = -EINVAL; switch (size) { + case offsetof(struct bpf_tunnel_key, local_ipv6[0]): case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): goto set_compat; @@ -4523,10 +4524,14 @@ set_compat: if (flags & BPF_F_TUNINFO_IPV6) { memcpy(to->remote_ipv6, &info->key.u.ipv6.src, sizeof(to->remote_ipv6)); + memcpy(to->local_ipv6, &info->key.u.ipv6.dst, + sizeof(to->local_ipv6)); to->tunnel_label = be32_to_cpu(info->key.label); } else { to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); + to->local_ipv4 = be32_to_cpu(info->key.u.ipv4.dst); + memset(&to->local_ipv6[1], 0, sizeof(__u32) * 3); to->tunnel_label = 0; } @@ -4597,6 +4602,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { + case offsetof(struct bpf_tunnel_key, local_ipv6[0]): case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): @@ -4639,10 +4645,13 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, info->mode |= IP_TUNNEL_INFO_IPV6; memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, sizeof(from->remote_ipv6)); + memcpy(&info->key.u.ipv6.src, from->local_ipv6, + sizeof(from->local_ipv6)); info->key.label = cpu_to_be32(from->tunnel_label) & IPV6_FLOWLABEL_MASK; } else { info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); + info->key.u.ipv4.src = cpu_to_be32(from->local_ipv4); } return 0; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 444fe6f1cf35..95a3d1ff6255 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5604,6 +5604,10 @@ struct bpf_tunnel_key { __u8 tunnel_ttl; __u16 tunnel_ext; /* Padding, future use. */ __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; }; /* user accessible mirror of in-kernel xfrm_state. -- cgit v1.2.3-59-g8ed1b From f7e0beaf39d3868dc700d4954b26cf8443c5d423 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Tue, 10 May 2022 13:59:19 -0700 Subject: bpf, x86: Generate trampolines from bpf_tramp_links Replace struct bpf_tramp_progs with struct bpf_tramp_links to collect struct bpf_tramp_link(s) for a trampoline. struct bpf_tramp_link extends bpf_link to act as a linked list node. arch_prepare_bpf_trampoline() accepts a struct bpf_tramp_links to collects all bpf_tramp_link(s) that a trampoline should call. Change BPF trampoline and bpf_struct_ops to pass bpf_tramp_links instead of bpf_tramp_progs. 
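The resulting two-level nesting (a bpf_link embedded in a bpf_tramp_link, itself embedded in e.g. bpf_tracing_link) is what turns the container_of() member path into "link.link" in the hunks below. A standalone userspace model of that navigation (types pared down to a skeleton; not kernel code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* simplified stand-ins for the kernel structures in this patch */
struct bpf_link { int id; };
struct bpf_tramp_link { struct bpf_link link; };
struct bpf_tracing_link { struct bpf_tramp_link link; int attach_type; };

int main(void)
{
	struct bpf_tracing_link tl = { .link.link.id = 7, .attach_type = 2 };
	struct bpf_link *l = &tl.link.link; /* what the link core hands out */

	/* same member path as bpf_tracing_link_release() below */
	struct bpf_tracing_link *back =
		container_of(l, struct bpf_tracing_link, link.link);

	printf("id=%d attach_type=%d\n", back->link.link.id, back->attach_type);
	return 0;
}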
Signed-off-by: Kui-Feng Lee Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220510205923.3206889-2-kuifeng@fb.com --- arch/x86/net/bpf_jit_comp.c | 36 +++++++++++---------- include/linux/bpf.h | 36 ++++++++++++++------- include/linux/bpf_types.h | 1 + include/uapi/linux/bpf.h | 1 + kernel/bpf/bpf_struct_ops.c | 71 +++++++++++++++++++++++++++------------- kernel/bpf/syscall.c | 23 +++++-------- kernel/bpf/trampoline.c | 73 ++++++++++++++++++++++++------------------ net/bpf/bpf_dummy_struct_ops.c | 24 +++++++++++--- tools/bpf/bpftool/link.c | 1 + tools/include/uapi/linux/bpf.h | 1 + 10 files changed, 164 insertions(+), 103 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 16b6efacf7c6..38eb43159230 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1762,10 +1762,12 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, } static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, - struct bpf_prog *p, int stack_size, bool save_ret) + struct bpf_tramp_link *l, int stack_size, + bool save_ret) { u8 *prog = *pprog; u8 *jmp_insn; + struct bpf_prog *p = l->link.prog; /* arg1: mov rdi, progs[i] */ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); @@ -1850,14 +1852,14 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) } static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, - struct bpf_tramp_progs *tp, int stack_size, + struct bpf_tramp_links *tl, int stack_size, bool save_ret) { int i; u8 *prog = *pprog; - for (i = 0; i < tp->nr_progs; i++) { - if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, + for (i = 0; i < tl->nr_links; i++) { + if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, save_ret)) return -EINVAL; } @@ -1866,7 +1868,7 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, } static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, - struct bpf_tramp_progs *tp, int stack_size, + struct bpf_tramp_links *tl, int stack_size, u8 **branches) { u8 *prog = *pprog; @@ -1877,8 +1879,8 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, */ emit_mov_imm32(&prog, false, BPF_REG_0, 0); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); - for (i = 0; i < tp->nr_progs; i++) { - if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true)) + for (i = 0; i < tl->nr_links; i++) { + if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, true)) return -EINVAL; /* mod_ret prog stored return value into [rbp - 8]. 
Emit: @@ -1980,14 +1982,14 @@ static bool is_valid_bpf_tramp_flags(unsigned int flags) */ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, const struct btf_func_model *m, u32 flags, - struct bpf_tramp_progs *tprogs, + struct bpf_tramp_links *tlinks, void *orig_call) { int ret, i, nr_args = m->nr_args; int regs_off, ip_off, args_off, stack_size = nr_args * 8; - struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; - struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; - struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; + struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; + struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; + struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; u8 **branches = NULL; u8 *prog; bool save_ret; @@ -2078,13 +2080,13 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } } - if (fentry->nr_progs) + if (fentry->nr_links) if (invoke_bpf(m, &prog, fentry, regs_off, flags & BPF_TRAMP_F_RET_FENTRY_RET)) return -EINVAL; - if (fmod_ret->nr_progs) { - branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *), + if (fmod_ret->nr_links) { + branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), GFP_KERNEL); if (!branches) return -ENOMEM; @@ -2111,7 +2113,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i prog += X86_PATCH_SIZE; } - if (fmod_ret->nr_progs) { + if (fmod_ret->nr_links) { /* From Intel 64 and IA-32 Architectures Optimization * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler * Coding Rule 11: All branch targets should be 16-byte @@ -2121,12 +2123,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i /* Update the branches saved in invoke_bpf_mod_ret with the * aligned address of do_fexit. */ - for (i = 0; i < fmod_ret->nr_progs; i++) + for (i = 0; i < fmod_ret->nr_links; i++) emit_cond_near_jump(&branches[i], prog, branches[i], X86_JNE); } - if (fexit->nr_progs) + if (fexit->nr_links) if (invoke_bpf(m, &prog, fexit, regs_off, false)) { ret = -EINVAL; goto cleanup; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 551b7198ae8a..75e0110a65e1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -723,11 +723,11 @@ struct btf_func_model { /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. 
Pick a number to fit into BPF_IMAGE_SIZE / 2 */ -#define BPF_MAX_TRAMP_PROGS 38 +#define BPF_MAX_TRAMP_LINKS 38 -struct bpf_tramp_progs { - struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; - int nr_progs; +struct bpf_tramp_links { + struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS]; + int nr_links; }; /* Different use cases for BPF trampoline: @@ -753,7 +753,7 @@ struct bpf_tramp_progs { struct bpf_tramp_image; int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, const struct btf_func_model *m, u32 flags, - struct bpf_tramp_progs *tprogs, + struct bpf_tramp_links *tlinks, void *orig_call); /* these two functions are called from generated trampoline */ u64 notrace __bpf_prog_enter(struct bpf_prog *prog); @@ -852,9 +852,10 @@ static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( { return bpf_func(ctx, insnsi); } + #ifdef CONFIG_BPF_JIT -int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); -int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); +int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); +int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); @@ -905,12 +906,12 @@ int bpf_jit_charge_modmem(u32 size); void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); #else -static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, +static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) { return -ENOTSUPP; } -static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog, +static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) { return -ENOTSUPP; @@ -1009,7 +1010,6 @@ struct bpf_prog_aux { bool tail_call_reachable; bool xdp_has_frags; bool use_bpf_prog_pack; - struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -1096,6 +1096,18 @@ struct bpf_link_ops { struct bpf_link_info *info); }; +struct bpf_tramp_link { + struct bpf_link link; + struct hlist_node tramp_hlist; +}; + +struct bpf_tracing_link { + struct bpf_tramp_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; +}; + struct bpf_link_primer { struct bpf_link *link; struct file *file; @@ -1133,8 +1145,8 @@ bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value); -int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs, - struct bpf_prog *prog, +int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, + struct bpf_tramp_link *link, const struct btf_func_model *model, void *image, void *image_end); static inline bool bpf_try_module_get(const void *data, struct module *owner) diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 3e24ad0c4b3c..2b9112b80171 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -141,3 +141,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) +BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops) diff --git 
a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 95a3d1ff6255..3d032ea1b6a3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1013,6 +1013,7 @@ enum bpf_link_type { BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, MAX_BPF_LINK_TYPE, }; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 3a0103ad97bc..d9a3c9207240 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -33,15 +33,15 @@ struct bpf_struct_ops_map { const struct bpf_struct_ops *st_ops; /* protect map_update */ struct mutex lock; - /* progs has all the bpf_prog that is populated + /* link has all the bpf_links that is populated * to the func ptr of the kernel's struct * (in kvalue.data). */ - struct bpf_prog **progs; + struct bpf_link **links; /* image is a page that has all the trampolines * that stores the func args before calling the bpf_prog. * A PAGE_SIZE "image" is enough to store all trampoline for - * "progs[]". + * "links[]". */ void *image; /* uvalue->data stores the kernel struct @@ -283,9 +283,9 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) u32 i; for (i = 0; i < btf_type_vlen(t); i++) { - if (st_map->progs[i]) { - bpf_prog_put(st_map->progs[i]); - st_map->progs[i] = NULL; + if (st_map->links[i]) { + bpf_link_put(st_map->links[i]); + st_map->links[i] = NULL; } } } @@ -316,18 +316,34 @@ static int check_zero_holes(const struct btf_type *t, void *data) return 0; } -int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs, - struct bpf_prog *prog, +static void bpf_struct_ops_link_release(struct bpf_link *link) +{ +} + +static void bpf_struct_ops_link_dealloc(struct bpf_link *link) +{ + struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link); + + kfree(tlink); +} + +const struct bpf_link_ops bpf_struct_ops_link_lops = { + .release = bpf_struct_ops_link_release, + .dealloc = bpf_struct_ops_link_dealloc, +}; + +int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, + struct bpf_tramp_link *link, const struct btf_func_model *model, void *image, void *image_end) { u32 flags; - tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; - tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; + tlinks[BPF_TRAMP_FENTRY].links[0] = link; + tlinks[BPF_TRAMP_FENTRY].nr_links = 1; flags = model->ret_size > 0 ? 
BPF_TRAMP_F_RET_FENTRY_RET : 0; return arch_prepare_bpf_trampoline(NULL, image, image_end, - model, flags, tprogs, NULL); + model, flags, tlinks, NULL); } static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, @@ -338,7 +354,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, struct bpf_struct_ops_value *uvalue, *kvalue; const struct btf_member *member; const struct btf_type *t = st_ops->type; - struct bpf_tramp_progs *tprogs = NULL; + struct bpf_tramp_links *tlinks = NULL; void *udata, *kdata; int prog_fd, err = 0; void *image, *image_end; @@ -362,8 +378,8 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (uvalue->state || refcount_read(&uvalue->refcnt)) return -EINVAL; - tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL); - if (!tprogs) + tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); + if (!tlinks) return -ENOMEM; uvalue = (struct bpf_struct_ops_value *)st_map->uvalue; @@ -386,6 +402,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, for_each_member(i, t, member) { const struct btf_type *mtype, *ptype; struct bpf_prog *prog; + struct bpf_tramp_link *link; u32 moff; moff = __btf_member_bit_offset(t, member) / 8; @@ -439,16 +456,26 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, err = PTR_ERR(prog); goto reset_unlock; } - st_map->progs[i] = prog; if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || prog->aux->attach_btf_id != st_ops->type_id || prog->expected_attach_type != i) { + bpf_prog_put(prog); err = -EINVAL; goto reset_unlock; } - err = bpf_struct_ops_prepare_trampoline(tprogs, prog, + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + bpf_prog_put(prog); + err = -ENOMEM; + goto reset_unlock; + } + bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, + &bpf_struct_ops_link_lops, prog); + st_map->links[i] = &link->link; + + err = bpf_struct_ops_prepare_trampoline(tlinks, link, &st_ops->func_models[i], image, image_end); if (err < 0) @@ -491,7 +518,7 @@ reset_unlock: memset(uvalue, 0, map->value_size); memset(kvalue, 0, map->value_size); unlock: - kfree(tprogs); + kfree(tlinks); mutex_unlock(&st_map->lock); return err; } @@ -546,9 +573,9 @@ static void bpf_struct_ops_map_free(struct bpf_map *map) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; - if (st_map->progs) + if (st_map->links) bpf_struct_ops_map_put_progs(st_map); - bpf_map_area_free(st_map->progs); + bpf_map_area_free(st_map->links); bpf_jit_free_exec(st_map->image); bpf_map_area_free(st_map->uvalue); bpf_map_area_free(st_map); @@ -597,11 +624,11 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) map = &st_map->map; st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE); - st_map->progs = - bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *), + st_map->links = + bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *), NUMA_NO_NODE); st_map->image = bpf_jit_alloc_exec(PAGE_SIZE); - if (!st_map->uvalue || !st_map->progs || !st_map->image) { + if (!st_map->uvalue || !st_map->links || !st_map->image) { bpf_struct_ops_map_free(map); return ERR_PTR(-ENOMEM); } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 50164d324eaf..2dc582773344 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2864,19 +2864,12 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd) } EXPORT_SYMBOL(bpf_link_get_from_fd); -struct bpf_tracing_link { - struct bpf_link link; - enum bpf_attach_type attach_type; - 
struct bpf_trampoline *trampoline; - struct bpf_prog *tgt_prog; -}; - static void bpf_tracing_link_release(struct bpf_link *link) { struct bpf_tracing_link *tr_link = - container_of(link, struct bpf_tracing_link, link); + container_of(link, struct bpf_tracing_link, link.link); - WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog, + WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, tr_link->trampoline)); bpf_trampoline_put(tr_link->trampoline); @@ -2889,7 +2882,7 @@ static void bpf_tracing_link_release(struct bpf_link *link) static void bpf_tracing_link_dealloc(struct bpf_link *link) { struct bpf_tracing_link *tr_link = - container_of(link, struct bpf_tracing_link, link); + container_of(link, struct bpf_tracing_link, link.link); kfree(tr_link); } @@ -2898,7 +2891,7 @@ static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, struct seq_file *seq) { struct bpf_tracing_link *tr_link = - container_of(link, struct bpf_tracing_link, link); + container_of(link, struct bpf_tracing_link, link.link); seq_printf(seq, "attach_type:\t%d\n", @@ -2909,7 +2902,7 @@ static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, struct bpf_link_info *info) { struct bpf_tracing_link *tr_link = - container_of(link, struct bpf_tracing_link, link); + container_of(link, struct bpf_tracing_link, link.link); info->tracing.attach_type = tr_link->attach_type; bpf_trampoline_unpack_key(tr_link->trampoline->key, @@ -2990,7 +2983,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, err = -ENOMEM; goto out_put_prog; } - bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, + bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, &bpf_tracing_link_lops, prog); link->attach_type = prog->expected_attach_type; @@ -3060,11 +3053,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, tgt_prog = prog->aux->dst_prog; } - err = bpf_link_prime(&link->link, &link_primer); + err = bpf_link_prime(&link->link.link, &link_primer); if (err) goto out_unlock; - err = bpf_trampoline_link_prog(prog, tr); + err = bpf_trampoline_link_prog(&link->link, tr); if (err) { bpf_link_cleanup(&link_primer); link = NULL; diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index ada97751ae1b..d5e6bc5517cb 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -168,30 +168,30 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) return ret; } -static struct bpf_tramp_progs * +static struct bpf_tramp_links * bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg) { - const struct bpf_prog_aux *aux; - struct bpf_tramp_progs *tprogs; - struct bpf_prog **progs; + struct bpf_tramp_link *link; + struct bpf_tramp_links *tlinks; + struct bpf_tramp_link **links; int kind; *total = 0; - tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL); - if (!tprogs) + tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); + if (!tlinks) return ERR_PTR(-ENOMEM); for (kind = 0; kind < BPF_TRAMP_MAX; kind++) { - tprogs[kind].nr_progs = tr->progs_cnt[kind]; + tlinks[kind].nr_links = tr->progs_cnt[kind]; *total += tr->progs_cnt[kind]; - progs = tprogs[kind].progs; + links = tlinks[kind].links; - hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) { - *ip_arg |= aux->prog->call_get_func_ip; - *progs++ = aux->prog; + hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) { + *ip_arg |= link->link.prog->call_get_func_ip; + *links++ = link; } } - return tprogs; + return tlinks; } static void __bpf_tramp_image_put_deferred(struct 
work_struct *work) @@ -330,14 +330,14 @@ out: static int bpf_trampoline_update(struct bpf_trampoline *tr) { struct bpf_tramp_image *im; - struct bpf_tramp_progs *tprogs; + struct bpf_tramp_links *tlinks; u32 flags = BPF_TRAMP_F_RESTORE_REGS; bool ip_arg = false; int err, total; - tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg); - if (IS_ERR(tprogs)) - return PTR_ERR(tprogs); + tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg); + if (IS_ERR(tlinks)) + return PTR_ERR(tlinks); if (total == 0) { err = unregister_fentry(tr, tr->cur_image->image); @@ -353,15 +353,15 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) goto out; } - if (tprogs[BPF_TRAMP_FEXIT].nr_progs || - tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs) + if (tlinks[BPF_TRAMP_FEXIT].nr_links || + tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME; if (ip_arg) flags |= BPF_TRAMP_F_IP_ARG; err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE, - &tr->func.model, flags, tprogs, + &tr->func.model, flags, tlinks, tr->func.addr); if (err < 0) goto out; @@ -381,7 +381,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) tr->cur_image = im; tr->selector++; out: - kfree(tprogs); + kfree(tlinks); return err; } @@ -407,13 +407,14 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog) } } -int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) +int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) { enum bpf_tramp_prog_type kind; + struct bpf_tramp_link *link_exiting; int err = 0; int cnt; - kind = bpf_attach_type_to_tramp(prog); + kind = bpf_attach_type_to_tramp(link->link.prog); mutex_lock(&tr->mutex); if (tr->extension_prog) { /* cannot attach fentry/fexit if extension prog is attached. @@ -429,25 +430,33 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) err = -EBUSY; goto out; } - tr->extension_prog = prog; + tr->extension_prog = link->link.prog; err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL, - prog->bpf_func); + link->link.prog->bpf_func); goto out; } - if (cnt >= BPF_MAX_TRAMP_PROGS) { + if (cnt >= BPF_MAX_TRAMP_LINKS) { err = -E2BIG; goto out; } - if (!hlist_unhashed(&prog->aux->tramp_hlist)) { + if (!hlist_unhashed(&link->tramp_hlist)) { /* prog already linked */ err = -EBUSY; goto out; } - hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]); + hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) { + if (link_exiting->link.prog != link->link.prog) + continue; + /* prog already linked */ + err = -EBUSY; + goto out; + } + + hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]); tr->progs_cnt[kind]++; err = bpf_trampoline_update(tr); if (err) { - hlist_del_init(&prog->aux->tramp_hlist); + hlist_del_init(&link->tramp_hlist); tr->progs_cnt[kind]--; } out: @@ -456,12 +465,12 @@ out: } /* bpf_trampoline_unlink_prog() should never fail. 
*/ -int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) +int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) { enum bpf_tramp_prog_type kind; int err; - kind = bpf_attach_type_to_tramp(prog); + kind = bpf_attach_type_to_tramp(link->link.prog); mutex_lock(&tr->mutex); if (kind == BPF_TRAMP_REPLACE) { WARN_ON_ONCE(!tr->extension_prog); @@ -470,7 +479,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) tr->extension_prog = NULL; goto out; } - hlist_del_init(&prog->aux->tramp_hlist); + hlist_del_init(&link->tramp_hlist); tr->progs_cnt[kind]--; err = bpf_trampoline_update(tr); out: @@ -635,7 +644,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr) int __weak arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, const struct btf_func_model *m, u32 flags, - struct bpf_tramp_progs *tprogs, + struct bpf_tramp_links *tlinks, void *orig_call) { return -ENOTSUPP; diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index d0e54e30658a..e78dadfc5829 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -72,13 +72,16 @@ static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args) args->args[3], args->args[4]); } +extern const struct bpf_link_ops bpf_struct_ops_link_lops; + int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops; const struct btf_type *func_proto; struct bpf_dummy_ops_test_args *args; - struct bpf_tramp_progs *tprogs; + struct bpf_tramp_links *tlinks; + struct bpf_tramp_link *link = NULL; void *image = NULL; unsigned int op_idx; int prog_ret; @@ -92,8 +95,8 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, if (IS_ERR(args)) return PTR_ERR(args); - tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL); - if (!tprogs) { + tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); + if (!tlinks) { err = -ENOMEM; goto out; } @@ -105,8 +108,17 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, } set_vm_flush_reset_perms(image); + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto out; + } + /* prog doesn't take the ownership of the reference from caller */ + bpf_prog_inc(prog); + bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog); + op_idx = prog->expected_attach_type; - err = bpf_struct_ops_prepare_trampoline(tprogs, prog, + err = bpf_struct_ops_prepare_trampoline(tlinks, link, &st_ops->func_models[op_idx], image, image + PAGE_SIZE); if (err < 0) @@ -124,7 +136,9 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, out: kfree(args); bpf_jit_free_exec(image); - kfree(tprogs); + if (link) + bpf_link_put(&link->link); + kfree(tlinks); return err; } diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 8fb0116f9136..6353a789322b 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -23,6 +23,7 @@ static const char * const link_type_name[] = { [BPF_LINK_TYPE_XDP] = "xdp", [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", + [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", }; static struct hashmap *link_table; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 95a3d1ff6255..3d032ea1b6a3 100644 --- 
a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1013,6 +1013,7 @@ enum bpf_link_type { BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, MAX_BPF_LINK_TYPE, }; -- cgit v1.2.3-59-g8ed1b From 2fcc82411e74e5e6aba336561cf56fb899bfae4e Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Tue, 10 May 2022 13:59:21 -0700 Subject: bpf, x86: Attach a cookie to fentry/fexit/fmod_ret/lsm. Pass a cookie along with BPF_LINK_CREATE requests. Add a bpf_cookie field to struct bpf_tracing_link to attach a cookie. The cookie of a bpf_tracing_link is available by calling bpf_get_attach_cookie when running the BPF program of the attached link. The value of a cookie will be set at bpf_tramp_run_ctx by the trampoline of the link. Signed-off-by: Kui-Feng Lee Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220510205923.3206889-4-kuifeng@fb.com --- arch/x86/net/bpf_jit_comp.c | 5 +++-- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 9 +++++++++ kernel/bpf/bpf_lsm.c | 17 +++++++++++++++++ kernel/bpf/syscall.c | 12 ++++++++---- kernel/bpf/trampoline.c | 7 +++++-- kernel/trace/bpf_trace.c | 17 +++++++++++++++++ tools/include/uapi/linux/bpf.h | 9 +++++++++ 8 files changed, 69 insertions(+), 8 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 1fbc5cf1c7a7..a2b6d197c226 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1769,9 +1769,10 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, u8 *jmp_insn; int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); struct bpf_prog *p = l->link.prog; + u64 cookie = l->cookie; - /* mov rdi, 0 */ - emit_mov_imm64(&prog, BPF_REG_1, 0, 0); + /* mov rdi, cookie */ + emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); /* Prepare struct bpf_tramp_run_ctx. * diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 256fb802e580..aba7ded56436 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1102,6 +1102,7 @@ struct bpf_link_ops { struct bpf_tramp_link { struct bpf_link link; struct hlist_node tramp_hlist; + u64 cookie; }; struct bpf_tracing_link { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3d032ea1b6a3..bc7f89948f54 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1490,6 +1490,15 @@ union bpf_attr { __aligned_u64 addrs; __aligned_u64 cookies; } kprobe_multi; + struct { + /* this is overlaid with the target_btf_id above. 
*/ + __u32 target_btf_id; + /* black box user-provided value passed through + * to BPF program at the execution time and + * accessible through bpf_get_attach_cookie() BPF helper + */ + __u64 cookie; + } tracing; }; } link_create; diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 064eccba641d..c1351df9f7ee 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -117,6 +117,21 @@ static const struct bpf_func_proto bpf_ima_file_hash_proto = { .allowed = bpf_ima_inode_hash_allowed, }; +BPF_CALL_1(bpf_get_attach_cookie, void *, ctx) +{ + struct bpf_trace_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); + return run_ctx->bpf_cookie; +} + +static const struct bpf_func_proto bpf_get_attach_cookie_proto = { + .func = bpf_get_attach_cookie, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + static const struct bpf_func_proto * bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -141,6 +156,8 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return prog->aux->sleepable ? &bpf_ima_inode_hash_proto : NULL; case BPF_FUNC_ima_file_hash: return prog->aux->sleepable ? &bpf_ima_file_hash_proto : NULL; + case BPF_FUNC_get_attach_cookie: + return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto : NULL; default: return tracing_prog_func_proto(func_id, prog); } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d48165fccf49..72e53489165d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2921,7 +2921,8 @@ static const struct bpf_link_ops bpf_tracing_link_lops = { static int bpf_tracing_prog_attach(struct bpf_prog *prog, int tgt_prog_fd, - u32 btf_id) + u32 btf_id, + u64 bpf_cookie) { struct bpf_link_primer link_primer; struct bpf_prog *tgt_prog = NULL; @@ -2986,6 +2987,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, &bpf_tracing_link_lops, prog); link->attach_type = prog->expected_attach_type; + link->link.cookie = bpf_cookie; mutex_lock(&prog->aux->dst_mutex); @@ -3271,7 +3273,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog, tp_name = prog->aux->attach_func_name; break; } - return bpf_tracing_prog_attach(prog, 0, 0); + return bpf_tracing_prog_attach(prog, 0, 0, 0); case BPF_PROG_TYPE_RAW_TRACEPOINT: case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) @@ -4524,7 +4526,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) case BPF_PROG_TYPE_EXT: ret = bpf_tracing_prog_attach(prog, attr->link_create.target_fd, - attr->link_create.target_btf_id); + attr->link_create.target_btf_id, + attr->link_create.tracing.cookie); break; case BPF_PROG_TYPE_LSM: case BPF_PROG_TYPE_TRACING: @@ -4539,7 +4542,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) else ret = bpf_tracing_prog_attach(prog, attr->link_create.target_fd, - attr->link_create.target_btf_id); + attr->link_create.target_btf_id, + attr->link_create.tracing.cookie); break; case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_SK_LOOKUP: diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index baf1b65d523e..0e9b3aefc34a 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -30,9 +30,12 @@ static DEFINE_MUTEX(trampoline_mutex); bool bpf_prog_has_trampoline(const struct bpf_prog *prog) { enum bpf_attach_type eatype = prog->expected_attach_type; + enum bpf_prog_type ptype = 
prog->type; - return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT || - eatype == BPF_MODIFY_RETURN; + return (ptype == BPF_PROG_TYPE_TRACING && + (eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT || + eatype == BPF_MODIFY_RETURN)) || + (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC); } void *bpf_jit_alloc_exec_page(void) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 7fd11c17558d..2eaac094caf8 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1091,6 +1091,21 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx) +{ + struct bpf_trace_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); + return run_ctx->bpf_cookie; +} + +static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = { + .func = bpf_get_attach_cookie_tracing, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) { #ifndef CONFIG_X86 @@ -1719,6 +1734,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; case BPF_FUNC_get_func_arg_cnt: return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; + case BPF_FUNC_get_attach_cookie: + return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; default: fn = raw_tp_prog_func_proto(func_id, prog); if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3d032ea1b6a3..bc7f89948f54 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1490,6 +1490,15 @@ union bpf_attr { __aligned_u64 addrs; __aligned_u64 cookies; } kprobe_multi; + struct { + /* this is overlaid with the target_btf_id above. */ + __u32 target_btf_id; + /* black box user-provided value passed through + * to BPF program at the execution time and + * accessible through bpf_get_attach_cookie() BPF helper + */ + __u64 cookie; + } tracing; }; } link_create; -- cgit v1.2.3-59-g8ed1b From a8641d7d8500d41d312350470464e03f3df3672a Mon Sep 17 00:00:00 2001 From: Basavaraj Natikar Date: Mon, 9 May 2022 18:50:25 +0530 Subject: HID: amd_sfh: Move bus declaration outside of amd-sfh This should allow external drivers to reference this bus ID reservation and detect data coming from amd-sfh. 
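As a rough illustration of that use case (not part of this patch), an external driver could key off the reserved bus ID like so; the helper name is hypothetical and only sketches the intended check:

	/* Hypothetical consumer sketch: detect whether a HID device sits on
	 * the AMD SFH bus, now that BUS_AMD_SFH is visible outside amd-sfh. */
	#include <linux/hid.h>
	#include <linux/input.h>

	static bool hdev_is_amd_sfh(const struct hid_device *hdev)
	{
		return hdev->bus == BUS_AMD_SFH;	/* 0x20, from input.h */
	}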
Signed-off-by: Mario Limonciello Signed-off-by: Basavaraj Natikar Signed-off-by: Jiri Kosina --- drivers/hid/amd-sfh-hid/amd_sfh_hid.h | 1 - include/uapi/linux/input.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h index cb04f47c8648..ad264db63180 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h @@ -12,7 +12,6 @@ #define AMDSFH_HID_H #define MAX_HID_DEVICES 5 -#define BUS_AMD_SFH 0x20 #define AMD_SFH_HID_VENDOR 0x1022 #define AMD_SFH_HID_PRODUCT 0x0001 diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index ee3127461ee0..ef4257ab3026 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -271,6 +271,7 @@ struct input_mask { #define BUS_RMI 0x1D #define BUS_CEC 0x1E #define BUS_INTEL_ISHTP 0x1F +#define BUS_AMD_SFH 0x20 /* * MT_TOOL types -- cgit v1.2.3-59-g8ed1b From ee692a21e9bf8354bd3ec816f1cf4bff8619ed77 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 11 May 2022 11:17:45 +0530 Subject: fs,io_uring: add infrastructure for uring-cmd file_operations->uring_cmd is a file private handler. This is somewhat similar to ioctl but hopefully a lot more sane and useful as it can be used to enable many io_uring capabilities for the underlying operation. IORING_OP_URING_CMD is a file private kind of request. io_uring doesn't know what is in this command type, it's for the provider of ->uring_cmd() to deal with. Co-developed-by: Kanchan Joshi Signed-off-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220511054750.20432-2-joshi.k@samsung.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 135 ++++++++++++++++++++++++++++++++++++------ include/linux/fs.h | 2 + include/linux/io_uring.h | 33 +++++++++++ include/uapi/linux/io_uring.h | 21 ++++--- 4 files changed, 165 insertions(+), 26 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index ceaf7826ed71..44c57dca358d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -202,13 +202,6 @@ struct io_rings { struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; }; -enum io_uring_cmd_flags { - IO_URING_F_COMPLETE_DEFER = 1, - IO_URING_F_UNLOCKED = 2, - /* int's last bit, sign checks are usually faster than a bit test */ - IO_URING_F_NONBLOCK = INT_MIN, -}; - struct io_mapped_ubuf { u64 ubuf; u64 ubuf_end; @@ -972,6 +965,7 @@ struct io_kiocb { struct io_xattr xattr; struct io_socket sock; struct io_nop nop; + struct io_uring_cmd uring_cmd; }; u8 opcode; @@ -1050,6 +1044,14 @@ struct io_cancel_data { int seq; }; +/* + * The URING_CMD payload starts at 'cmd' in the first sqe, and continues into + * the following sqe if SQE128 is used. 
+ */ +#define uring_cmd_pdu_size(is_sqe128) \ + ((1 + !!(is_sqe128)) * sizeof(struct io_uring_sqe) - \ + offsetof(struct io_uring_sqe, cmd)) + struct io_op_def { /* needs req->file assigned */ unsigned needs_file : 1; @@ -1289,6 +1291,12 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_SOCKET] = { .audit_skip = 1, }, + [IORING_OP_URING_CMD] = { + .needs_file = 1, + .plug = 1, + .needs_async_setup = 1, + .async_size = uring_cmd_pdu_size(1), + }, }; /* requests with any of those set should undergo io_disarm_next() */ @@ -1428,6 +1436,8 @@ const char *io_uring_get_opcode(u8 opcode) return "GETXATTR"; case IORING_OP_SOCKET: return "SOCKET"; + case IORING_OP_URING_CMD: + return "URING_CMD"; case IORING_OP_LAST: return "INVALID"; } @@ -4507,10 +4517,6 @@ static int __io_getxattr_prep(struct io_kiocb *req, const char __user *name; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (unlikely(sqe->ioprio)) - return -EINVAL; if (unlikely(req->flags & REQ_F_FIXED_FILE)) return -EBADF; @@ -4620,10 +4626,6 @@ static int __io_setxattr_prep(struct io_kiocb *req, const char __user *name; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (unlikely(sqe->ioprio)) - return -EINVAL; if (unlikely(req->flags & REQ_F_FIXED_FILE)) return -EBADF; @@ -4910,6 +4912,96 @@ static int io_linkat(struct io_kiocb *req, unsigned int issue_flags) return 0; } +static void io_uring_cmd_work(struct io_kiocb *req, bool *locked) +{ + req->uring_cmd.task_work_cb(&req->uring_cmd); +} + +void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *)) +{ + struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd); + + req->uring_cmd.task_work_cb = task_work_cb; + req->io_task_work.func = io_uring_cmd_work; + io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL)); +} +EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task); + +/* + * Called by consumers of io_uring_cmd, if they originally returned + * -EIOCBQUEUED upon receiving the command. 
+ */ +void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2) +{ + struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd); + + if (ret < 0) + req_set_fail(req); + if (req->ctx->flags & IORING_SETUP_CQE32) + __io_req_complete32(req, 0, ret, 0, res2, 0); + else + io_req_complete(req, ret); +} +EXPORT_SYMBOL_GPL(io_uring_cmd_done); + +static int io_uring_cmd_prep_async(struct io_kiocb *req) +{ + size_t cmd_size; + + cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128); + + memcpy(req->async_data, req->uring_cmd.cmd, cmd_size); + return 0; +} + +static int io_uring_cmd_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_uring_cmd *ioucmd = &req->uring_cmd; + + if (sqe->rw_flags) + return -EINVAL; + ioucmd->cmd = sqe->cmd; + ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); + return 0; +} + +static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_uring_cmd *ioucmd = &req->uring_cmd; + struct io_ring_ctx *ctx = req->ctx; + struct file *file = req->file; + int ret; + + if (!req->file->f_op->uring_cmd) + return -EOPNOTSUPP; + + if (ctx->flags & IORING_SETUP_SQE128) + issue_flags |= IO_URING_F_SQE128; + if (ctx->flags & IORING_SETUP_CQE32) + issue_flags |= IO_URING_F_CQE32; + if (ctx->flags & IORING_SETUP_IOPOLL) + issue_flags |= IO_URING_F_IOPOLL; + + if (req_has_async_data(req)) + ioucmd->cmd = req->async_data; + + ret = file->f_op->uring_cmd(ioucmd, issue_flags); + if (ret == -EAGAIN) { + if (!req_has_async_data(req)) { + if (io_alloc_async_data(req)) + return -ENOMEM; + io_uring_cmd_prep_async(req); + } + return -EAGAIN; + } + + if (ret != -EIOCBQUEUED) + io_uring_cmd_done(ioucmd, ret, 0); + return 0; +} + static int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -6305,9 +6397,7 @@ static int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_socket *sock = &req->sock; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->ioprio || sqe->addr || sqe->rw_flags || sqe->buf_index) + if (sqe->addr || sqe->rw_flags || sqe->buf_index) return -EINVAL; sock->domain = READ_ONCE(sqe->fd); @@ -7755,6 +7845,8 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_getxattr_prep(req, sqe); case IORING_OP_SOCKET: return io_socket_prep(req, sqe); + case IORING_OP_URING_CMD: + return io_uring_cmd_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", @@ -7787,6 +7879,8 @@ static int io_req_prep_async(struct io_kiocb *req) return io_recvmsg_prep_async(req); case IORING_OP_CONNECT: return io_connect_prep_async(req); + case IORING_OP_URING_CMD: + return io_uring_cmd_prep_async(req); } printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n", req->opcode); @@ -8081,6 +8175,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) case IORING_OP_SOCKET: ret = io_socket(req, issue_flags); break; + case IORING_OP_URING_CMD: + ret = io_uring_cmd(req, issue_flags); + break; default: ret = -EINVAL; break; @@ -12699,6 +12796,8 @@ static int __init io_uring_init(void) BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32)); + BUILD_BUG_ON(sizeof(struct io_uring_cmd) > 64); + req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT); return 0; diff --git a/include/linux/fs.h b/include/linux/fs.h index bbde95387a23..87b5af1d9fbe 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1953,6 +1953,7 @@ struct dir_context { #define 
REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) struct iov_iter; +struct io_uring_cmd; struct file_operations { struct module *owner; @@ -1995,6 +1996,7 @@ struct file_operations { struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags); } __randomize_layout; struct inode_operations { diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 24651c229ed2..4a2f6cc5a492 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -5,7 +5,32 @@ #include #include +enum io_uring_cmd_flags { + IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + /* int's last bit, sign checks are usually faster than a bit test */ + IO_URING_F_NONBLOCK = INT_MIN, + + /* ctx state flags, for URING_CMD */ + IO_URING_F_SQE128 = 4, + IO_URING_F_CQE32 = 8, + IO_URING_F_IOPOLL = 16, +}; + +struct io_uring_cmd { + struct file *file; + const void *cmd; + /* callback to defer completions to task context */ + void (*task_work_cb)(struct io_uring_cmd *cmd); + u32 cmd_op; + u32 pad; + u8 pdu[32]; /* available inline for free use */ +}; + #if defined(CONFIG_IO_URING) +void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2); +void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *)); struct sock *io_uring_get_socket(struct file *file); void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); @@ -30,6 +55,14 @@ static inline void io_uring_free(struct task_struct *tsk) __io_uring_free(tsk); } #else +static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, + ssize_t ret2) +{ +} +static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *)) +{ +} static inline struct sock *io_uring_get_socket(struct file *file) { return NULL; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index ac2d90d669c3..23618be55dd2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -22,6 +22,7 @@ struct io_uring_sqe { union { __u64 off; /* offset into file */ __u64 addr2; + __u32 cmd_op; }; union { __u64 addr; /* pointer to buffer or iovecs */ @@ -61,14 +62,17 @@ struct io_uring_sqe { __s32 splice_fd_in; __u32 file_index; }; - __u64 addr3; - __u64 __pad2[1]; - - /* - * If the ring is initialized with IORING_SETUP_SQE128, then this field - * contains 64-bytes of padding, doubling the size of the SQE. - */ - __u64 __big_sqe_pad[0]; + union { + struct { + __u64 addr3; + __u64 __pad2[1]; + }; + /* + * If the ring is initialized with IORING_SETUP_SQE128, then + * this field is used for 80 bytes of arbitrary command data + */ + __u8 cmd[0]; + }; }; enum { @@ -175,6 +179,7 @@ enum io_uring_op { IORING_OP_FGETXATTR, IORING_OP_GETXATTR, IORING_OP_SOCKET, + IORING_OP_URING_CMD, /* this goes last, obviously */ IORING_OP_LAST, -- cgit v1.2.3-59-g8ed1b From 456cba386e94f22fa1b1426303fdcac9e66b1417 Mon Sep 17 00:00:00 2001 From: Kanchan Joshi Date: Wed, 11 May 2022 11:17:48 +0530 Subject: nvme: wire-up uring-cmd support for io-passthru on char-device. Introduce handler for fops->uring_cmd(), implementing async passthru on char device (/dev/ngX). The handler supports newly introduced operation NVME_URING_CMD_IO. This operates on a new structure nvme_uring_cmd, which is similar to struct nvme_passthru_cmd64 but without the embedded 8b result field. 
This field is not needed since uring-cmd allows to return additional result via big-CQE. Signed-off-by: Kanchan Joshi Signed-off-by: Anuj Gupta Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220511054750.20432-5-joshi.k@samsung.com Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 1 + drivers/nvme/host/ioctl.c | 192 +++++++++++++++++++++++++++++++++++++++- drivers/nvme/host/multipath.c | 1 + drivers/nvme/host/nvme.h | 4 + include/uapi/linux/nvme_ioctl.h | 25 ++++++ 5 files changed, 220 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e1846d04817f..682df98db341 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3699,6 +3699,7 @@ static const struct file_operations nvme_ns_chr_fops = { .release = nvme_ns_chr_release, .unlocked_ioctl = nvme_ns_chr_ioctl, .compat_ioctl = compat_ptr_ioctl, + .uring_cmd = nvme_ns_chr_uring_cmd, }; static int nvme_add_ns_cdev(struct nvme_ns *ns) diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 8d2569b656cc..92d695262d8f 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -5,6 +5,7 @@ */ #include /* for force_successful_syscall_return */ #include +#include #include "nvme.h" /* @@ -66,7 +67,8 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf, static struct request *nvme_alloc_user_request(struct request_queue *q, struct nvme_command *cmd, void __user *ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, - u32 meta_seed, void **metap, unsigned timeout, bool vec) + u32 meta_seed, void **metap, unsigned timeout, bool vec, + unsigned int rq_flags, blk_mq_req_flags_t blk_flags) { bool write = nvme_is_write(cmd); struct nvme_ns *ns = q->queuedata; @@ -76,7 +78,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q, void *meta = NULL; int ret; - req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0); + req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); if (IS_ERR(req)) return req; nvme_init_request(req, cmd); @@ -140,7 +142,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, int ret; req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer, - meta_len, meta_seed, &meta, timeout, vec); + meta_len, meta_seed, &meta, timeout, vec, 0, 0); if (IS_ERR(req)) return PTR_ERR(req); @@ -330,6 +332,139 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return status; } +struct nvme_uring_data { + __u64 metadata; + __u64 addr; + __u32 data_len; + __u32 metadata_len; + __u32 timeout_ms; +}; + +/* + * This overlays struct io_uring_cmd pdu. + * Expect build errors if this grows larger than that. 
+ */ +struct nvme_uring_cmd_pdu { + union { + struct bio *bio; + struct request *req; + }; + void *meta; /* kernel-resident buffer */ + void __user *meta_buffer; + u32 meta_len; +}; + +static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( + struct io_uring_cmd *ioucmd) +{ + return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu; +} + +static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd) +{ + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + struct request *req = pdu->req; + struct bio *bio = req->bio; + int status; + u64 result; + + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + status = -EINTR; + else + status = nvme_req(req)->status; + + result = le64_to_cpu(nvme_req(req)->result.u64); + + if (pdu->meta) + status = nvme_finish_user_metadata(req, pdu->meta_buffer, + pdu->meta, pdu->meta_len, status); + if (bio) + blk_rq_unmap_user(bio); + blk_mq_free_request(req); + + io_uring_cmd_done(ioucmd, status, result); +} + +static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err) +{ + struct io_uring_cmd *ioucmd = req->end_io_data; + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + /* extract bio before reusing the same field for request */ + struct bio *bio = pdu->bio; + + pdu->req = req; + req->bio = bio; + /* this takes care of moving rest of completion-work to task context */ + io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb); +} + +static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + struct io_uring_cmd *ioucmd, unsigned int issue_flags) +{ + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + const struct nvme_uring_cmd *cmd = ioucmd->cmd; + struct request_queue *q = ns ? ns->queue : ctrl->admin_q; + struct nvme_uring_data d; + struct nvme_command c; + struct request *req; + unsigned int rq_flags = 0; + blk_mq_req_flags_t blk_flags = 0; + void *meta = NULL; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + c.common.opcode = READ_ONCE(cmd->opcode); + c.common.flags = READ_ONCE(cmd->flags); + if (c.common.flags) + return -EINVAL; + + c.common.command_id = 0; + c.common.nsid = cpu_to_le32(cmd->nsid); + if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid))) + return -EINVAL; + + c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2)); + c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3)); + c.common.metadata = 0; + c.common.dptr.prp1 = c.common.dptr.prp2 = 0; + c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10)); + c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11)); + c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12)); + c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13)); + c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); + c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); + + d.metadata = READ_ONCE(cmd->metadata); + d.addr = READ_ONCE(cmd->addr); + d.data_len = READ_ONCE(cmd->data_len); + d.metadata_len = READ_ONCE(cmd->metadata_len); + d.timeout_ms = READ_ONCE(cmd->timeout_ms); + + if (issue_flags & IO_URING_F_NONBLOCK) { + rq_flags = REQ_NOWAIT; + blk_flags = BLK_MQ_REQ_NOWAIT; + } + + req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr), + d.data_len, nvme_to_user_ptr(d.metadata), + d.metadata_len, 0, &meta, d.timeout_ms ? 
+ msecs_to_jiffies(d.timeout_ms) : 0, 0, rq_flags, + blk_flags); + if (IS_ERR(req)) + return PTR_ERR(req); + req->end_io_data = ioucmd; + + /* to free bio on completion, as req->bio will be null at that time */ + pdu->bio = req->bio; + pdu->meta = meta; + pdu->meta_buffer = nvme_to_user_ptr(d.metadata); + pdu->meta_len = d.metadata_len; + + blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io); + return -EIOCBQUEUED; +} + static bool is_ctrl_ioctl(unsigned int cmd) { if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) @@ -421,6 +556,42 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return __nvme_ioctl(ns, cmd, (void __user *)arg); } +static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd, + unsigned int issue_flags) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + int ret; + + BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu)); + + /* IOPOLL not supported yet */ + if (issue_flags & IO_URING_F_IOPOLL) + return -EOPNOTSUPP; + + /* NVMe passthrough requires bit SQE/CQE support */ + if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) != + (IO_URING_F_SQE128|IO_URING_F_CQE32)) + return -EOPNOTSUPP; + + switch (ioucmd->cmd_op) { + case NVME_URING_CMD_IO: + ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + +int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) +{ + struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev, + struct nvme_ns, cdev); + + return nvme_ns_uring_cmd(ns, ioucmd, issue_flags); +} + #ifdef CONFIG_NVME_MULTIPATH static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp, struct nvme_ns_head *head, int srcu_idx) @@ -487,6 +658,21 @@ out_unlock: srcu_read_unlock(&head->srcu, srcu_idx); return ret; } + +int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, + unsigned int issue_flags) +{ + struct cdev *cdev = file_inode(ioucmd->file)->i_cdev; + struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev); + int srcu_idx = srcu_read_lock(&head->srcu); + struct nvme_ns *ns = nvme_find_path(head); + int ret = -EINVAL; + + if (ns) + ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} #endif /* CONFIG_NVME_MULTIPATH */ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index d464fdf978fb..d3e2440d8abb 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -437,6 +437,7 @@ static const struct file_operations nvme_ns_head_chr_fops = { .release = nvme_ns_head_chr_release, .unlocked_ioctl = nvme_ns_head_chr_ioctl, .compat_ioctl = compat_ptr_ioctl, + .uring_cmd = nvme_ns_head_chr_uring_cmd, }; static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a2b53ca63335..086ccbdd7003 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -782,6 +782,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); long nvme_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, + unsigned int issue_flags); +int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, + unsigned int issue_flags); int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo); extern const struct attribute_group 
*nvme_ns_id_attr_groups[]; diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index b2e43185e3b5..04e458c649ab 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -70,6 +70,28 @@ struct nvme_passthru_cmd64 { __u64 result; }; +/* same as struct nvme_passthru_cmd64, minus the 8b result field */ +struct nvme_uring_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; +}; + #define nvme_admin_cmd nvme_passthru_cmd #define NVME_IOCTL_ID _IO('N', 0x40) @@ -83,4 +105,7 @@ struct nvme_passthru_cmd64 { #define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) #define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64) +/* io_uring async commands: */ +#define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd) + #endif /* _UAPI_LINUX_NVME_IOCTL_H */ -- cgit v1.2.3-59-g8ed1b From f569add47119fa910ed7711b26b8d38e21f7ea77 Mon Sep 17 00:00:00 2001 From: Anuj Gupta Date: Wed, 11 May 2022 11:17:49 +0530 Subject: nvme: add vectored-io support for uring-cmd wire up support for async passthru that takes an array of buffers (using iovec). Exposed via a new op NVME_URING_CMD_IO_VEC. Same 'struct nvme_uring_cmd' is to be used with - 1. cmd.addr as base address of user iovec array 2. cmd.data_len as count of iovec array elements Signed-off-by: Kanchan Joshi Signed-off-by: Anuj Gupta Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220511054750.20432-6-joshi.k@samsung.com Signed-off-by: Jens Axboe --- drivers/nvme/host/ioctl.c | 9 ++++++--- include/uapi/linux/nvme_ioctl.h | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 92d695262d8f..7b0e2c9cdcae 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -399,7 +399,7 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err) } static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, - struct io_uring_cmd *ioucmd, unsigned int issue_flags) + struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); const struct nvme_uring_cmd *cmd = ioucmd->cmd; @@ -449,7 +449,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr), d.data_len, nvme_to_user_ptr(d.metadata), d.metadata_len, 0, &meta, d.timeout_ms ? 
- msecs_to_jiffies(d.timeout_ms) : 0, 0, rq_flags, + msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags, blk_flags); if (IS_ERR(req)) return PTR_ERR(req); @@ -575,7 +575,10 @@ static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd, switch (ioucmd->cmd_op) { case NVME_URING_CMD_IO: - ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags); + ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false); + break; + case NVME_URING_CMD_IO_VEC: + ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true); break; default: ret = -ENOTTY; diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index 04e458c649ab..0b1876aa5a59 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -107,5 +107,6 @@ struct nvme_uring_cmd { /* io_uring async commands: */ #define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd) +#define NVME_URING_CMD_IO_VEC _IOWR('N', 0x81, struct nvme_uring_cmd) #endif /* _UAPI_LINUX_NVME_IOCTL_H */ -- cgit v1.2.3-59-g8ed1b From 07343110b293456d30393e89b86c4dee1ac051c8 Mon Sep 17 00:00:00 2001 From: Feng Zhou Date: Wed, 11 May 2022 17:38:53 +0800 Subject: bpf: add bpf_map_lookup_percpu_elem for percpu map Add a new eBPF helper, bpf_map_lookup_percpu_elem. The implementation is straightforward: it follows the map_lookup_elem implementation for percpu maps, but takes an extra cpu parameter and returns the element stored for that specific CPU. Signed-off-by: Feng Zhou Link: https://lore.kernel.org/r/20220511093854.411-2-zhoufeng.zf@bytedance.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 2 ++ include/uapi/linux/bpf.h | 9 +++++++++ kernel/bpf/arraymap.c | 15 +++++++++++++++ kernel/bpf/core.c | 1 + kernel/bpf/hashtab.c | 32 ++++++++++++++++++++++++++++++++ kernel/bpf/helpers.c | 18 ++++++++++++++++++ kernel/bpf/verifier.c | 17 +++++++++++++++-- kernel/trace/bpf_trace.c | 2 ++ tools/include/uapi/linux/bpf.h | 9 +++++++++ 9 files changed, 103 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3ded8711457f..5061ccd8b2dc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -89,6 +89,7 @@ struct bpf_map_ops { int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); int (*map_pop_elem)(struct bpf_map *map, void *value); int (*map_peek_elem)(struct bpf_map *map, void *value); + void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu); /* funcs called by prog_array and perf_event_array map */ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, @@ -2184,6 +2185,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; extern const struct bpf_func_proto bpf_map_push_elem_proto; extern const struct bpf_func_proto bpf_map_pop_elem_proto; extern const struct bpf_func_proto bpf_map_peek_elem_proto; +extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bc7f89948f54..0210f85131b3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5164,6 +5164,14 @@ union bpf_attr { * if not NULL, is a reference which must be released using its * corresponding release function, or moved into a BPF map before * program exit.
+ * + * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu) + * Description + * Perform a lookup in *percpu map* for an entry associated to + * *key* on *cpu*. + * Return + * Map value associated to *key* on *cpu*, or **NULL** if no entry + * was found or *cpu* is invalid. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5361,6 +5369,7 @@ union bpf_attr { FN(skb_set_tstamp), \ FN(ima_file_hash), \ FN(kptr_xchg), \ + FN(map_lookup_percpu_elem), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 724613da6576..fe40d3b9458f 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -243,6 +243,20 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) return this_cpu_ptr(array->pptrs[index & array->index_mask]); } +static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + + if (cpu >= nr_cpu_ids) + return NULL; + + if (unlikely(index >= array->map.max_entries)) + return NULL; + + return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); +} + int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) { struct bpf_array *array = container_of(map, struct bpf_array, map); @@ -725,6 +739,7 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_lookup_elem = percpu_array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, + .map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem, .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = array_map_check_btf, .map_lookup_batch = generic_map_lookup_batch, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 13e9dbeeedf3..76f68d0a7ae8 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2619,6 +2619,7 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak; const struct bpf_func_proto bpf_map_push_elem_proto __weak; const struct bpf_func_proto bpf_map_pop_elem_proto __weak; const struct bpf_func_proto bpf_map_peek_elem_proto __weak; +const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak; const struct bpf_func_proto bpf_spin_lock_proto __weak; const struct bpf_func_proto bpf_spin_unlock_proto __weak; const struct bpf_func_proto bpf_jiffies64_proto __weak; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 705841279d16..17fb69c0e0dc 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -2199,6 +2199,20 @@ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) return NULL; } +static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) +{ + struct htab_elem *l; + + if (cpu >= nr_cpu_ids) + return NULL; + + l = __htab_map_lookup_elem(map, key); + if (l) + return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); + else + return NULL; +} + static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) { struct htab_elem *l = __htab_map_lookup_elem(map, key); @@ -2211,6 +2225,22 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) return NULL; } +static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) +{ + struct htab_elem *l; + + if (cpu >= nr_cpu_ids) + return NULL; + + l = __htab_map_lookup_elem(map, key); + if (l) { + bpf_lru_node_set_ref(&l->lru_node); + return per_cpu_ptr(htab_elem_get_ptr(l, 
map->key_size), cpu); + } + + return NULL; +} + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) { struct htab_elem *l; @@ -2300,6 +2330,7 @@ const struct bpf_map_ops htab_percpu_map_ops = { .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem, .map_update_elem = htab_percpu_map_update_elem, .map_delete_elem = htab_map_delete_elem, + .map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, .map_set_for_each_callback_args = map_set_for_each_callback_args, .map_for_each_callback = bpf_for_each_hash_elem, @@ -2318,6 +2349,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem, .map_update_elem = htab_lru_percpu_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, + .map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, .map_set_for_each_callback_args = map_set_for_each_callback_args, .map_for_each_callback = bpf_for_each_hash_elem, diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 3e709fed5306..d5f104a39092 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -119,6 +119,22 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = { .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, }; +BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) +{ + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); +} + +const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { + .func = bpf_map_lookup_percpu_elem, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + .arg3_type = ARG_ANYTHING, +}; + const struct bpf_func_proto bpf_get_prandom_u32_proto = { .func = bpf_user_rnd_u32, .gpl_only = false, @@ -1420,6 +1436,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; + case BPF_FUNC_map_lookup_percpu_elem: + return &bpf_map_lookup_percpu_elem_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_get_smp_processor_id: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c27fee73a2cb..05c1b6656824 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6137,6 +6137,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) goto error; break; + case BPF_FUNC_map_lookup_percpu_elem: + if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && + map->map_type != BPF_MAP_TYPE_PERCPU_HASH && + map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) + goto error; + break; case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_delete: if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) @@ -6750,7 +6756,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_peek_elem && func_id != BPF_FUNC_for_each_map_elem && - func_id != BPF_FUNC_redirect_map) + func_id != BPF_FUNC_redirect_map && + func_id != BPF_FUNC_map_lookup_percpu_elem) return 0; if (map == NULL) { @@ -13810,7 +13817,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem || insn->imm == BPF_FUNC_redirect_map || - insn->imm == 
BPF_FUNC_for_each_map_elem)) { + insn->imm == BPF_FUNC_for_each_map_elem || + insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; @@ -13859,6 +13867,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env) bpf_callback_t callback_fn, void *callback_ctx, u64 flags))NULL)); + BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, + (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL)); patch_map_ops_generic: switch (insn->imm) { @@ -13886,6 +13896,9 @@ patch_map_ops_generic: case BPF_FUNC_for_each_map_elem: insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); continue; + case BPF_FUNC_map_lookup_percpu_elem: + insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); + continue; } goto patch_call_imm; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2eaac094caf8..7141ca8a1c2d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1197,6 +1197,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; + case BPF_FUNC_map_lookup_percpu_elem: + return &bpf_map_lookup_percpu_elem_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_ktime_get_boot_ns: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bc7f89948f54..0210f85131b3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5164,6 +5164,14 @@ union bpf_attr { * if not NULL, is a reference which must be released using its * corresponding release function, or moved into a BPF map before * program exit. + * + * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu) + * Description + * Perform a lookup in *percpu map* for an entry associated to + * *key* on *cpu*. + * Return + * Map value associated to *key* on *cpu*, or **NULL** if no entry + * was found or *cpu* is invalid. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5361,6 +5369,7 @@ union bpf_attr { FN(skb_set_tstamp), \ FN(ima_file_hash), \ FN(kptr_xchg), \ + FN(map_lookup_percpu_elem), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b From c9b516f16be5896a3d798f8efb03acbd2ceec715 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 12 May 2022 20:38:36 -0700 Subject: ELF, uapi: fixup ELF_ST_TYPE definition This is very theoretical compile failure: ELF_ST_TYPE(st_info = A) Cast will bind first and st_info will stop being lvalue: error: lvalue required as left operand of assignment Given that the only use of this macro is ELF_ST_TYPE(sym->st_info) where st_info is "unsigned char" I've decided to remove cast especially given that companion macro ELF_ST_BIND doesn't use cast. Link: https://lkml.kernel.org/r/Ymv7G1BeX4kt3obz@localhost.localdomain Signed-off-by: Alexey Dobriyan Acked-by: Kees Cook Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton --- include/uapi/linux/elf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 787c657bfae8..237f21a5e0f6 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -134,7 +134,7 @@ typedef __s64 Elf64_Sxword; #define STT_TLS 6 #define ELF_ST_BIND(x) ((x) >> 4) -#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf) +#define ELF_ST_TYPE(x) ((x) & 0xf) #define ELF32_ST_BIND(x) ELF_ST_BIND(x) #define ELF32_ST_TYPE(x) ELF_ST_TYPE(x) #define ELF64_ST_BIND(x) ELF_ST_BIND(x) -- cgit v1.2.3-59-g8ed1b From 783eb354fb3dcd598e8e7e8a2ed88c0fb6ce5d2f Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 4 Apr 2022 15:19:41 +0900 Subject: agpgart.h: do not include from exported header Commit 35d0f1d54ecd ("include/uapi/linux/agpgart.h: include stdlib.h in userspace") included to fix the unknown size_t error, but I do not think it is the right fix. This header already uses __kernel_size_t a few lines below. Replace the remaining size_t, and stop including . Signed-off-by: Masahiro Yamada Signed-off-by: Arnd Bergmann Reviewed-by: Christoph Hellwig Reviewed-by: Nick Desaulniers --- include/uapi/linux/agpgart.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h index f5251045181a..9cc3448c0b5b 100644 --- a/include/uapi/linux/agpgart.h +++ b/include/uapi/linux/agpgart.h @@ -52,7 +52,6 @@ #ifndef __KERNEL__ #include -#include struct agp_version { __u16 major; @@ -64,10 +63,10 @@ typedef struct _agp_info { __u32 bridge_id; /* bridge vendor/device */ __u32 agp_mode; /* mode info of bridge */ unsigned long aper_base;/* base of aperture */ - size_t aper_size; /* size of aperture */ - size_t pg_total; /* max pages (swap + system) */ - size_t pg_system; /* max pages (system) */ - size_t pg_used; /* current pages used */ + __kernel_size_t aper_size; /* size of aperture */ + __kernel_size_t pg_total; /* max pages (swap + system) */ + __kernel_size_t pg_system; /* max pages (system) */ + __kernel_size_t pg_used; /* current pages used */ } agp_info; typedef struct _agp_setup { -- cgit v1.2.3-59-g8ed1b From 1339f24b336db5ded9811f3fe7b948e0de207785 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 7 May 2022 14:18:44 -0600 Subject: io_uring: allow allocated fixed files for openat/openat2 If the application passes in IORING_FILE_INDEX_ALLOC as the file_slot, then that's a hint to allocate a fixed file descriptor rather than have one be passed in directly. This can be useful for having io_uring manage the direct descriptor space. Normal open direct requests will complete with 0 for success, and < 0 in case of error. If io_uring is asked to allocated the direct descriptor, then the direct descriptor is returned in case of success. 
Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 36 +++++++++++++++++++++++++++++++++--- include/uapi/linux/io_uring.h | 9 +++++++++ 2 files changed, 42 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 8c40411a7e78..f448264a1067 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4697,7 +4697,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return __io_openat_prep(req, sqe); } -static int __maybe_unused io_file_bitmap_get(struct io_ring_ctx *ctx) +static int io_file_bitmap_get(struct io_ring_ctx *ctx) { struct io_file_table *table = &ctx->file_table; unsigned long nr = ctx->nr_user_files; @@ -4722,6 +4722,36 @@ static int __maybe_unused io_file_bitmap_get(struct io_ring_ctx *ctx) return -ENFILE; } +static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags, + struct file *file, unsigned int file_slot) +{ + bool alloc_slot = file_slot == IORING_FILE_INDEX_ALLOC; + struct io_ring_ctx *ctx = req->ctx; + int ret; + + if (alloc_slot) { + io_ring_submit_lock(ctx, issue_flags); + ret = io_file_bitmap_get(ctx); + if (unlikely(ret < 0)) { + io_ring_submit_unlock(ctx, issue_flags); + return ret; + } + + file_slot = ret; + } else { + file_slot--; + } + + ret = io_install_fixed_file(req, file, issue_flags, file_slot); + if (alloc_slot) { + io_ring_submit_unlock(ctx, issue_flags); + if (!ret) + return file_slot; + } + + return ret; +} + static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) { struct open_flags op; @@ -4777,8 +4807,8 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) if (!fixed) fd_install(ret, file); else - ret = io_install_fixed_file(req, file, issue_flags, - req->open.file_slot - 1); + ret = io_fixed_fd_install(req, issue_flags, file, + req->open.file_slot); err: putname(req->open.filename); req->flags &= ~REQ_F_NEED_CLEANUP; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 06621a278cb6..b7f02a55032a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -63,6 +63,15 @@ struct io_uring_sqe { __u64 __pad2[2]; }; +/* + * If sqe->file_index is set to this for opcodes that instantiate a new + * direct descriptor (like openat/openat2/accept), then io_uring will allocate + * an available direct descriptor instead of having the application pass one + * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE + * if the space is full. + */ +#define IORING_FILE_INDEX_ALLOC (~0U) + enum { IOSQE_FIXED_FILE_BIT, IOSQE_IO_DRAIN_BIT, -- cgit v1.2.3-59-g8ed1b From a8da73a32b6e9271a613e5a0e90a8c35f40abeb8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 9 May 2022 09:29:14 -0600 Subject: io_uring: add flag for allocating a fully sparse direct descriptor space Currently, to set up a fully sparse descriptor space upfront, the app needs to allocate an array of the full size and memset it to -1 and then pass that in. Make this a bit easier by allowing a flag that simply does this internally rather than needing to copy each slot separately. This works with IORING_REGISTER_FILES2 as the flag is set in struct io_uring_rsrc_register, and is only allowed when the type is IORING_RSRC_FILE as this doesn't make sense for registered buffers.
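For illustration (not part of this patch), the new flag can be exercised through the raw register syscall as sketched below; recent liburing wraps the same operation as io_uring_register_files_sparse():

	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/io_uring.h>

	/* Sketch: register 'nr' sparse fixed-file slots without building a
	 * userspace array of -1 descriptors first. */
	static int register_sparse_files(int ring_fd, unsigned int nr)
	{
		struct io_uring_rsrc_register rr;

		memset(&rr, 0, sizeof(rr));	/* resv2/data/tags must be 0 */
		rr.nr = nr;
		rr.flags = IORING_RSRC_REGISTER_SPARSE;
		return syscall(__NR_io_uring_register, ring_fd,
			       IORING_REGISTER_FILES2, &rr, sizeof(rr));
	}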
Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 15 ++++++++++++--- include/uapi/linux/io_uring.h | 8 +++++++- 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index b75a49d3831b..362189819898 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -9111,12 +9111,12 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { struct io_fixed_file *file_slot; - if (copy_from_user(&fd, &fds[i], sizeof(fd))) { + if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) { ret = -EFAULT; goto fail; } /* allow sparse sets */ - if (fd == -1) { + if (!fds || fd == -1) { ret = -EINVAL; if (unlikely(*io_get_tag_slot(ctx->file_data, i))) goto fail; @@ -11759,14 +11759,20 @@ static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, memset(&rr, 0, sizeof(rr)); if (copy_from_user(&rr, arg, size)) return -EFAULT; - if (!rr.nr || rr.resv || rr.resv2) + if (!rr.nr || rr.resv2) + return -EINVAL; + if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE) return -EINVAL; switch (type) { case IORING_RSRC_FILE: + if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data) + break; return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data), rr.nr, u64_to_user_ptr(rr.tags)); case IORING_RSRC_BUFFER: + if (rr.flags & IORING_RSRC_REGISTER_SPARSE) + break; return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data), rr.nr, u64_to_user_ptr(rr.tags)); } @@ -11935,6 +11941,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ret = io_sqe_buffers_unregister(ctx); break; case IORING_REGISTER_FILES: + ret = -EFAULT; + if (!arg) + break; ret = io_sqe_files_register(ctx, arg, nr_args, NULL); break; case IORING_UNREGISTER_FILES: diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index b7f02a55032a..36ec43dc7bf9 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -396,9 +396,15 @@ struct io_uring_files_update { __aligned_u64 /* __s32 * */ fds; }; +/* + * Register a fully sparse file space, rather than pass in an array of all + * -1 file descriptors. + */ +#define IORING_RSRC_REGISTER_SPARSE (1U << 0) + struct io_uring_rsrc_register { __u32 nr; - __u32 resv; + __u32 flags; __u64 resv2; __aligned_u64 data; __aligned_u64 tags; -- cgit v1.2.3-59-g8ed1b From b1f9e876862d8f7176299ec4fb2108bc1045cbc8 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Thu, 12 May 2022 20:22:56 -0700 Subject: mm/uffd: enable write protection for shmem & hugetlbfs We've had all the necessary changes ready for both shmem and hugetlbfs. Turn on all the shmem/hugetlbfs switches for userfaultfd-wp. We can expand UFFD_API_RANGE_IOCTLS_BASIC with _UFFDIO_WRITEPROTECT too because all existing types now support write protection mode. Since vma_can_userfault() will be used elsewhere, move into userfaultfd_k.h. Link: https://lkml.kernel.org/r/20220405014926.15101-1-peterx@redhat.com Signed-off-by: Peter Xu Cc: Alistair Popple Cc: Andrea Arcangeli Cc: Axel Rasmussen Cc: David Hildenbrand Cc: Hugh Dickins Cc: Jerome Glisse Cc: "Kirill A . 
Shutemov" Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 21 +++------------------ include/linux/userfaultfd_k.h | 20 ++++++++++++++++++++ include/uapi/linux/userfaultfd.h | 10 ++++++++-- mm/userfaultfd.c | 9 +++------ 4 files changed, 34 insertions(+), 26 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 78b68e0f9774..e943370107d0 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1258,24 +1258,6 @@ static __always_inline int validate_range(struct mm_struct *mm, return 0; } -static inline bool vma_can_userfault(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - /* FIXME: add WP support to hugetlbfs and shmem */ - if (vm_flags & VM_UFFD_WP) { - if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) - return false; - } - - if (vm_flags & VM_UFFD_MINOR) { - if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma))) - return false; - } - - return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || - vma_is_shmem(vma); -} - static int userfaultfd_register(struct userfaultfd_ctx *ctx, unsigned long arg) { @@ -1956,6 +1938,9 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, #endif #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; +#endif +#ifndef CONFIG_PTE_MARKER_UFFD_WP + uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; #endif uffdio_api.ioctls = UFFD_API_IOCTLS; ret = -EFAULT; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index e7afcdfd4b46..732b522bacb7 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -18,6 +18,7 @@ #include #include #include +#include /* The set of all possible UFFD-related VM flags. */ #define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR) @@ -140,6 +141,25 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) return vma->vm_flags & __VM_UFFD_FLAGS; } +static inline bool vma_can_userfault(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + if (vm_flags & VM_UFFD_MINOR) + return is_vm_hugetlb_page(vma) || vma_is_shmem(vma); + +#ifndef CONFIG_PTE_MARKER_UFFD_WP + /* + * If user requested uffd-wp but not enabled pte markers for + * uffd-wp, then shmem & hugetlbfs are not supported but only + * anonymous. 
+ */ + if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma)) + return false; +#endif + return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || + vma_is_shmem(vma); +} + extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); extern void dup_userfaultfd_complete(struct list_head *); diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index ef739054cb1c..7d32b1e797fb 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -33,7 +33,8 @@ UFFD_FEATURE_THREAD_ID | \ UFFD_FEATURE_MINOR_HUGETLBFS | \ UFFD_FEATURE_MINOR_SHMEM | \ - UFFD_FEATURE_EXACT_ADDRESS) + UFFD_FEATURE_EXACT_ADDRESS | \ + UFFD_FEATURE_WP_HUGETLBFS_SHMEM) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -47,7 +48,8 @@ #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ - (__u64)1 << _UFFDIO_CONTINUE) + (__u64)1 << _UFFDIO_CONTINUE | \ + (__u64)1 << _UFFDIO_WRITEPROTECT) /* * Valid ioctl command number range with this API is from 0x00 to @@ -194,6 +196,9 @@ struct uffdio_api { * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page * faults would be provided and the offset within the page would not be * masked. + * + * UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd + * write-protection mode is supported on both shmem and hugetlbfs. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -207,6 +212,7 @@ struct uffdio_api { #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) #define UFFD_FEATURE_MINOR_SHMEM (1<<10) #define UFFD_FEATURE_EXACT_ADDRESS (1<<11) +#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) __u64 features; __u64 ioctls; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 01edc18902c5..4f4892a5f767 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -732,15 +732,12 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, err = -ENOENT; dst_vma = find_dst_vma(dst_mm, start, len); - /* - * Make sure the vma is not shared, that the dst range is - * both valid and fully within a single existing vma. - */ - if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) + + if (!dst_vma) goto out_unlock; if (!userfaultfd_wp(dst_vma)) goto out_unlock; - if (!vma_is_anonymous(dst_vma)) + if (!vma_can_userfault(dst_vma, dst_vma->vm_flags)) goto out_unlock; if (is_vm_hugetlb_page(dst_vma)) { -- cgit v1.2.3-59-g8ed1b From 390ed29b5e425ba00da2b6113b74a14949f71b02 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Sat, 14 May 2022 22:20:43 +0800 Subject: io_uring: add IORING_ACCEPT_MULTISHOT for accept Add an accept flag IORING_ACCEPT_MULTISHOT for accept, to support multishot accept.
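A sketch of the userspace side (liburing used for the SQE plumbing; at this point the flag is set by hand, a dedicated io_uring_prep_multishot_accept() helper only appeared in later liburing):

    #include <liburing.h>

    /* One submission, many CQEs: each accepted connection posts a CQE with
     * the new fd in cqe->res until the request terminates with an error. */
    static void prep_accept_multishot(struct io_uring_sqe *sqe, int listen_fd)
    {
        io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
        sqe->ioprio |= IORING_ACCEPT_MULTISHOT; /* accept flags live in sqe->ioprio */
    }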
Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20220514142046.58072-2-haoxu.linux@gmail.com Signed-off-by: Jens Axboe --- include/uapi/linux/io_uring.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 36ec43dc7bf9..15f821af9242 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -232,6 +232,11 @@ enum { */ #define IORING_RECVSEND_POLL_FIRST (1U << 0) +/* + * accept flags stored in sqe->ioprio + */ +#define IORING_ACCEPT_MULTISHOT (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3-59-g8ed1b From 8fa10ee183c3a1ecb53e81c95895ed5bc2a5530a Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sun, 15 May 2022 21:58:31 +0100 Subject: cdrom: mark CDROMGETSPINDOWN/CDROMSETSPINDOWN obsolete These were only implemented by the IDE CD driver, which has since been removed. Given that nobody is likely to create new CD/DVD hardware (and associated drivers) we can mark these appropriately. Cc: Jens Axboe Cc: Christoph Hellwig Cc: Phillip Potter Signed-off-by: Paul Gortmaker Link: https://lore.kernel.org/all/20220427132436.12795-3-paul.gortmaker@windriver.com Signed-off-by: Phillip Potter Link: https://lore.kernel.org/r/20220515205833.944139-4-phil@philpotter.co.uk Signed-off-by: Jens Axboe --- Documentation/userspace-api/ioctl/cdrom.rst | 6 ++++++ include/uapi/linux/cdrom.h | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/ioctl/cdrom.rst b/Documentation/userspace-api/ioctl/cdrom.rst index 682948fc88a3..2ad91dbebd7c 100644 --- a/Documentation/userspace-api/ioctl/cdrom.rst +++ b/Documentation/userspace-api/ioctl/cdrom.rst @@ -718,6 +718,9 @@ CDROMPLAYBLK CDROMGETSPINDOWN + Obsolete, was ide-cd only + + usage:: char spindown; @@ -736,6 +739,9 @@ CDROMGETSPINDOWN CDROMSETSPINDOWN + Obsolete, was ide-cd only + + usage:: char spindown diff --git a/include/uapi/linux/cdrom.h b/include/uapi/linux/cdrom.h index 804ff8d98f71..011e594e4a0d 100644 --- a/include/uapi/linux/cdrom.h +++ b/include/uapi/linux/cdrom.h @@ -103,7 +103,7 @@ #define CDROMREADALL 0x5318 /* read all 2646 bytes */ /* - * These ioctls are (now) only in ide-cd.c for controlling + * These ioctls were only in (now removed) ide-cd.c for controlling * drive spindown time. They should be implemented in the * Uniform driver, via generic packet commands, GPCMD_MODE_SELECT_10, * GPCMD_MODE_SENSE_10 and the GPMODE_POWER_PAGE... -- cgit v1.2.3-59-g8ed1b From 89527be8d8d672773eeaec910118a6e84fb597e3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2022 11:33:56 -0700 Subject: net: add IFLA_TSO_{MAX_SIZE|SEGS} attributes New netlink attributes IFLA_TSO_MAX_SIZE and IFLA_TSO_MAX_SEGS are used to report to user-space the device TSO limits. ip -d link sh dev eth1 ... tso_max_size 65536 tso_max_segs 65535 Signed-off-by: Eric Dumazet Acked-by: Alexander Duyck Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 2 ++ net/core/rtnetlink.c | 6 ++++++ tools/include/uapi/linux/if_link.h | 2 ++ 3 files changed, 10 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index d1e600816b82..5f58dcfe2787 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -368,6 +368,8 @@ enum { IFLA_PARENT_DEV_NAME, IFLA_PARENT_DEV_BUS_NAME, IFLA_GRO_MAX_SIZE, + IFLA_TSO_MAX_SIZE, + IFLA_TSO_MAX_SEGS, __IFLA_MAX }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index bdc891326102..f35cc21298ac 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1064,6 +1064,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */ + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */ + + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */ + + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ @@ -1769,6 +1771,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) || nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) || nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) || + nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) || + nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) || #ifdef CONFIG_RPS nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || #endif @@ -1922,6 +1926,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, + [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, + [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index e1ba2d51b717..b339bf2196ca 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -348,6 +348,8 @@ enum { IFLA_PARENT_DEV_NAME, IFLA_PARENT_DEV_BUS_NAME, IFLA_GRO_MAX_SIZE, + IFLA_TSO_MAX_SIZE, + IFLA_TSO_MAX_SEGS, __IFLA_MAX }; -- cgit v1.2.3-59-g8ed1b From f04fbcc64e4be16185151f9fca44ea1b3d074bd0 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 20 Apr 2022 16:08:27 +0800 Subject: btrfs: move definition of btrfs_raid_types to volumes.h It's only internally used as another way to represent btrfs profiles, it's not exposed through any on-disk format, in fact this btrfs_raid_types diverges from the on-disk format values. Furthermore, since it's an internal structure, its definition can change in the future.
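For context, the enum's value is as a dense array index for per-profile attributes inside btrfs (cf. btrfs_raid_array), which is exactly why it can live in volumes.h and change freely; a purely illustrative lookup table:

    /* Illustrative only, not part of this patch. */
    static const char *const raid_names[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10]  = "raid10",
        [BTRFS_RAID_RAID1]   = "raid1",
        [BTRFS_RAID_DUP]     = "dup",
        [BTRFS_RAID_RAID0]   = "raid0",
        [BTRFS_RAID_SINGLE]  = "single",
        [BTRFS_RAID_RAID5]   = "raid5",
        [BTRFS_RAID_RAID6]   = "raid6",
        [BTRFS_RAID_RAID1C3] = "raid1c3",
        [BTRFS_RAID_RAID1C4] = "raid1c4",
    };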
Reviewed-by: Johannes Thumshirn Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/space-info.h | 2 ++ fs/btrfs/volumes.h | 13 +++++++++++++ include/uapi/linux/btrfs_tree.h | 13 ------------- 3 files changed, 15 insertions(+), 13 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index a803e29bd781..c096695598c1 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -3,6 +3,8 @@ #ifndef BTRFS_SPACE_INFO_H #define BTRFS_SPACE_INFO_H +#include "volumes.h" + struct btrfs_space_info { spinlock_t lock; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 197877e684df..7b82aae89454 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -17,6 +17,19 @@ extern struct mutex uuid_mutex; #define BTRFS_STRIPE_LEN SZ_64K +enum btrfs_raid_types { + BTRFS_RAID_RAID10, + BTRFS_RAID_RAID1, + BTRFS_RAID_DUP, + BTRFS_RAID_RAID0, + BTRFS_RAID_SINGLE, + BTRFS_RAID_RAID5, + BTRFS_RAID_RAID6, + BTRFS_RAID_RAID1C3, + BTRFS_RAID_RAID1C4, + BTRFS_NR_RAID_TYPES +}; + struct btrfs_io_geometry { /* remaining bytes before crossing a stripe */ u64 len; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index b069752a8ecf..d4117152d907 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -880,19 +880,6 @@ struct btrfs_dev_replace_item { #define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ BTRFS_SPACE_INFO_GLOBAL_RSV) -enum btrfs_raid_types { - BTRFS_RAID_RAID10, - BTRFS_RAID_RAID1, - BTRFS_RAID_DUP, - BTRFS_RAID_RAID0, - BTRFS_RAID_SINGLE, - BTRFS_RAID_RAID5, - BTRFS_RAID_RAID6, - BTRFS_RAID_RAID1C3, - BTRFS_RAID_RAID1C4, - BTRFS_NR_RAID_TYPES -}; - #define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \ BTRFS_BLOCK_GROUP_SYSTEM | \ BTRFS_BLOCK_GROUP_METADATA) -- cgit v1.2.3-59-g8ed1b From 1c05bb947f6464756174830b778aabf8f9d6ed0e Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Mon, 16 May 2022 12:12:02 +0200 Subject: include/uapi/linux/vfio.h: Fix trivial typo - _IORW should be _IOWR instead There is no macro called _IORW, so use _IOWR in the comment instead. Signed-off-by: Thomas Huth Reviewed-by: Cornelia Huck Link: https://lore.kernel.org/r/20220516101202.88373-1-thuth@redhat.com Signed-off-by: Alex Williamson --- include/uapi/linux/vfio.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index fea86061b44e..733a1cddde30 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -643,7 +643,7 @@ enum { }; /** - * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12, + * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12, * struct vfio_pci_hot_reset_info) * * Return: 0 on success, -errno on failure: @@ -770,7 +770,7 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) /** - * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17, + * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17, * struct vfio_device_feature) * * Get, set, or probe feature data of the device. The feature is selected -- cgit v1.2.3-59-g8ed1b From 9f39d36530e5678d092d53c5c2c60d82b4dcc169 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Sat, 7 May 2022 13:55:58 +0200 Subject: can: isotp: add support for transmission without flow control Usually the ISO 15765-2 protocol is a point-to-point protocol to transfer segmented PDUs to a dedicated receiver. 
This receiver sends a flow control message to specify protocol options and timings (e.g. block size / STmin). The so called functional addressing communication allows a 1:N communication but is limited to a single frame length. This new CAN_ISOTP_CF_BROADCAST allows an unconfirmed 1:N communication with PDU length that would not fit into a single frame. This feature is not covered by the ISO 15765-2 standard. Link: https://lore.kernel.org/all/20220507115558.19065-1-socketcan@hartkopp.net Signed-off-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can/isotp.h | 25 ++++++----- net/can/isotp.c | 100 ++++++++++++++++++++++++++++++++--------- 2 files changed, 92 insertions(+), 33 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index 590f8aea2b6d..439c982f7e81 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -124,18 +124,19 @@ struct can_isotp_ll_options { /* flags for isotp behaviour */ -#define CAN_ISOTP_LISTEN_MODE 0x001 /* listen only (do not send FC) */ -#define CAN_ISOTP_EXTEND_ADDR 0x002 /* enable extended addressing */ -#define CAN_ISOTP_TX_PADDING 0x004 /* enable CAN frame padding tx path */ -#define CAN_ISOTP_RX_PADDING 0x008 /* enable CAN frame padding rx path */ -#define CAN_ISOTP_CHK_PAD_LEN 0x010 /* check received CAN frame padding */ -#define CAN_ISOTP_CHK_PAD_DATA 0x020 /* check received CAN frame padding */ -#define CAN_ISOTP_HALF_DUPLEX 0x040 /* half duplex error state handling */ -#define CAN_ISOTP_FORCE_TXSTMIN 0x080 /* ignore stmin from received FC */ -#define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */ -#define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */ -#define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */ -#define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */ +#define CAN_ISOTP_LISTEN_MODE 0x0001 /* listen only (do not send FC) */ +#define CAN_ISOTP_EXTEND_ADDR 0x0002 /* enable extended addressing */ +#define CAN_ISOTP_TX_PADDING 0x0004 /* enable CAN frame padding tx path */ +#define CAN_ISOTP_RX_PADDING 0x0008 /* enable CAN frame padding rx path */ +#define CAN_ISOTP_CHK_PAD_LEN 0x0010 /* check received CAN frame padding */ +#define CAN_ISOTP_CHK_PAD_DATA 0x0020 /* check received CAN frame padding */ +#define CAN_ISOTP_HALF_DUPLEX 0x0040 /* half duplex error state handling */ +#define CAN_ISOTP_FORCE_TXSTMIN 0x0080 /* ignore stmin from received FC */ +#define CAN_ISOTP_FORCE_RXSTMIN 0x0100 /* ignore CFs depending on rx stmin */ +#define CAN_ISOTP_RX_EXT_ADDR 0x0200 /* different rx extended addressing */ +#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */ +#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */ +#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */ /* protocol machine default values */ diff --git a/net/can/isotp.c b/net/can/isotp.c index 35a1ae61744c..2caeeae8ec16 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -104,6 +104,7 @@ MODULE_ALIAS("can-proto-6"); #define FC_CONTENT_SZ 3 /* flow control content size in byte (FS/BS/STmin) */ #define ISOTP_CHECK_PADDING (CAN_ISOTP_CHK_PAD_LEN | CAN_ISOTP_CHK_PAD_DATA) +#define ISOTP_ALL_BC_FLAGS (CAN_ISOTP_SF_BROADCAST | CAN_ISOTP_CF_BROADCAST) /* Flow Status given in FC frame */ #define ISOTP_FC_CTS 0 /* clear to send */ @@ -159,6 +160,23 @@ static inline struct isotp_sock *isotp_sk(const struct sock *sk) return (struct isotp_sock *)sk; } +static 
u32 isotp_bc_flags(struct isotp_sock *so) +{ + return so->opt.flags & ISOTP_ALL_BC_FLAGS; +} + +static bool isotp_register_rxid(struct isotp_sock *so) +{ + /* no broadcast modes => register rx_id for FC frame reception */ + return (isotp_bc_flags(so) == 0); +} + +static bool isotp_register_txecho(struct isotp_sock *so) +{ + /* all modes but SF_BROADCAST register for tx echo skbs */ + return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST); +} + static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer) { struct isotp_sock *so = container_of(hrtimer, struct isotp_sock, @@ -803,7 +821,6 @@ static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so, cf->data[i] = so->tx.buf[so->tx.idx++]; so->tx.sn = 1; - so->tx.state = ISOTP_WAIT_FIRST_FC; } static void isotp_rcv_echo(struct sk_buff *skb, void *data) @@ -936,7 +953,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; /* does the given data fit into a single frame for SF_BROADCAST? */ - if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) && + if ((isotp_bc_flags(so) == CAN_ISOTP_SF_BROADCAST) && (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) { err = -EINVAL; goto err_out_drop; @@ -1000,12 +1017,41 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* don't enable wait queue for a single frame transmission */ wait_tx_done = 0; } else { - /* send first frame and wait for FC */ + /* send first frame */ isotp_create_fframe(cf, so, ae); - /* start timeout for FC */ - hrtimer_sec = 1; + if (isotp_bc_flags(so) == CAN_ISOTP_CF_BROADCAST) { + /* set timer for FC-less operation (STmin = 0) */ + if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN) + so->tx_gap = ktime_set(0, so->force_tx_stmin); + else + so->tx_gap = ktime_set(0, so->frame_txtime); + + /* disable wait for FCs due to activated block size */ + so->txfc.bs = 0; + + /* cfecho should have been zero'ed by init */ + if (so->cfecho) + pr_notice_once("can-isotp: no fc cfecho %08X\n", + so->cfecho); + + /* set consecutive frame echo tag */ + so->cfecho = *(u32 *)cf->data; + + /* switch directly to ISOTP_SENDING state */ + so->tx.state = ISOTP_SENDING; + + /* start timeout for unlikely lost echo skb */ + hrtimer_sec = 2; + } else { + /* standard flow control check */ + so->tx.state = ISOTP_WAIT_FIRST_FC; + + /* start timeout for FC */ + hrtimer_sec = 1; + } + hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0), HRTIMER_MODE_REL_SOFT); } @@ -1025,6 +1071,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (hrtimer_sec) hrtimer_cancel(&so->txtimer); + /* reset consecutive frame echo tag */ + so->cfecho = 0; + goto err_out_drop; } @@ -1120,15 +1169,17 @@ static int isotp_release(struct socket *sock) lock_sock(sk); /* remove current filters & unregister */ - if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) { + if (so->bound && isotp_register_txecho(so)) { if (so->ifindex) { struct net_device *dev; dev = dev_get_by_index(net, so->ifindex); if (dev) { - can_rx_unregister(net, dev, so->rxid, - SINGLE_MASK(so->rxid), - isotp_rcv, sk); + if (isotp_register_rxid(so)) + can_rx_unregister(net, dev, so->rxid, + SINGLE_MASK(so->rxid), + isotp_rcv, sk); + can_rx_unregister(net, dev, so->txid, SINGLE_MASK(so->txid), isotp_rcv_echo, sk); @@ -1164,7 +1215,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) canid_t tx_id, rx_id; int err = 0; int notify_enetdown = 0; - int do_rx_reg = 1; if (len < ISOTP_MIN_NAMELEN) 
return -EINVAL; @@ -1192,12 +1242,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out; } - /* do not register frame reception for functional addressing */ - if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) - do_rx_reg = 0; - - /* do not validate rx address for functional addressing */ - if (do_rx_reg && rx_id == tx_id) { + /* ensure different CAN IDs when the rx_id is to be registered */ + if (isotp_register_rxid(so) && rx_id == tx_id) { err = -EADDRNOTAVAIL; goto out; } @@ -1222,10 +1268,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) ifindex = dev->ifindex; - if (do_rx_reg) { + if (isotp_register_rxid(so)) can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id), isotp_rcv, sk, "isotp", sk); + if (isotp_register_txecho(so)) { /* no consecutive frame echo skb in flight */ so->cfecho = 0; @@ -1294,6 +1341,15 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname, if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR)) so->opt.rx_ext_address = so->opt.ext_address; + /* these broadcast flags are not allowed together */ + if (isotp_bc_flags(so) == ISOTP_ALL_BC_FLAGS) { + /* CAN_ISOTP_SF_BROADCAST is prioritized */ + so->opt.flags &= ~CAN_ISOTP_CF_BROADCAST; + + /* give user feedback on wrong config attempt */ + ret = -EINVAL; + } + /* check for frame_txtime changes (0 => no changes) */ if (so->opt.frame_txtime) { if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO) @@ -1444,10 +1500,12 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) { - can_rx_unregister(dev_net(dev), dev, so->rxid, - SINGLE_MASK(so->rxid), - isotp_rcv, sk); + if (so->bound && isotp_register_txecho(so)) { + if (isotp_register_rxid(so)) + can_rx_unregister(dev_net(dev), dev, so->rxid, + SINGLE_MASK(so->rxid), + isotp_rcv, sk); + can_rx_unregister(dev_net(dev), dev, so->txid, SINGLE_MASK(so->txid), isotp_rcv_echo, sk); -- cgit v1.2.3-59-g8ed1b From b87f5e25b2f9deb503a61c6957c7b1680d91cfea Mon Sep 17 00:00:00 2001 From: Daniel Scally Date: Fri, 6 May 2022 01:03:48 +0200 Subject: media: uapi: Add IPU3 packed Y10 format Some platforms with an Intel IPU3 have an IR sensor producing 10 bit greyscale format data that is transmitted over a CSI-2 bus to a CIO2 device - this packs the data into 32 bytes per 25 pixels. Add an entry to the uAPI header defining that format. Signed-off-by: Daniel Scally Acked-by: Andy Shevchenko Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst | 14 +++++++++++++- drivers/media/v4l2-core/v4l2-ioctl.c | 1 + include/uapi/linux/videodev2.h | 3 ++- 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst index 8ebd58c3588f..6a387f9df3ba 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst @@ -48,6 +48,17 @@ are often referred to as greyscale formats. - ... - ... + * .. _V4L2-PIX-FMT-IPU3-Y10: + + - ``V4L2_PIX_FMT_IPU3_Y10`` + - 'ip3y' + + - Y'\ :sub:`0`\ [7:0] + - Y'\ :sub:`1`\ [5:0] Y'\ :sub:`0`\ [9:8] + - Y'\ :sub:`2`\ [3:0] Y'\ :sub:`1`\ [9:6] + - Y'\ :sub:`3`\ [1:0] Y'\ :sub:`2`\ [9:4] + - Y'\ :sub:`3`\ [9:2] + * .. 
_V4L2-PIX-FMT-Y10: - ``V4L2_PIX_FMT_Y10`` @@ -133,4 +144,5 @@ are often referred to as greyscale formats. For the Y16 and Y16_BE formats, the actual sampling precision may be lower than 16 bits. For example, 10 bits per pixel uses values in the range 0 to - 1023. + 1023. For the IPU3_Y10 format 25 pixels are packed into 32 bytes, which + leaves the 6 most significant bits of the last byte padded with 0. diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index e2636539c9db..21470de62d72 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1269,6 +1269,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break; case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break; + case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break; case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break; case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break; case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 6d465dc443b7..343b95107fce 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -569,6 +569,7 @@ struct v4l2_pix_format { /* Grey bit-packed formats */ #define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */ #define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */ +#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */ /* Palette formats */ #define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ @@ -749,7 +750,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */ #define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */ -/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ +/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ #define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */ #define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */ -- cgit v1.2.3-59-g8ed1b From 7c3e9fcad9c7d8bb5d69a576044fb16b1d2e8a01 Mon Sep 17 00:00:00 2001 From: Jérôme Pouiller Date: Tue, 17 May 2022 09:27:08 +0200 Subject: dma-buf: fix use of DMA_BUF_SET_NAME_{A,B} in userspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The typedefs u32 and u64 are not available in userspace. 
Thus the user gets an error when trying to use DMA_BUF_SET_NAME_A or DMA_BUF_SET_NAME_B: $ gcc -Wall -c -MMD -c -o ioctls_list.o ioctls_list.c In file included from /usr/include/x86_64-linux-gnu/asm/ioctl.h:1, from /usr/include/linux/ioctl.h:5, from /usr/include/asm-generic/ioctls.h:5, from ioctls_list.c:11: ioctls_list.c:463:29: error: ‘u32’ undeclared here (not in a function) 463 | { "DMA_BUF_SET_NAME_A", DMA_BUF_SET_NAME_A, -1, -1 }, // linux/dma-buf.h | ^~~~~~~~~~~~~~~~~~ ioctls_list.c:464:29: error: ‘u64’ undeclared here (not in a function) 464 | { "DMA_BUF_SET_NAME_B", DMA_BUF_SET_NAME_B, -1, -1 }, // linux/dma-buf.h | ^~~~~~~~~~~~~~~~~~ The issue was initially reported here[1]. [1]: https://github.com/jerome-pouiller/ioctl/pull/14 Signed-off-by: Jérôme Pouiller Reviewed-by: Christian König Fixes: a5bff92eaac4 ("dma-buf: Fix SET_NAME ioctl uapi") CC: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20220517072708.245265-1-Jerome.Pouiller@silabs.com Signed-off-by: Christian König --- include/uapi/linux/dma-buf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index 8e4a2ca0bcbf..b1523cb8ab30 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -92,7 +92,7 @@ struct dma_buf_sync { * between them in actual uapi, they're just different numbers. */ #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) -#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32) -#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64) +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32) +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64) #endif -- cgit v1.2.3-59-g8ed1b From c8383054506c77b814489c09877b5db83fd4abf2 Mon Sep 17 00:00:00 2001 From: Jeffle Xu Date: Mon, 25 Apr 2022 20:21:24 +0800 Subject: cachefiles: notify the user daemon when looking up cookie Fscache/CacheFiles used to serve as a local cache for a remote networking fs. A new on-demand read mode will be introduced for CacheFiles, which can boost the scenario where on-demand read semantics are needed, e.g. container image distribution. The essential difference between these two modes is seen when a cache miss occurs: In the original mode, the netfs will fetch the data from the remote server and then write it to the cache file; in on-demand read mode, fetching the data and writing it into the cache is delegated to a user daemon. As the first step, notify the user daemon when looking up a cookie. In this case, an anonymous fd is sent to the user daemon, through which the user daemon can write the fetched data to the cache file. Since the user daemon may move the anonymous fd around, e.g. through dup(), an object ID uniquely identifying the cache file is also attached. Also add one advisory flag (FSCACHE_ADV_WANT_CACHE_SIZE) suggesting that the cache file size shall be retrieved at runtime. This helps the scenario where one cache file contains multiple netfs files, e.g. for the purpose of deduplication. In this case, netfs itself has no idea of the size of the cache file, whilst the user daemon should provide the hint on it.
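A minimal sketch of the daemon side of this handshake (assumptions: the daemon has already bound a cache in on-demand mode through the usual cachefilesd-style bring-up, devfd is the open /dev/cachefiles, and fetch_object_size() is a hypothetical helper standing in for however the daemon learns the object size):

    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/cachefiles.h>

    extern long long fetch_object_size(const __u8 *keys, __u32 volume_key_size,
                                       __u32 cookie_key_size); /* hypothetical */

    static void serve_one_request(int devfd)
    {
        char buf[CACHEFILES_MSG_MAX_SIZE];
        struct pollfd pfd = { .fd = devfd, .events = POLLIN };
        ssize_t n;

        if (poll(&pfd, 1, -1) <= 0)
            return;
        n = read(devfd, buf, sizeof(buf)); /* one request per read */
        if (n <= 0)
            return;

        struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;
        if (msg->opcode == CACHEFILES_OP_OPEN) {
            struct cachefiles_open *load = (struct cachefiles_open *)msg->data;
            long long size = fetch_object_size(load->data,
                                               load->volume_key_size,
                                               load->cookie_key_size);
            char cmd[64];

            /* complete the OPEN: "copen <msg_id>,<size>" written back to the
             * control fd; load->fd stays open for writing cache data later */
            snprintf(cmd, sizeof(cmd), "copen %u,%lld", msg->msg_id, size);
            write(devfd, cmd, strlen(cmd));
        }
    }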
Signed-off-by: Jeffle Xu Link: https://lore.kernel.org/r/20220509074028.74954-3-jefflexu@linux.alibaba.com Acked-by: David Howells Signed-off-by: Gao Xiang --- fs/cachefiles/Kconfig | 12 ++ fs/cachefiles/Makefile | 1 + fs/cachefiles/daemon.c | 81 ++++++-- fs/cachefiles/internal.h | 51 +++++ fs/cachefiles/namei.c | 16 +- fs/cachefiles/ondemand.c | 378 ++++++++++++++++++++++++++++++++++++++ include/linux/fscache.h | 1 + include/trace/events/cachefiles.h | 2 + include/uapi/linux/cachefiles.h | 50 +++++ 9 files changed, 577 insertions(+), 15 deletions(-) create mode 100644 fs/cachefiles/ondemand.c create mode 100644 include/uapi/linux/cachefiles.h (limited to 'include/uapi/linux') diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig index 719faeeda168..8df715640a48 100644 --- a/fs/cachefiles/Kconfig +++ b/fs/cachefiles/Kconfig @@ -26,3 +26,15 @@ config CACHEFILES_ERROR_INJECTION help This permits error injection to be enabled in cachefiles whilst a cache is in service. + +config CACHEFILES_ONDEMAND + bool "Support for on-demand read" + depends on CACHEFILES + default n + help + This permits userspace to enable the cachefiles on-demand read mode. + In this mode, when a cache miss occurs, responsibility for fetching + the data lies with the cachefiles backend instead of with the netfs + and is delegated to userspace. + + If unsure, say N. diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile index 16d811f1a2fa..c37a7a9af10b 100644 --- a/fs/cachefiles/Makefile +++ b/fs/cachefiles/Makefile @@ -16,5 +16,6 @@ cachefiles-y := \ xattr.o cachefiles-$(CONFIG_CACHEFILES_ERROR_INJECTION) += error_inject.o +cachefiles-$(CONFIG_CACHEFILES_ONDEMAND) += ondemand.o obj-$(CONFIG_CACHEFILES) := cachefiles.o diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 7ac04ee2c0a0..d5417da7f792 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -75,6 +75,9 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = { { "inuse", cachefiles_daemon_inuse }, { "secctx", cachefiles_daemon_secctx }, { "tag", cachefiles_daemon_tag }, +#ifdef CONFIG_CACHEFILES_ONDEMAND + { "copen", cachefiles_ondemand_copen }, +#endif { "", NULL } }; @@ -108,6 +111,8 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file) INIT_LIST_HEAD(&cache->volumes); INIT_LIST_HEAD(&cache->object_list); spin_lock_init(&cache->object_list_lock); + xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC); + xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1); /* set default caching limits * - limit at 1% free space and/or free files @@ -126,6 +131,39 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file) return 0; } +static void cachefiles_flush_reqs(struct cachefiles_cache *cache) +{ + struct xarray *xa = &cache->reqs; + struct cachefiles_req *req; + unsigned long index; + + /* + * Make sure the following two operations won't be reordered. + * 1) set CACHEFILES_DEAD bit + * 2) flush requests in the xarray + * Otherwise the request may be enqueued after xarray has been + * flushed, leaving the orphan request never being completed. + * + * CPU 1 CPU 2 + * ===== ===== + * flush requests in the xarray + * test CACHEFILES_DEAD bit + * enqueue the request + * set CACHEFILES_DEAD bit + */ + smp_mb(); + + xa_lock(xa); + xa_for_each(xa, index, req) { + req->error = -EIO; + complete(&req->done); + } + xa_unlock(xa); + + xa_destroy(&cache->reqs); + xa_destroy(&cache->ondemand_ids); +} + /* * Release a cache. 
*/ @@ -139,6 +177,8 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file) set_bit(CACHEFILES_DEAD, &cache->flags); + if (cachefiles_in_ondemand_mode(cache)) + cachefiles_flush_reqs(cache); cachefiles_daemon_unbind(cache); /* clean up the control file interface */ @@ -152,23 +192,14 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file) return 0; } -/* - * Read the cache state. - */ -static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer, - size_t buflen, loff_t *pos) +static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache, + char __user *_buffer, size_t buflen) { - struct cachefiles_cache *cache = file->private_data; unsigned long long b_released; unsigned f_released; char buffer[256]; int n; - //_enter(",,%zu,", buflen); - - if (!test_bit(CACHEFILES_READY, &cache->flags)) - return 0; - /* check how much space the cache has */ cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check); @@ -206,6 +237,25 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer, return n; } +/* + * Read the cache state. + */ +static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer, + size_t buflen, loff_t *pos) +{ + struct cachefiles_cache *cache = file->private_data; + + //_enter(",,%zu,", buflen); + + if (!test_bit(CACHEFILES_READY, &cache->flags)) + return 0; + + if (cachefiles_in_ondemand_mode(cache)) + return cachefiles_ondemand_daemon_read(cache, _buffer, buflen); + else + return cachefiles_do_daemon_read(cache, _buffer, buflen); +} + /* * Take a command from cachefilesd, parse it and act on it. */ @@ -297,8 +347,13 @@ static __poll_t cachefiles_daemon_poll(struct file *file, poll_wait(file, &cache->daemon_pollwq, poll); mask = 0; - if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) - mask |= EPOLLIN; + if (cachefiles_in_ondemand_mode(cache)) { + if (!xa_empty(&cache->reqs)) + mask |= EPOLLIN; + } else { + if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) + mask |= EPOLLIN; + } if (test_bit(CACHEFILES_CULLING, &cache->flags)) mask |= EPOLLOUT; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index e80673d0ab97..4f5150a96849 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -15,6 +15,8 @@ #include #include #include +#include +#include #define CACHEFILES_DIO_BLOCK_SIZE 4096 @@ -58,8 +60,13 @@ struct cachefiles_object { enum cachefiles_content content_info:8; /* Info about content presence */ unsigned long flags; #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */ +#ifdef CONFIG_CACHEFILES_ONDEMAND + int ondemand_id; +#endif }; +#define CACHEFILES_ONDEMAND_ID_CLOSED -1 + /* * Cache files cache definition */ @@ -98,11 +105,30 @@ struct cachefiles_cache { #define CACHEFILES_DEAD 1 /* T if cache dead */ #define CACHEFILES_CULLING 2 /* T if cull engaged */ #define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */ +#define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */ char *rootdirname; /* name of cache root directory */ char *secctx; /* LSM security context */ char *tag; /* cache binding tag */ + struct xarray reqs; /* xarray of pending on-demand requests */ + struct xarray ondemand_ids; /* xarray for ondemand_id allocation */ + u32 ondemand_id_next; }; +static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache) +{ + return IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND) && + test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags); +} + +struct cachefiles_req { + 
struct cachefiles_object *object; + struct completion done; + int error; + struct cachefiles_msg msg; +}; + +#define CACHEFILES_REQ_NEW XA_MARK_1 + #include static inline @@ -250,6 +276,31 @@ extern struct file *cachefiles_create_tmpfile(struct cachefiles_object *object); extern bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache, struct cachefiles_object *object); +/* + * ondemand.c + */ +#ifdef CONFIG_CACHEFILES_ONDEMAND +extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, + char __user *_buffer, size_t buflen); + +extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache, + char *args); + +extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); + +#else +static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, + char __user *_buffer, size_t buflen) +{ + return -EOPNOTSUPP; +} + +static inline int cachefiles_ondemand_init_object(struct cachefiles_object *object) +{ + return 0; +} +#endif + /* * security.c */ diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index ca9f3e4ec4b3..facf2ebe464b 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -452,10 +452,9 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object) struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash]; struct file *file; struct path path; - uint64_t ni_size = object->cookie->object_size; + uint64_t ni_size; long ret; - ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE); cachefiles_begin_secure(cache, &saved_cred); @@ -481,6 +480,15 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object) goto out_dput; } + ret = cachefiles_ondemand_init_object(object); + if (ret < 0) { + file = ERR_PTR(ret); + goto out_unuse; + } + + ni_size = object->cookie->object_size; + ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE); + if (ni_size > 0) { trace_cachefiles_trunc(object, d_backing_inode(path.dentry), 0, ni_size, cachefiles_trunc_expand_tmpfile); @@ -586,6 +594,10 @@ static bool cachefiles_open_file(struct cachefiles_object *object, } _debug("file -> %pd positive", dentry); + ret = cachefiles_ondemand_init_object(object); + if (ret < 0) + goto error_fput; + ret = cachefiles_check_auxdata(object, file); if (ret < 0) goto check_failed; diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c new file mode 100644 index 000000000000..64fc312b16d3 --- /dev/null +++ b/fs/cachefiles/ondemand.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include +#include +#include "internal.h" + +static int cachefiles_ondemand_fd_release(struct inode *inode, + struct file *file) +{ + struct cachefiles_object *object = file->private_data; + struct cachefiles_cache *cache = object->volume->cache; + int object_id = object->ondemand_id; + + object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + xa_erase(&cache->ondemand_ids, object_id); + cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); + return 0; +} + +static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb, + struct iov_iter *iter) +{ + struct cachefiles_object *object = kiocb->ki_filp->private_data; + struct cachefiles_cache *cache = object->volume->cache; + struct file *file = object->file; + size_t len = iter->count; + loff_t pos = kiocb->ki_pos; + const struct cred *saved_cred; + int ret; + + if (!file) + return -ENOBUFS; + + cachefiles_begin_secure(cache, &saved_cred); + ret = __cachefiles_prepare_write(object, file, &pos, &len, true); + cachefiles_end_secure(cache, 
saved_cred); + if (ret < 0) + return ret; + + ret = __cachefiles_write(object, file, pos, iter, NULL, NULL); + if (!ret) + ret = len; + + return ret; +} + +static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, + int whence) +{ + struct cachefiles_object *object = filp->private_data; + struct file *file = object->file; + + if (!file) + return -ENOBUFS; + + return vfs_llseek(file, pos, whence); +} + +static const struct file_operations cachefiles_ondemand_fd_fops = { + .owner = THIS_MODULE, + .release = cachefiles_ondemand_fd_release, + .write_iter = cachefiles_ondemand_fd_write_iter, + .llseek = cachefiles_ondemand_fd_llseek, +}; + +/* + * OPEN request Completion (copen) + * - command: "copen <id>,<cache_size>" + * <cache_size> indicates the object size if >=0, error code if negative + */ +int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) +{ + struct cachefiles_req *req; + struct fscache_cookie *cookie; + char *pid, *psize; + unsigned long id; + long size; + int ret; + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + if (!*args) { + pr_err("Empty id specified\n"); + return -EINVAL; + } + + pid = args; + psize = strchr(args, ','); + if (!psize) { + pr_err("Cache size is not specified\n"); + return -EINVAL; + } + + *psize = 0; + psize++; + + ret = kstrtoul(pid, 0, &id); + if (ret) + return ret; + + req = xa_erase(&cache->reqs, id); + if (!req) + return -EINVAL; + + /* fail OPEN request if copen format is invalid */ + ret = kstrtol(psize, 0, &size); + if (ret) { + req->error = ret; + goto out; + } + + /* fail OPEN request if daemon reports an error */ + if (size < 0) { + if (!IS_ERR_VALUE(size)) + size = -EINVAL; + req->error = size; + goto out; + } + + cookie = req->object->cookie; + cookie->object_size = size; + if (size) + clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + else + set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + +out: + complete(&req->done); + return ret; +} + +static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) +{ + struct cachefiles_object *object; + struct cachefiles_cache *cache; + struct cachefiles_open *load; + struct file *file; + u32 object_id; + int ret, fd; + + object = cachefiles_grab_object(req->object, + cachefiles_obj_get_ondemand_fd); + cache = object->volume->cache; + + ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL, + XA_LIMIT(1, INT_MAX), + &cache->ondemand_id_next, GFP_KERNEL); + if (ret < 0) + goto err; + + fd = get_unused_fd_flags(O_WRONLY); + if (fd < 0) { + ret = fd; + goto err_free_id; + } + + file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops, + object, O_WRONLY); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto err_put_fd; + } + + file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; + fd_install(fd, file); + + load = (void *)req->msg.data; + load->fd = fd; + req->msg.object_id = object_id; + object->ondemand_id = object_id; + return 0; + +err_put_fd: + put_unused_fd(fd); +err_free_id: + xa_erase(&cache->ondemand_ids, object_id); +err: + cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); + return ret; +} + +ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, + char __user *_buffer, size_t buflen) +{ + struct cachefiles_req *req; + struct cachefiles_msg *msg; + unsigned long id = 0; + size_t n; + int ret = 0; + XA_STATE(xas, &cache->reqs, 0); + + /* + * Search for a request that has not ever been processed, to prevent + * requests from being processed repeatedly. 
+ */ + xa_lock(&cache->reqs); + req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW); + if (!req) { + xa_unlock(&cache->reqs); + return 0; + } + + msg = &req->msg; + n = msg->len; + + if (n > buflen) { + xa_unlock(&cache->reqs); + return -EMSGSIZE; + } + + xas_clear_mark(&xas, CACHEFILES_REQ_NEW); + xa_unlock(&cache->reqs); + + id = xas.xa_index; + msg->msg_id = id; + + if (msg->opcode == CACHEFILES_OP_OPEN) { + ret = cachefiles_ondemand_get_fd(req); + if (ret) + goto error; + } + + if (copy_to_user(_buffer, msg, n) != 0) { + ret = -EFAULT; + goto err_put_fd; + } + + return n; + +err_put_fd: + if (msg->opcode == CACHEFILES_OP_OPEN) + close_fd(((struct cachefiles_open *)msg->data)->fd); +error: + xa_erase(&cache->reqs, id); + req->error = ret; + complete(&req->done); + return ret; +} + +typedef int (*init_req_fn)(struct cachefiles_req *req, void *private); + +static int cachefiles_ondemand_send_req(struct cachefiles_object *object, + enum cachefiles_opcode opcode, + size_t data_len, + init_req_fn init_req, + void *private) +{ + struct cachefiles_cache *cache = object->volume->cache; + struct cachefiles_req *req; + XA_STATE(xas, &cache->reqs, 0); + int ret; + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return 0; + + if (test_bit(CACHEFILES_DEAD, &cache->flags)) + return -EIO; + + req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->object = object; + init_completion(&req->done); + req->msg.opcode = opcode; + req->msg.len = sizeof(struct cachefiles_msg) + data_len; + + ret = init_req(req, private); + if (ret) + goto out; + + do { + /* + * Stop enqueuing the request when daemon is dying. The + * following two operations need to be atomic as a whole. + * 1) check cache state, and + * 2) enqueue request if cache is alive. + * Otherwise the request may be enqueued after xarray has been + * flushed, leaving the orphan request never being completed. + * + * CPU 1 CPU 2 + * ===== ===== + * test CACHEFILES_DEAD bit + * set CACHEFILES_DEAD bit + * flush requests in the xarray + * enqueue the request + */ + xas_lock(&xas); + + if (test_bit(CACHEFILES_DEAD, &cache->flags)) { + xas_unlock(&xas); + ret = -EIO; + goto out; + } + + /* coupled with the barrier in cachefiles_flush_reqs() */ + smp_mb(); + + xas.xa_index = 0; + xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); + if (xas.xa_node == XAS_RESTART) + xas_set_err(&xas, -EBUSY); + xas_store(&xas, req); + xas_clear_mark(&xas, XA_FREE_MARK); + xas_set_mark(&xas, CACHEFILES_REQ_NEW); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + ret = xas_error(&xas); + if (ret) + goto out; + + wake_up_all(&cache->daemon_pollwq); + wait_for_completion(&req->done); + ret = req->error; +out: + kfree(req); + return ret; +} + +static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req, + void *private) +{ + struct cachefiles_object *object = req->object; + struct fscache_cookie *cookie = object->cookie; + struct fscache_volume *volume = object->volume->vcookie; + struct cachefiles_open *load = (void *)req->msg.data; + size_t volume_key_size, cookie_key_size; + void *volume_key, *cookie_key; + + /* + * Volume key is a NUL-terminated string. key[0] stores strlen() of the + * string, followed by the content of the string (excluding '\0'). + */ + volume_key_size = volume->key[0] + 1; + volume_key = volume->key + 1; + + /* Cookie key is binary data, which is netfs specific. 
*/ + cookie_key_size = cookie->key_len; + cookie_key = fscache_get_key(cookie); + + if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) { + pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n"); + return -EINVAL; + } + + load->volume_key_size = volume_key_size; + load->cookie_key_size = cookie_key_size; + memcpy(load->data, volume_key, volume_key_size); + memcpy(load->data + volume_key_size, cookie_key, cookie_key_size); + + return 0; +} + +int cachefiles_ondemand_init_object(struct cachefiles_object *object) +{ + struct fscache_cookie *cookie = object->cookie; + struct fscache_volume *volume = object->volume->vcookie; + size_t volume_key_size, cookie_key_size, data_len; + + /* + * CacheFiles will firstly check the cache file under the root cache + * directory. If the coherency check failed, it will fallback to + * creating a new tmpfile as the cache file. Reuse the previously + * allocated object ID if any. + */ + if (object->ondemand_id > 0) + return 0; + + volume_key_size = volume->key[0] + 1; + cookie_key_size = cookie->key_len; + data_len = sizeof(struct cachefiles_open) + + volume_key_size + cookie_key_size; + + return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN, + data_len, cachefiles_ondemand_init_open_req, NULL); +} diff --git a/include/linux/fscache.h b/include/linux/fscache.h index e25539072463..72585c9729a2 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -39,6 +39,7 @@ struct fscache_cookie; #define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */ #define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */ #define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */ +#define FSCACHE_ADV_WANT_CACHE_SIZE 0x04 /* Retrieve cache size at runtime */ #define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */ diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index 311c14a20e70..93df9391bd7f 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -31,6 +31,8 @@ enum cachefiles_obj_ref_trace { cachefiles_obj_see_lookup_failed, cachefiles_obj_see_withdraw_cookie, cachefiles_obj_see_withdrawal, + cachefiles_obj_get_ondemand_fd, + cachefiles_obj_put_ondemand_fd, }; enum fscache_why_object_killed { diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h new file mode 100644 index 000000000000..521f2fe4fe9c --- /dev/null +++ b/include/uapi/linux/cachefiles.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_CACHEFILES_H +#define _LINUX_CACHEFILES_H + +#include <linux/types.h> + +/* + * Fscache ensures that the maximum length of cookie key is 255. The volume key + * is controlled by netfs, and generally no bigger than 255. + */ +#define CACHEFILES_MSG_MAX_SIZE 1024 + +enum cachefiles_opcode { + CACHEFILES_OP_OPEN, +}; + +/* + * Message Header + * + * @msg_id a unique ID identifying this message + * @opcode message type, CACHEFILES_OP_* + * @len message length, including message header and following data + * @object_id a unique ID identifying a cache file + * @data message type specific payload + */ +struct cachefiles_msg { + __u32 msg_id; + __u32 opcode; + __u32 len; + __u32 object_id; + __u8 data[]; +}; + +/* + * @data contains the volume_key followed directly by the cookie_key. volume_key + * is a NUL-terminated string; @volume_key_size indicates the size of the volume + * key in bytes. 
cookie_key is binary data, which is netfs specific; + * @cookie_key_size indicates the size of the cookie key in bytes. + * + * @fd identifies an anon_fd referring to the cache file. + */ +struct cachefiles_open { + __u32 volume_key_size; + __u32 cookie_key_size; + __u32 fd; + __u32 flags; + __u8 data[]; +}; + +#endif -- cgit v1.2.3-59-g8ed1b From 324b954ac80cff0d11ddb6bde9b6631e45e98620 Mon Sep 17 00:00:00 2001 From: Jeffle Xu Date: Mon, 25 Apr 2022 20:21:26 +0800 Subject: cachefiles: notify the user daemon when withdrawing cookie Notify the user daemon that the cookie is going to be withdrawn, providing a hint that the associated anonymous fd can be closed. Note that this is only a hint. The user daemon may close the associated anonymous fd when receiving the CLOSE request, then it will receive another anonymous fd when the cookie gets looked up. Or it may ignore the CLOSE request, and keep writing data through the anonymous fd. However the next time the cookie gets looked up, the user daemon will still receive a new anonymous fd. Signed-off-by: Jeffle Xu Acked-by: David Howells Link: https://lore.kernel.org/r/20220425122143.56815-5-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang --- fs/cachefiles/interface.c | 2 ++ fs/cachefiles/internal.h | 5 +++++ fs/cachefiles/ondemand.c | 38 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/cachefiles.h | 1 + 4 files changed, 46 insertions(+) (limited to 'include/uapi/linux') diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index ae93cee9d25d..a69073a1d3f0 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -362,6 +362,8 @@ static void cachefiles_withdraw_cookie(struct fscache_cookie *cookie) spin_unlock(&cache->object_list_lock); } + cachefiles_ondemand_clean_object(object); + if (object->file) { cachefiles_begin_secure(cache, &saved_cred); cachefiles_clean_up_object(object, cache); diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index e5c612888f84..da388ba127eb 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -290,6 +290,7 @@ extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args); extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); +extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, @@ -302,6 +303,10 @@ static inline int cachefiles_ondemand_init_object(struct cachefiles_object *obje { return 0; } + +static inline void cachefiles_ondemand_clean_object(struct cachefiles_object *object) +{ +} #endif /* diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 7946ee6c40be..11b1c15ac697 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -229,6 +229,12 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, goto err_put_fd; } + /* CLOSE request has no reply */ + if (msg->opcode == CACHEFILES_OP_CLOSE) { + xa_erase(&cache->reqs, id); + complete(&req->done); + } + return n; err_put_fd: @@ -300,6 +306,13 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, /* coupled with the barrier in cachefiles_flush_reqs() */ smp_mb(); + if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) { + WARN_ON_ONCE(object->ondemand_id == 0); + xas_unlock(&xas); + ret = -EIO; + goto out; + } + xas.xa_index = 0; xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); if (xas.xa_node == XAS_RESTART) @@ -356,6 +369,25 @@ static int 
cachefiles_ondemand_init_open_req(struct cachefiles_req *req, return 0; } +static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, + void *private) +{ + struct cachefiles_object *object = req->object; + int object_id = object->ondemand_id; + + /* + * It's possible that the object ID is still 0 if the cookie lookup + * phase failed before the OPEN request was ever sent. Also avoid + * sending a CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means + * the anon_fd has already been closed. + */ + if (object_id <= 0) + return -ENOENT; + + req->msg.object_id = object_id; + return 0; +} + int cachefiles_ondemand_init_object(struct cachefiles_object *object) { struct fscache_cookie *cookie = object->cookie; @@ -379,3 +411,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object) return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN, data_len, cachefiles_ondemand_init_open_req, NULL); } + +void cachefiles_ondemand_clean_object(struct cachefiles_object *object) +{ + cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0, + cachefiles_ondemand_init_close_req, NULL); +} diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h index 521f2fe4fe9c..37a0071037c8 100644 --- a/include/uapi/linux/cachefiles.h +++ b/include/uapi/linux/cachefiles.h @@ -12,6 +12,7 @@ enum cachefiles_opcode { CACHEFILES_OP_OPEN, + CACHEFILES_OP_CLOSE, }; /* -- cgit v1.2.3-59-g8ed1b From 9032b6e8589f269743984aac53e82e4835be16dc Mon Sep 17 00:00:00 2001 From: Jeffle Xu Date: Mon, 25 Apr 2022 20:21:27 +0800 Subject: cachefiles: implement on-demand read Implement the data plane of on-demand read mode. The early implementation [1] placed the call to cachefiles_ondemand_read() in fscache_read(). However, fscache_read() can only detect whether the requested file range is a full cache miss, whilst we need to notify the user daemon whenever there's a hole inside the requested file range. Thus the call is now placed in cachefiles_prepare_read(). When working in on-demand read mode, once a hole is detected, the read routine will send a READ request to the user daemon. The user daemon needs to fetch the data and write it to the cache file. After sending the READ request, the read routine will hang until the READ request is handled by the user daemon. Then it will retry reading from the same file range. If no progress is made, the read routine will then fail. A new NETFS_SREQ_ONDEMAND flag is introduced to indicate that an on-demand read should be done when a cache miss is encountered.
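To make the request/reply flow concrete, here is a rough userspace sketch (not part of the patch) of a daemon servicing one READ request. It assumes devfd is the daemon's open /dev/cachefiles bound to an on-demand cache, that fd_of[] records the anon_fd delivered by the earlier OPEN message for each object_id, and that fetch_from_source() is a hypothetical helper that retrieves the missing range from the netfs source; error handling is trimmed.

#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cachefiles.h>

extern int fd_of[];   /* object_id -> anon_fd, recorded when OPEN was handled */
extern void *fetch_from_source(__u32 object_id, __u64 off, __u64 len);

static void handle_one_read(int devfd)
{
	char buf[CACHEFILES_MSG_MAX_SIZE];
	struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;
	struct cachefiles_read *load;
	void *data;
	int anon_fd;

	/* Each read() from the daemon fd yields one whole message. */
	if (read(devfd, buf, sizeof(buf)) < (ssize_t)sizeof(*msg))
		return;
	if (msg->opcode != CACHEFILES_OP_READ)
		return;

	load = (struct cachefiles_read *)msg->data;
	anon_fd = fd_of[msg->object_id];
	data = fetch_from_source(msg->object_id, load->off, load->len);

	/* Fill the hole in the cache file, then complete the request so
	 * the blocked read routine retries the range. */
	pwrite(anon_fd, data, load->len, load->off);
	ioctl(anon_fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
	free(data);
}

Note that the completion travels through the anon_fd's ioctl rather than the daemon command interface; this is how the kernel matches the reply to the in-flight request and wakes the blocked read routine.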
[1] https://lore.kernel.org/all/20220406075612.60298-6-jefflexu@linux.alibaba.com/ #v8 Signed-off-by: Jeffle Xu Acked-by: David Howells Link: https://lore.kernel.org/r/20220425122143.56815-6-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang --- fs/cachefiles/internal.h | 9 +++++ fs/cachefiles/io.c | 15 ++++++-- fs/cachefiles/ondemand.c | 77 +++++++++++++++++++++++++++++++++++++++++ include/linux/netfs.h | 1 + include/uapi/linux/cachefiles.h | 17 +++++++++ 5 files changed, 117 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index da388ba127eb..6cba2c6de2f9 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -292,6 +292,9 @@ extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache, extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); +extern int cachefiles_ondemand_read(struct cachefiles_object *object, + loff_t pos, size_t len); + #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) @@ -307,6 +310,12 @@ static inline int cachefiles_ondemand_init_object(struct cachefiles_object *obje static inline void cachefiles_ondemand_clean_object(struct cachefiles_object *object) { } + +static inline int cachefiles_ondemand_read(struct cachefiles_object *object, + loff_t pos, size_t len) +{ + return -EOPNOTSUPP; +} #endif /* diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 50a14e8f0aac..000a28f46e59 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -403,6 +403,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER; loff_t off, to; ino_t ino = file ? file_inode(file)->i_ino : 0; + int rc; _enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size); @@ -415,7 +416,8 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) { __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); why = cachefiles_trace_read_no_data; - goto out_no_object; + if (!test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) + goto out_no_object; } /* The object and the file may be being created in the background. 
*/ @@ -432,7 +434,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * object = cachefiles_cres_object(cres); cache = object->volume->cache; cachefiles_begin_secure(cache, &saved_cred); - +retry: off = cachefiles_inject_read_error(); if (off == 0) off = vfs_llseek(file, subreq->start, SEEK_DATA); @@ -483,6 +485,15 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * download_and_store: __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); + if (test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) { + rc = cachefiles_ondemand_read(object, subreq->start, + subreq->len); + if (!rc) { + __clear_bit(NETFS_SREQ_ONDEMAND, &subreq->flags); + goto retry; + } + ret = NETFS_INVALID_READ; + } out: cachefiles_end_secure(cache, saved_cred); out_no_object: diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 11b1c15ac697..3470d4e8f0cb 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -10,8 +10,25 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, struct cachefiles_object *object = file->private_data; struct cachefiles_cache *cache = object->volume->cache; int object_id = object->ondemand_id; + struct cachefiles_req *req; + XA_STATE(xas, &cache->reqs, 0); + xa_lock(&cache->reqs); object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + + /* + * Flush all pending READ requests since their completion depends on + * anon_fd. + */ + xas_for_each(&xas, req, ULONG_MAX) { + if (req->msg.opcode == CACHEFILES_OP_READ) { + req->error = -EIO; + complete(&req->done); + xas_store(&xas, NULL); + } + } + xa_unlock(&cache->reqs); + xa_erase(&cache->ondemand_ids, object_id); cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); cachefiles_put_unbind_pincount(cache); @@ -57,11 +74,35 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, return vfs_llseek(file, pos, whence); } +static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + struct cachefiles_object *object = filp->private_data; + struct cachefiles_cache *cache = object->volume->cache; + struct cachefiles_req *req; + unsigned long id; + + if (ioctl != CACHEFILES_IOC_READ_COMPLETE) + return -EINVAL; + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + id = arg; + req = xa_erase(&cache->reqs, id); + if (!req) + return -EINVAL; + + complete(&req->done); + return 0; +} + static const struct file_operations cachefiles_ondemand_fd_fops = { .owner = THIS_MODULE, .release = cachefiles_ondemand_fd_release, .write_iter = cachefiles_ondemand_fd_write_iter, .llseek = cachefiles_ondemand_fd_llseek, + .unlocked_ioctl = cachefiles_ondemand_fd_ioctl, }; /* @@ -388,6 +429,32 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, return 0; } +struct cachefiles_read_ctx { + loff_t off; + size_t len; +}; + +static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, + void *private) +{ + struct cachefiles_object *object = req->object; + struct cachefiles_read *load = (void *)req->msg.data; + struct cachefiles_read_ctx *read_ctx = private; + int object_id = object->ondemand_id; + + /* Stop enqueuing requests when daemon has closed anon_fd. 
*/ + if (object_id <= 0) { + WARN_ON_ONCE(object_id == 0); + pr_info_once("READ: anonymous fd closed prematurely.\n"); + return -EIO; + } + + req->msg.object_id = object_id; + load->off = read_ctx->off; + load->len = read_ctx->len; + return 0; +} + int cachefiles_ondemand_init_object(struct cachefiles_object *object) { struct fscache_cookie *cookie = object->cookie; @@ -417,3 +484,13 @@ void cachefiles_ondemand_clean_object(struct cachefiles_object *object) cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0, cachefiles_ondemand_init_close_req, NULL); } + +int cachefiles_ondemand_read(struct cachefiles_object *object, + loff_t pos, size_t len) +{ + struct cachefiles_read_ctx read_ctx = {pos, len}; + + return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ, + sizeof(struct cachefiles_read), + cachefiles_ondemand_init_read_req, &read_ctx); +} diff --git a/include/linux/netfs.h b/include/linux/netfs.h index c7bf1eaf51d5..057d04efaf79 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -159,6 +159,7 @@ struct netfs_io_subrequest { #define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */ #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ +#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ }; enum netfs_io_origin { diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h index 37a0071037c8..78caa73e5343 100644 --- a/include/uapi/linux/cachefiles.h +++ b/include/uapi/linux/cachefiles.h @@ -3,6 +3,7 @@ #define _LINUX_CACHEFILES_H #include <linux/types.h> +#include <linux/ioctl.h> /* * Fscache ensures that the maximum length of a cookie key is 255. The volume key * is controlled by the netfs, and is generally no bigger than 255. @@ -13,6 +14,7 @@ enum cachefiles_opcode { CACHEFILES_OP_OPEN, CACHEFILES_OP_CLOSE, + CACHEFILES_OP_READ, }; /* @@ -48,4 +50,19 @@ struct cachefiles_open { __u8 data[]; }; +/* + * @off indicates the starting offset of the requested file range + * @len indicates the length of the requested file range + */ +struct cachefiles_read { + __u64 off; + __u64 len; +}; + +/* + * Reply for READ request + * @arg for this ioctl is the @msg_id field of the READ request. + */ +#define CACHEFILES_IOC_READ_COMPLETE _IOW(0x98, 1, int) + #endif -- cgit v1.2.3-59-g8ed1b From c7fb19428d67dd0a2a78a4f237af01d39c78dc5a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 30 Apr 2022 14:38:53 -0600 Subject: io_uring: add support for ring mapped supplied buffers Provided buffers allow an application to supply io_uring with buffers that can then be grabbed for a read/receive request, when the data source is ready to deliver data. The existing scheme relies on using IORING_OP_PROVIDE_BUFFERS to do that, but it can be difficult to use in real-world applications. It's pretty efficient if the application is able to supply back batches of provided buffers when they have been consumed and the application is ready to recycle them, but if fragmentation occurs in the buffer space, it can become difficult to supply enough buffers in time. This hurts efficiency. Add a register op, IORING_REGISTER_PBUF_RING, which allows an application to set up a shared queue for each buffer group of provided buffers. The application can then supply buffers simply by adding them to this ring, and the kernel can consume them just as easily. The ring shares the tail with the application, while the head remains private to the kernel.
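As a rough illustration of this scheme (not from the patch, and written against the raw syscall interface rather than liburing), the sketch below registers a ring for one buffer group and publishes buffers by advancing the shared tail. The group ID, the ring size, and the mmap()-based allocation are assumptions made for the example, and error handling is trimmed.

#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

#define ENTRIES 128                     /* ring size; must be a power of 2 */

static struct io_uring_buf_ring *setup_pbuf_ring(int ring_fd, unsigned short bgid)
{
	struct io_uring_buf_ring *br;
	struct io_uring_buf_reg reg;

	/* Page aligned and zero filled, so the shared tail starts at 0. */
	br = mmap(NULL, ENTRIES * sizeof(struct io_uring_buf),
		  PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	memset(&reg, 0, sizeof(reg));
	reg.ring_addr = (unsigned long)br;
	reg.ring_entries = ENTRIES;
	reg.bgid = bgid;
	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PBUF_RING,
		&reg, 1);
	return br;
}

/* Hand one buffer to the kernel; *tail is the application's local tail. */
static void provide_buf(struct io_uring_buf_ring *br, unsigned short *tail,
			void *addr, unsigned int len, unsigned short bid)
{
	struct io_uring_buf *buf = &br->bufs[*tail & (ENTRIES - 1)];

	buf->addr = (unsigned long)addr;
	buf->len = len;
	buf->bid = bid;
	/* The tail store publishes the entry, so it must be ordered last. */
	__atomic_store_n(&br->tail, ++(*tail), __ATOMIC_RELEASE);
}

Because a single release store of the tail is all it takes to publish a buffer, an application can hand buffers back one at a time cheaply, which is what makes the per-buffer replenish case competitive in the numbers below.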
Provided buffers set up with IORING_REGISTER_PBUF_RING cannot use IORING_OP_{PROVIDE,REMOVE}_BUFFERS for adding entries to or removing entries from the ring; they must use the mapped ring. Mapped provided buffer rings can co-exist with normal provided buffers, just not within the same group ID. To gauge the overhead of the existing scheme and evaluate the mapped ring approach, a simple NOP benchmark was written. It uses a ring of 128 entries, and submits/completes 32 at a time. 'Replenish' is how many buffers are provided back at a time after they have been consumed:

Test                    Replenish       NOPs/sec
================================================================
No provided buffers     NA              ~30M
Provided buffers        32              ~16M
Provided buffers        1               ~10M
Ring buffers            32              ~27M
Ring buffers            1               ~27M

The ring mapped buffers perform almost as well as not using provided buffers at all, and they don't care if you provide 1 or more back at the same time. This means applications can just replenish as they go, rather than needing to batch and compact, further reducing overhead in the application. The NOP benchmark above doesn't need to do any compaction, so that overhead isn't even reflected in the above test. Co-developed-by: Dylan Yudaken Signed-off-by: Jens Axboe --- fs/io_uring.c | 234 +++++++++++++++++++++++++++++++++++++++--- include/uapi/linux/io_uring.h | 36 +++++++ 2 files changed, 258 insertions(+), 12 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 384cdbd40941..78192a9e7684 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -285,9 +285,26 @@ struct io_rsrc_data { bool quiesce; }; +#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf)) struct io_buffer_list { - struct list_head buf_list; + /* + * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not, + * then these are classic provided buffers and ->buf_list is used. + */ + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + }; __u16 bgid; + + /* below is for ring provided buffers */ + __u16 buf_nr_pages; + __u16 nr_entries; + __u32 head; + __u32 mask; }; struct io_buffer { @@ -804,6 +821,7 @@ enum { REQ_F_NEED_CLEANUP_BIT, REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, + REQ_F_BUFFER_RING_BIT, REQ_F_COMPLETE_INLINE_BIT, REQ_F_REISSUE_BIT, REQ_F_CREDS_BIT, @@ -855,6 +873,8 @@ enum { REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), + /* buffer selected from ring, needs commit */ + REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), /* completion is deferred through io_comp_state */ REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), /* caller should reissue async */ @@ -979,6 +999,12 @@ struct io_kiocb { /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; + + /* + * stores buffer ID for ring provided buffers, valid IFF + * REQ_F_BUFFER_RING is set.
+ */ + struct io_buffer_list *buf_list; }; union { @@ -1470,8 +1496,14 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req, static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list) { - req->flags &= ~REQ_F_BUFFER_SELECTED; - list_add(&req->kbuf->list, list); + if (req->flags & REQ_F_BUFFER_RING) { + if (req->buf_list) + req->buf_list->head++; + req->flags &= ~REQ_F_BUFFER_RING; + } else { + list_add(&req->kbuf->list, list); + req->flags &= ~REQ_F_BUFFER_SELECTED; + } return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT); } @@ -1480,7 +1512,7 @@ static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req) { lockdep_assert_held(&req->ctx->completion_lock); - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return 0; return __io_put_kbuf(req, &req->ctx->io_buffers_comp); } @@ -1490,7 +1522,7 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, { unsigned int cflags; - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return 0; /* @@ -1505,7 +1537,10 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, * We migrate buffers from the comp_list to the issue cache list * when we need one. */ - if (issue_flags & IO_URING_F_UNLOCKED) { + if (req->flags & REQ_F_BUFFER_RING) { + /* no buffers to recycle for this case */ + cflags = __io_put_kbuf(req, NULL); + } else if (issue_flags & IO_URING_F_UNLOCKED) { struct io_ring_ctx *ctx = req->ctx; spin_lock(&ctx->completion_lock); @@ -1535,11 +1570,23 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) struct io_buffer_list *bl; struct io_buffer *buf; - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return; /* don't recycle if we already did IO to this buffer */ if (req->flags & REQ_F_PARTIAL_IO) return; + /* + * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear + * the flag and hence ensure that bl->head doesn't get incremented. + * If the tail has already been incremented, hang on to it. + */ + if (req->flags & REQ_F_BUFFER_RING) { + if (req->buf_list) { + req->buf_index = req->buf_list->bgid; + req->flags &= ~REQ_F_BUFFER_RING; + } + return; + } io_ring_submit_lock(ctx, issue_flags); @@ -3487,6 +3534,53 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len, return ret; } +static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, + struct io_buffer_list *bl, + unsigned int issue_flags) +{ + struct io_uring_buf_ring *br = bl->buf_ring; + struct io_uring_buf *buf; + __u32 head = bl->head; + + if (unlikely(smp_load_acquire(&br->tail) == head)) { + io_ring_submit_unlock(req->ctx, issue_flags); + return ERR_PTR(-ENOBUFS); + } + + head &= bl->mask; + if (head < IO_BUFFER_LIST_BUF_PER_PAGE) { + buf = &br->bufs[head]; + } else { + int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1); + int index = head / IO_BUFFER_LIST_BUF_PER_PAGE - 1; + buf = page_address(bl->buf_pages[index]); + buf += off; + } + if (*len > buf->len) + *len = buf->len; + req->flags |= REQ_F_BUFFER_RING; + req->buf_list = bl; + req->buf_index = buf->bid; + + if (!(issue_flags & IO_URING_F_UNLOCKED)) + return u64_to_user_ptr(buf->addr); + + /* + * If we came in unlocked, we have no choice but to + * consume the buffer here. This does mean it'll be + * pinned until the IO completes. 
But coming in + * unlocked means we're in io-wq context, hence there + * should be no further retry. For the locked case, the + * caller must ensure to call the commit when the + * transfer completes (or if we get -EAGAIN and must + * poll or retry). + */ + req->buf_list = NULL; + bl->head++; + io_ring_submit_unlock(req->ctx, issue_flags); + return u64_to_user_ptr(buf->addr); +} + static void __user *io_buffer_select(struct io_kiocb *req, size_t *len, unsigned int issue_flags) { @@ -3502,6 +3596,9 @@ static void __user *io_buffer_select(struct io_kiocb *req, size_t *len, } /* selection helpers drop the submit lock again, if needed */ + if (bl->buf_nr_pages) + return io_ring_buffer_select(req, len, bl, issue_flags); + return io_provided_buffer_select(req, len, bl, issue_flags); } @@ -3558,7 +3655,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, unsigned int issue_flags) { - if (req->flags & REQ_F_BUFFER_SELECTED) { + if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { iov[0].iov_base = u64_to_user_ptr(req->rw.addr); iov[0].iov_len = req->rw.len; return 0; @@ -3578,7 +3675,7 @@ static inline bool io_do_buffer_select(struct io_kiocb *req) { if (!(req->flags & REQ_F_BUFFER_SELECT)) return false; - return !(req->flags & REQ_F_BUFFER_SELECTED); + return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)); } static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req, @@ -4872,6 +4969,18 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, if (!nbufs) return 0; + if (bl->buf_nr_pages) { + int j; + + i = bl->buf_ring->tail - bl->head; + for (j = 0; j < bl->buf_nr_pages; j++) + unpin_user_page(bl->buf_pages[j]); + kvfree(bl->buf_pages); + bl->buf_pages = NULL; + bl->buf_nr_pages = 0; + return i; + } + /* the head kbuf is the list itself */ while (!list_empty(&bl->buf_list)) { struct io_buffer *nxt; @@ -4898,8 +5007,12 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) ret = -ENOENT; bl = io_buffer_get_list(ctx, p->bgid); - if (bl) - ret = __io_remove_buffers(ctx, bl, p->nbufs); + if (bl) { + ret = -EINVAL; + /* can't use provide/remove buffers command on mapped buffers */ + if (!bl->buf_nr_pages) + ret = __io_remove_buffers(ctx, bl, p->nbufs); + } if (ret < 0) req_set_fail(req); @@ -5047,7 +5160,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) bl = io_buffer_get_list(ctx, p->bgid); if (unlikely(!bl)) { - bl = kmalloc(sizeof(*bl), GFP_KERNEL); + bl = kzalloc(sizeof(*bl), GFP_KERNEL); if (!bl) { ret = -ENOMEM; goto err; @@ -5058,6 +5171,11 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) goto err; } } + /* can't add buffers via this command for a mapped buffer ring */ + if (bl->buf_nr_pages) { + ret = -EINVAL; + goto err; + } ret = io_add_buffers(ctx, p, bl); err: @@ -12011,6 +12129,83 @@ err: return ret; } +static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) +{ + struct io_uring_buf_ring *br; + struct io_uring_buf_reg reg; + struct io_buffer_list *bl; + struct page **pages; + int nr_pages; + + if (copy_from_user(®, arg, sizeof(reg))) + return -EFAULT; + + if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2]) + return -EINVAL; + if (!reg.ring_addr) + return -EFAULT; + if (reg.ring_addr & ~PAGE_MASK) + return -EINVAL; + if (!is_power_of_2(reg.ring_entries)) + return -EINVAL; + + if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) { + int ret 
= io_init_bl_list(ctx); + if (ret) + return ret; + } + + bl = io_buffer_get_list(ctx, reg.bgid); + if (bl && bl->buf_nr_pages) + return -EEXIST; + if (!bl) { + bl = kzalloc(sizeof(*bl), GFP_KERNEL); + if (!bl) + return -ENOMEM; + } + + pages = io_pin_pages(reg.ring_addr, + struct_size(br, bufs, reg.ring_entries), + &nr_pages); + if (IS_ERR(pages)) { + kfree(bl); + return PTR_ERR(pages); + } + + br = page_address(pages[0]); + bl->buf_pages = pages; + bl->buf_nr_pages = nr_pages; + bl->nr_entries = reg.ring_entries; + bl->buf_ring = br; + bl->mask = reg.ring_entries - 1; + io_buffer_add_list(ctx, bl, reg.bgid); + return 0; +} + +static int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) +{ + struct io_uring_buf_reg reg; + struct io_buffer_list *bl; + + if (copy_from_user(®, arg, sizeof(reg))) + return -EFAULT; + if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2]) + return -EINVAL; + + bl = io_buffer_get_list(ctx, reg.bgid); + if (!bl) + return -ENOENT; + if (!bl->buf_nr_pages) + return -EINVAL; + + __io_remove_buffers(ctx, bl, -1U); + if (bl->bgid >= BGID_ARRAY) { + xa_erase(&ctx->io_bl_xa, bl->bgid); + kfree(bl); + } + return 0; +} + static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, void __user *arg, unsigned nr_args) __releases(ctx->uring_lock) @@ -12142,6 +12337,18 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, case IORING_UNREGISTER_RING_FDS: ret = io_ringfd_unregister(ctx, arg, nr_args); break; + case IORING_REGISTER_PBUF_RING: + ret = -EINVAL; + if (!arg || nr_args != 1) + break; + ret = io_register_pbuf_ring(ctx, arg); + break; + case IORING_UNREGISTER_PBUF_RING: + ret = -EINVAL; + if (!arg || nr_args != 1) + break; + ret = io_unregister_pbuf_ring(ctx, arg); + break; default: ret = -EINVAL; break; @@ -12227,6 +12434,9 @@ static int __init io_uring_init(void) /* ->buf_index is u16 */ BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16)); BUILD_BUG_ON(BGID_ARRAY * sizeof(struct io_buffer_list) > PAGE_SIZE); + BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0); + BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) != + offsetof(struct io_uring_buf_ring, tail)); /* should fit into one byte */ BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 15f821af9242..ddf969ae5a79 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -384,6 +384,10 @@ enum { IORING_REGISTER_RING_FDS = 20, IORING_UNREGISTER_RING_FDS = 21, + /* register ring based provide buffer group */ + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + /* this goes last */ IORING_REGISTER_LAST }; @@ -461,6 +465,38 @@ struct io_uring_restriction { __u32 resv2[3]; }; +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + union { + /* + * To avoid spilling into more pages than we need to, the + * ring tail is overlaid with the io_uring_buf->resv field. 
+ */ + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct io_uring_buf bufs[0]; + }; +}; + +/* argument for IORING_(UN)REGISTER_PBUF_RING */ +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 pad; + __u64 resv[3]; +}; + /* * io_uring_restriction->opcode values */ -- cgit v1.2.3-59-g8ed1b From c1318b39c7d36bd5139a9c71044ff2b2d3c6f9d8 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Wed, 18 May 2022 12:27:31 +0300 Subject: tls: Add opt-in zerocopy mode of sendfile() TLS device offload copies sendfile data to a bounce buffer before transmitting. This makes it possible to maintain a valid MAC on TLS records when the file contents change and part of a TLS record has to be retransmitted at the TCP level. In many common use cases (like serving static files over HTTPS) the file contents are not changed on the fly. In many use cases breaking the connection is totally acceptable if the file is changed during transmission, because it would be received corrupted in any case. This commit optimizes performance for such use cases by providing a new optional mode of TLS sendfile(), in which the extra copy is skipped. Removing this copy improves performance significantly, as TLS and TCP sendfile perform the same operations, and the only overhead is TLS header/trailer insertion. The new mode can only be enabled with the new socket option named TLS_TX_ZEROCOPY_SENDFILE on a per-socket basis. It preserves backwards compatibility with existing applications that rely on the copying behavior. The new mode is safe, meaning that unsolicited modifications of the file being sent can't break the integrity of the kernel. The worst thing that can happen is sending a corrupted TLS record, which is in any case not forbidden when using regular TCP sockets. Sockets other than TLS device offload are not affected by the new socket option. The actual status of zerocopy sendfile can be queried with sock_diag.
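For illustration, a minimal userspace sketch (not from the patch) of opting in; it assumes sk is a TCP socket that already has the "tls" ULP attached with TLS_TX configured on a device-offload-capable NIC, and error handling is trimmed.

#include <sys/types.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static ssize_t send_static_file(int sk, int file_fd, size_t size)
{
	unsigned int zc = 1;    /* the option takes 0 or 1; anything else is -EINVAL */

	setsockopt(sk, SOL_TLS, TLS_TX_ZEROCOPY_SENDFILE, &zc, sizeof(zc));

	/* The file pages now feed TLS records directly, with no bounce
	 * buffer; modifying the file mid-transfer may corrupt the TLS
	 * stream, but cannot harm the kernel. */
	return sendfile(sk, file_fd, NULL, size);
}

The current state can be read back with getsockopt() on the same option, and sock_diag reports it through the new TLS_INFO_ZC_SENDFILE attribute.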
Performance numbers in a single-core test with 24 HTTPS streams on nginx, under 100% CPU load: * non-zerocopy: 33.6 Gbit/s * zerocopy: 79.92 Gbit/s CPU: Intel(R) Xeon(R) Platinum 8380 CPU @ 2.30GHz Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Signed-off-by: Maxim Mikityanskiy Reviewed-by: Jakub Kicinski Link: https://lore.kernel.org/r/20220518092731.1243494-1-maximmi@nvidia.com Signed-off-by: Paolo Abeni --- include/net/tls.h | 1 + include/uapi/linux/tls.h | 2 ++ net/tls/tls_device.c | 53 ++++++++++++++++++++++++++++++++++------------ net/tls/tls_main.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+), 13 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/net/tls.h b/include/net/tls.h index b59f0a63292b..8017f1703447 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -238,6 +238,7 @@ struct tls_context { u8 tx_conf:3; u8 rx_conf:3; + u8 zerocopy_sendfile:1; int (*push_pending_record)(struct sock *sk, int flags); void (*sk_write_space)(struct sock *sk); diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index 5f38be0ec0f3..ac39328eabe7 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -39,6 +39,7 @@ /* TLS socket options */ #define TLS_TX 1 /* Set transmit parameters */ #define TLS_RX 2 /* Set receive parameters */ +#define TLS_TX_ZEROCOPY_SENDFILE 3 /* transmit zerocopy sendfile */ /* Supported versions */ #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF) @@ -160,6 +161,7 @@ enum { TLS_INFO_CIPHER, TLS_INFO_TXCONF, TLS_INFO_RXCONF, + TLS_INFO_ZC_SENDFILE, __TLS_INFO_MAX, }; #define TLS_INFO_MAX (__TLS_INFO_MAX - 1) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index bca00521ebc1..ec6f4b699a2b 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -411,10 +411,16 @@ static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i) return 0; } +union tls_iter_offset { + struct iov_iter *msg_iter; + int offset; +}; + static int tls_push_data(struct sock *sk, - struct iov_iter *msg_iter, + union tls_iter_offset iter_offset, size_t size, int flags, - unsigned char record_type) + unsigned char record_type, + struct page *zc_page) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; @@ -480,12 +486,21 @@ handle_error: } record = ctx->open_record; - copy = min_t(size_t, size, (pfrag->size - pfrag->offset)); - copy = min_t(size_t, copy, (max_open_record_len - record->len)); - if (copy) { + copy = min_t(size_t, size, max_open_record_len - record->len); + if (copy && zc_page) { + struct page_frag zc_pfrag; + + zc_pfrag.page = zc_page; + zc_pfrag.offset = iter_offset.offset; + zc_pfrag.size = copy; + tls_append_frag(record, &zc_pfrag, copy); + } else if (copy) { + copy = min_t(size_t, copy, pfrag->size - pfrag->offset); + rc = tls_device_copy_data(page_address(pfrag->page) + - pfrag->offset, copy, msg_iter); + pfrag->offset, copy, + iter_offset.msg_iter); if (rc) goto handle_error; tls_append_frag(record, pfrag, copy); @@ -540,6 +555,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { unsigned char record_type = TLS_RECORD_TYPE_DATA; struct tls_context *tls_ctx = tls_get_ctx(sk); + union tls_iter_offset iter; int rc; mutex_lock(&tls_ctx->tx_lock); @@ -551,8 +567,8 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) goto out; } - rc = tls_push_data(sk, &msg->msg_iter, size, - msg->msg_flags, record_type); + iter.msg_iter = &msg->msg_iter; + rc = tls_push_data(sk, iter, 
size, msg->msg_flags, record_type, NULL); out: release_sock(sk); @@ -564,7 +580,8 @@ int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct iov_iter msg_iter; + union tls_iter_offset iter_offset; + struct iov_iter msg_iter; char *kaddr; struct kvec iov; int rc; @@ -580,12 +597,20 @@ int tls_device_sendpage(struct sock *sk, struct page *page, goto out; } + if (tls_ctx->zerocopy_sendfile) { + iter_offset.offset = offset; + rc = tls_push_data(sk, iter_offset, size, + flags, TLS_RECORD_TYPE_DATA, page); + goto out; + } + kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size); - rc = tls_push_data(sk, &msg_iter, size, - flags, TLS_RECORD_TYPE_DATA); + iter_offset.msg_iter = &msg_iter; + rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA, + NULL); kunmap(page); out: @@ -656,10 +681,12 @@ EXPORT_SYMBOL(tls_get_record); static int tls_device_push_pending_record(struct sock *sk, int flags) { - struct iov_iter msg_iter; + union tls_iter_offset iter; + struct iov_iter msg_iter; iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0); - return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA); + iter.msg_iter = &msg_iter; + return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL); } void tls_device_write_space(struct sock *sk, struct tls_context *ctx) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 7b2b0e7ffee4..b91ddc110786 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -513,6 +513,26 @@ out: return rc; } +static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval, + int __user *optlen) +{ + struct tls_context *ctx = tls_get_ctx(sk); + unsigned int value; + int len; + + if (get_user(len, optlen)) + return -EFAULT; + + if (len != sizeof(value)) + return -EINVAL; + + value = ctx->zerocopy_sendfile; + if (copy_to_user(optval, &value, sizeof(value))) + return -EFAULT; + + return 0; +} + static int do_tls_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) { @@ -524,6 +544,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname, rc = do_tls_getsockopt_conf(sk, optval, optlen, optname == TLS_TX); break; + case TLS_TX_ZEROCOPY_SENDFILE: + rc = do_tls_getsockopt_tx_zc(sk, optval, optlen); + break; default: rc = -ENOPROTOOPT; break; @@ -675,6 +698,26 @@ err_crypto_info: return rc; } +static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval, + unsigned int optlen) +{ + struct tls_context *ctx = tls_get_ctx(sk); + unsigned int value; + + if (sockptr_is_null(optval) || optlen != sizeof(value)) + return -EINVAL; + + if (copy_from_sockptr(&value, optval, sizeof(value))) + return -EFAULT; + + if (value > 1) + return -EINVAL; + + ctx->zerocopy_sendfile = value; + + return 0; +} + static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { @@ -688,6 +731,11 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, optname == TLS_TX); release_sock(sk); break; + case TLS_TX_ZEROCOPY_SENDFILE: + lock_sock(sk); + rc = do_tls_setsockopt_tx_zc(sk, optval, optlen); + release_sock(sk); + break; default: rc = -ENOPROTOOPT; break; @@ -921,6 +969,12 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb) if (err) goto nla_failure; + if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) { + err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE); + if (err) + goto nla_failure; + } + 
rcu_read_unlock(); nla_nest_end(skb, start); return 0; @@ -940,6 +994,7 @@ static size_t tls_get_info_size(const struct sock *sk) nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */ nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */ nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */ + nla_total_size(0) + /* TLS_INFO_ZC_SENDFILE */ 0; return size; -- cgit v1.2.3-59-g8ed1b From 58e5bdeb9c2b06895e723c0b1e670f54510ff782 Mon Sep 17 00:00:00 2001 From: Kanchan Joshi Date: Fri, 20 May 2022 14:36:30 +0530 Subject: nvme: enable uring-passthrough for admin commands Add two new opcodes that userspace can use for admin commands: NVME_URING_CMD_ADMIN : non-vectored NVME_URING_CMD_ADMIN_VEC : vectored variant Wire up support when these are issued on the controller node (/dev/nvmeX). Signed-off-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220520090630.70394-3-joshi.k@samsung.com Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 1 + drivers/nvme/host/ioctl.c | 23 +++++++++++++++++++++++ drivers/nvme/host/nvme.h | 1 + include/uapi/linux/nvme_ioctl.h | 2 ++ 4 files changed, 27 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 682df98db341..1a984045e49c 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3146,6 +3146,7 @@ static const struct file_operations nvme_dev_fops = { .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = compat_ptr_ioctl, + .uring_cmd = nvme_dev_uring_cmd, }; static ssize_t nvme_sysfs_reset(struct device *dev, diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 114b490592b0..096b1b47d750 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -686,6 +686,29 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, } #endif /* CONFIG_NVME_MULTIPATH */ +int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) +{ + struct nvme_ctrl *ctrl = ioucmd->file->private_data; + int ret; + + ret = nvme_uring_cmd_checks(issue_flags); + if (ret) + return ret; + + switch (ioucmd->cmd_op) { + case NVME_URING_CMD_ADMIN: + ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false); + break; + case NVME_URING_CMD_ADMIN_VEC: + ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) { struct nvme_ns *ns; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 086ccbdd7003..26d35c557588 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -787,6 +787,7 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo); +int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct pr_ops nvme_pr_ops; diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index 0b1876aa5a59..2f76cba67166 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -108,5 +108,7 @@ struct nvme_uring_cmd { /* io_uring async commands: */ #define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd) #define NVME_URING_CMD_IO_VEC _IOWR('N', 0x81, struct nvme_uring_cmd) +#define NVME_URING_CMD_ADMIN _IOWR('N', 0x82, struct nvme_uring_cmd)
+#define NVME_URING_CMD_ADMIN_VEC _IOWR('N', 0x83, struct nvme_uring_cmd) #endif /* _UAPI_LINUX_NVME_IOCTL_H */ -- cgit v1.2.3-59-g8ed1b From 3bc253c2e652cf5f12cd8c00d80d8ec55d67d1a7 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 19 May 2022 16:30:10 -0700 Subject: bpf: Add bpf_skc_to_mptcp_sock_proto This patch implements a new struct bpf_func_proto, named bpf_skc_to_mptcp_sock_proto. Define a new bpf_id BTF_SOCK_TYPE_MPTCP, and a new helper bpf_skc_to_mptcp_sock(), which invokes another new helper bpf_mptcp_sock_from_subflow() in net/mptcp/bpf.c to get struct mptcp_sock from a given subflow socket. v2: Emit BTF type, add func_id checks in verifier.c and bpf_trace.c, remove build check for CONFIG_BPF_JIT v5: Drop EXPORT_SYMBOL (Martin) Co-developed-by: Nicolas Rybowski Co-developed-by: Matthieu Baerts Signed-off-by: Nicolas Rybowski Signed-off-by: Matthieu Baerts Signed-off-by: Geliang Tang Signed-off-by: Mat Martineau Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220519233016.105670-2-mathew.j.martineau@linux.intel.com --- include/linux/bpf.h | 1 + include/linux/btf_ids.h | 3 ++- include/net/mptcp.h | 6 ++++++ include/uapi/linux/bpf.h | 7 +++++++ kernel/bpf/verifier.c | 1 + kernel/trace/bpf_trace.c | 2 ++ net/core/filter.c | 18 ++++++++++++++++++ net/mptcp/Makefile | 2 ++ net/mptcp/bpf.c | 21 +++++++++++++++++++++ scripts/bpf_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 7 +++++++ 11 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 net/mptcp/bpf.c (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c107392b0ba7..a3ef078401cf 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2231,6 +2231,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto; extern const struct bpf_func_proto bpf_copy_from_user_proto; extern const struct bpf_func_proto bpf_snprintf_btf_proto; extern const struct bpf_func_proto bpf_snprintf_proto; diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index bc5d9cc34e4c..335a19092368 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -178,7 +178,8 @@ extern struct btf_id_set name; BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) enum { #define BTF_SOCK_TYPE(name, str) name, diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 8b1afd6f5cc4..2ba09de955c7 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -284,4 +284,10 @@ static inline int mptcpv6_init(void) { return 0; } static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { } #endif +#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL) +struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk); +#else +static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; } +#endif + #endif /* __NET_MPTCP_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0210f85131b3..56688bee20d9 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5172,6 +5172,12 @@ union bpf_attr { * 
Return * Map value associated to *key* on *cpu*, or **NULL** if no entry * was found or *cpu* is invalid. + * + * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. + * Return + * *sk* if casting is valid, or **NULL** otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5370,6 +5376,7 @@ union bpf_attr { FN(ima_file_hash), \ FN(kptr_xchg), \ FN(map_lookup_percpu_elem), \ + FN(skc_to_mptcp_sock), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9b59581026f8..14e8c17d3d8d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -509,6 +509,7 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id) func_id == BPF_FUNC_skc_to_tcp_sock || func_id == BPF_FUNC_skc_to_tcp6_sock || func_id == BPF_FUNC_skc_to_udp6_sock || + func_id == BPF_FUNC_skc_to_mptcp_sock || func_id == BPF_FUNC_skc_to_tcp_timewait_sock || func_id == BPF_FUNC_skc_to_tcp_request_sock; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 7141ca8a1c2d..10b157a6d73e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1705,6 +1705,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skc_to_udp6_sock_proto; case BPF_FUNC_skc_to_unix_sock: return &bpf_skc_to_unix_sock_proto; + case BPF_FUNC_skc_to_mptcp_sock: + return &bpf_skc_to_mptcp_sock_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_tracing_proto; case BPF_FUNC_sk_storage_delete: diff --git a/net/core/filter.c b/net/core/filter.c index fe0da529d00f..5af58eb48587 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -78,6 +78,7 @@ #include #include #include +#include static const struct bpf_func_proto * bpf_sk_base_func_proto(enum bpf_func_id func_id); @@ -11281,6 +11282,20 @@ const struct bpf_func_proto bpf_skc_to_unix_sock_proto = { .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UNIX], }; +BPF_CALL_1(bpf_skc_to_mptcp_sock, struct sock *, sk) +{ + BTF_TYPE_EMIT(struct mptcp_sock); + return (unsigned long)bpf_mptcp_sock_from_subflow(sk); +} + +const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto = { + .func = bpf_skc_to_mptcp_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_SOCK_COMMON, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_MPTCP], +}; + BPF_CALL_1(bpf_sock_from_file, struct file *, file) { return (unsigned long)sock_from_file(file); @@ -11323,6 +11338,9 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skc_to_unix_sock: func = &bpf_skc_to_unix_sock_proto; break; + case BPF_FUNC_skc_to_mptcp_sock: + func = &bpf_skc_to_mptcp_sock_proto; + break; case BPF_FUNC_ktime_get_coarse_ns: return &bpf_ktime_get_coarse_ns_proto; default: diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile index e54daceac58b..99dddf08ca73 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -10,3 +10,5 @@ obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o mptcp_crypto_test-objs := crypto_test.o mptcp_token_test-objs := token_test.o obj-$(CONFIG_MPTCP_KUNIT_TEST) += mptcp_crypto_test.o mptcp_token_test.o + +obj-$(CONFIG_BPF_SYSCALL) += bpf.o diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c new file mode 100644 index 000000000000..5a0a84ad94af --- /dev/null +++ b/net/mptcp/bpf.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Multipath TCP + * + * Copyright (c) 2020, Tessares SA. + * Copyright (c) 2022, SUSE. 
+ * + * Author: Nicolas Rybowski + */ + +#define pr_fmt(fmt) "MPTCP: " fmt + +#include +#include "protocol.h" + +struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) +{ + if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk)) + return mptcp_sk(mptcp_subflow_ctx(sk)->conn); + + return NULL; +} diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index 096625242475..d5452f7eb996 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -633,6 +633,7 @@ class PrinterHelpers(Printer): 'struct socket', 'struct file', 'struct bpf_timer', + 'struct mptcp_sock', ] known_types = { '...', @@ -682,6 +683,7 @@ class PrinterHelpers(Printer): 'struct socket', 'struct file', 'struct bpf_timer', + 'struct mptcp_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0210f85131b3..56688bee20d9 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5172,6 +5172,12 @@ union bpf_attr { * Return * Map value associated to *key* on *cpu*, or **NULL** if no entry * was found or *cpu* is invalid. + * + * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. + * Return + * *sk* if casting is valid, or **NULL** otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5370,6 +5376,7 @@ union bpf_attr { FN(ima_file_hash), \ FN(kptr_xchg), \ FN(map_lookup_percpu_elem), \ + FN(skc_to_mptcp_sock), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b From a13e248ff90e81e9322406c0e618cf2168702f4e Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 6 May 2022 18:08:11 +0200 Subject: landlock: Fix landlock_add_rule(2) documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is not mandatory to pass a file descriptor obtained with the O_PATH flag. Also, replace rule's accesses with ruleset's accesses. Link: https://lore.kernel.org/r/20220506160820.524344-2-mic@digikod.net Cc: stable@vger.kernel.org Signed-off-by: Mickaël Salaün --- include/uapi/linux/landlock.h | 5 +++-- security/landlock/syscalls.c | 7 +++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 15c31abb0d76..21c8d58283c9 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -62,8 +62,9 @@ struct landlock_path_beneath_attr { */ __u64 allowed_access; /** - * @parent_fd: File descriptor, open with ``O_PATH``, which identifies - * the parent directory of a file hierarchy, or just a file. + * @parent_fd: File descriptor, preferably opened with ``O_PATH``, + * which identifies the parent directory of a file hierarchy, or just a + * file. */ __s32 parent_fd; /* diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 2fde978bf8ca..7edc1d50e2bf 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -292,14 +292,13 @@ out_fdput: * * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. - * &landlock_path_beneath_attr.allowed_access is not a subset of the rule's - * accesses); + * &landlock_path_beneath_attr.allowed_access is not a subset of the + * ruleset handled accesses); * - ENOMSG: Empty accesses (e.g. 
&landlock_path_beneath_attr.allowed_access); + * - EBADF: @ruleset_fd is not a file descriptor for the current thread, or a + * member of @rule_attr is not a file descriptor as expected; + * - EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of + * @rule_attr is not the expected file descriptor type; + * - EPERM: @ruleset_fd has no write access to the underlying ruleset; + * - EFAULT: @rule_attr inconsistency. */ -- cgit v1.2.3-59-g8ed1b From b91c3e4ea756b12b7d992529226edce1cfd854d7 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Fri, 6 May 2022 18:10:57 +0200 Subject: landlock: Add support for file reparenting with LANDLOCK_ACCESS_FS_REFER MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new LANDLOCK_ACCESS_FS_REFER access right to enable policy writers to allow sandboxed processes to link and rename files from and to a specific set of file hierarchies. This access right should be composed with LANDLOCK_ACCESS_FS_MAKE_* for the destination of a link or rename, and with LANDLOCK_ACCESS_FS_REMOVE_* for the source of a rename. This lifts a Landlock limitation that always denied changing the parent of an inode. Renaming or linking to the same directory is still always allowed, whether or not LANDLOCK_ACCESS_FS_REFER is used, because it is not considered a threat to user data. However, creating multiple links or renaming to a different parent directory may lead to privilege escalations if not handled properly. Indeed, we must be sure that the source doesn't gain more privileges by being accessible from the destination. This is handled by making sure that the destination hierarchy restricts at least as much as the source hierarchy (including the referenced file or directory itself). If that is not the case, an EXDEV error is returned, making it potentially possible for user space to copy the file hierarchy instead of moving or linking it. Instead of creating different access rights for the source and the destination, we choose to make it simple and consistent for users. Indeed, considering the previous constraint, it would be weird to require such a destination access right to also be granted to the source (to make it a superset). Moreover, RENAME_EXCHANGE would also add to the confusion because of paths being both a source and a destination. See the provided documentation for additional details. New tests are provided with a following commit. Reviewed-by: Paul Moore Signed-off-by: Mickaël Salaün Link: https://lore.kernel.org/r/20220506161102.525323-8-mic@digikod.net --- include/uapi/linux/landlock.h | 27 +- security/landlock/fs.c | 600 +++++++++++++++++++++++---- security/landlock/limits.h | 2 +- security/landlock/syscalls.c | 2 +- tools/testing/selftests/landlock/base_test.c | 2 +- tools/testing/selftests/landlock/fs_test.c | 3 +- 6 files changed, 556 insertions(+), 80 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 21c8d58283c9..23df4e0e8ace 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -21,8 +21,14 @@ struct landlock_ruleset_attr { /** * @handled_access_fs: Bitmask of actions (cf. `Filesystem flags`_) * that is handled by this ruleset and should then be forbidden if no - * rule explicitly allow them. This is needed for backward
+ * rule explicitly allow them: it is a deny-by-default list that should + * contain as much Landlock access rights as possible. Indeed, all + * Landlock filesystem access rights that are not part of + * handled_access_fs are allowed. This is needed for backward + * compatibility reasons. One exception is the + * LANDLOCK_ACCESS_FS_REFER access right, which is always implicitly + * handled, but must still be explicitly handled to add new rules with + * this access right. */ __u64 handled_access_fs; }; @@ -112,6 +118,22 @@ struct landlock_path_beneath_attr { * - %LANDLOCK_ACCESS_FS_MAKE_FIFO: Create (or rename or link) a named pipe. * - %LANDLOCK_ACCESS_FS_MAKE_BLOCK: Create (or rename or link) a block device. * - %LANDLOCK_ACCESS_FS_MAKE_SYM: Create (or rename or link) a symbolic link. + * - %LANDLOCK_ACCESS_FS_REFER: Link or rename a file from or to a different + * directory (i.e. reparent a file hierarchy). This access right is + * available since the second version of the Landlock ABI. This is also the + * only access right which is always considered handled by any ruleset in + * such a way that reparenting a file hierarchy is always denied by default. + * To avoid privilege escalation, it is not enough to add a rule with this + * access right. When linking or renaming a file, the destination directory + * hierarchy must also always have the same or a superset of restrictions of + * the source hierarchy. If it is not the case, or if the domain doesn't + * handle this access right, such actions are denied by default with errno + * set to EXDEV. Linking also requires a LANDLOCK_ACCESS_FS_MAKE_* access + * right on the destination directory, and renaming also requires a + * LANDLOCK_ACCESS_FS_REMOVE_* access right on the source's (file or + * directory) parent. Otherwise, such actions are denied with errno set to + * EACCES. The EACCES errno prevails over EXDEV to let user space + * efficiently deal with an unrecoverable error. * * .. 
warning:: * @@ -137,6 +159,7 @@ struct landlock_path_beneath_attr { #define LANDLOCK_ACCESS_FS_MAKE_FIFO (1ULL << 10) #define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11) #define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12) +#define LANDLOCK_ACCESS_FS_REFER (1ULL << 13) /* clang-format on */ #endif /* _UAPI_LINUX_LANDLOCK_H */ diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 30b42cdee52e..ec5a6247cd3e 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -4,6 +4,7 @@ * * Copyright © 2016-2020 Mickaël Salaün * Copyright © 2018-2020 ANSSI + * Copyright © 2021-2022 Microsoft Corporation */ #include @@ -273,40 +274,262 @@ static inline bool is_nouser_or_private(const struct dentry *dentry) unlikely(IS_PRIVATE(d_backing_inode(dentry)))); } -static int check_access_path(const struct landlock_ruleset *const domain, - const struct path *const path, - const access_mask_t access_request) +static inline access_mask_t +get_handled_accesses(const struct landlock_ruleset *const domain) { - layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; - bool allowed = false, has_access = false; - struct path walker_path; - size_t i; + access_mask_t access_dom = 0; + unsigned long access_bit; + + for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS; + access_bit++) { + size_t layer_level; + + for (layer_level = 0; layer_level < domain->num_layers; + layer_level++) { + if (domain->fs_access_masks[layer_level] & + BIT_ULL(access_bit)) { + access_dom |= BIT_ULL(access_bit); + break; + } + } + } + return access_dom; +} + +static inline access_mask_t +init_layer_masks(const struct landlock_ruleset *const domain, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) +{ + access_mask_t handled_accesses = 0; + size_t layer_level; + memset(layer_masks, 0, sizeof(*layer_masks)); + /* An empty access request can happen because of O_WRONLY | O_RDWR. */ if (!access_request) return 0; - if (WARN_ON_ONCE(!domain || !path)) - return 0; - if (is_nouser_or_private(path->dentry)) - return 0; - if (WARN_ON_ONCE(domain->num_layers < 1)) - return -EACCES; - /* Saves all layers handling a subset of requested accesses. */ - for (i = 0; i < domain->num_layers; i++) { + /* Saves all handled accesses per layer. */ + for (layer_level = 0; layer_level < domain->num_layers; layer_level++) { const unsigned long access_req = access_request; unsigned long access_bit; for_each_set_bit(access_bit, &access_req, - ARRAY_SIZE(layer_masks)) { - if (domain->fs_access_masks[i] & BIT_ULL(access_bit)) { - layer_masks[access_bit] |= BIT_ULL(i); - has_access = true; + ARRAY_SIZE(*layer_masks)) { + if (domain->fs_access_masks[layer_level] & + BIT_ULL(access_bit)) { + (*layer_masks)[access_bit] |= + BIT_ULL(layer_level); + handled_accesses |= BIT_ULL(access_bit); } } } - /* An access request not handled by the domain is allowed. */ - if (!has_access) + return handled_accesses; +} + +/* + * Check that a destination file hierarchy has more restrictions than a source + * file hierarchy. This is only used for link and rename actions. + * + * @layer_masks_child2: Optional child masks. 
+ */ +static inline bool no_more_access( + const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS], + const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS], + const bool child1_is_directory, + const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS], + const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS], + const bool child2_is_directory) +{ + unsigned long access_bit; + + for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2); + access_bit++) { + /* Ignores accesses that only make sense for directories. */ + const bool is_file_access = + !!(BIT_ULL(access_bit) & ACCESS_FILE); + + if (child1_is_directory || is_file_access) { + /* + * Checks if the destination restrictions are a + * superset of the source ones (i.e. inherited access + * rights without child exceptions): + * restrictions(parent2) >= restrictions(child1) + */ + if ((((*layer_masks_parent1)[access_bit] & + (*layer_masks_child1)[access_bit]) | + (*layer_masks_parent2)[access_bit]) != + (*layer_masks_parent2)[access_bit]) + return false; + } + + if (!layer_masks_child2) + continue; + if (child2_is_directory || is_file_access) { + /* + * Checks inverted restrictions for RENAME_EXCHANGE: + * restrictions(parent1) >= restrictions(child2) + */ + if ((((*layer_masks_parent2)[access_bit] & + (*layer_masks_child2)[access_bit]) | + (*layer_masks_parent1)[access_bit]) != + (*layer_masks_parent1)[access_bit]) + return false; + } + } + return true; +} + +/* + * Removes @layer_masks accesses that are not requested. + * + * Returns true if the request is allowed, false otherwise. + */ +static inline bool +scope_to_request(const access_mask_t access_request, + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) +{ + const unsigned long access_req = access_request; + unsigned long access_bit; + + if (WARN_ON_ONCE(!layer_masks)) + return true; + + for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks)) + (*layer_masks)[access_bit] = 0; + return !memchr_inv(layer_masks, 0, sizeof(*layer_masks)); +} + +/* + * Returns true if there is at least one access right different than + * LANDLOCK_ACCESS_FS_REFER. + */ +static inline bool +is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS], + const access_mask_t access_request) +{ + unsigned long access_bit; + /* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */ + const unsigned long access_check = access_request & + ~LANDLOCK_ACCESS_FS_REFER; + + if (!layer_masks) + return false; + + for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) { + if ((*layer_masks)[access_bit]) + return true; + } + return false; +} + +/** + * check_access_path_dual - Check accesses for requests with a common path + * + * @domain: Domain to check against. + * @path: File hierarchy to walk through. + * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is + * equal to @layer_masks_parent2 (if any). This is tied to the unique + * requested path for most actions, or the source in case of a refer action + * (i.e. rename or link), or the source and destination in case of + * RENAME_EXCHANGE. + * @layer_masks_parent1: Pointer to a matrix of layer masks per access + * masks, identifying the layers that forbid a specific access. Bits from + * this matrix can be unset according to the @path walk. An empty matrix + * means that @domain allows all possible Landlock accesses (i.e. not only + * those identified by @access_request_parent1). 
This matrix can + * initially refer to domain layer masks and, when the accesses for the + * destination and source are the same, to requested layer masks. + * @dentry_child1: Dentry to the initial child of the parent1 path. This + * pointer must be NULL for non-refer actions (i.e. not link nor rename). + * @access_request_parent2: Similar to @access_request_parent1 but for a + * request involving a source and a destination. This refers to the + * destination, except in case of RENAME_EXCHANGE where it also refers to + * the source. Must be set to 0 when using a simple path request. + * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer + * action. This must be NULL otherwise. + * @dentry_child2: Dentry to the initial child of the parent2 path. This + * pointer is only set for RENAME_EXCHANGE actions and must be NULL + * otherwise. + * + * This helper first checks that the destination has a superset of restrictions + * compared to the source (if any) for a common path. Because of + * RENAME_EXCHANGE actions, source and destinations may be swapped. It then + * checks that the collected accesses and the remaining ones are enough to + * allow the request. + * + * Returns: + * - 0 if the access request is granted; + * - -EACCES if it is denied because of access right other than + * LANDLOCK_ACCESS_FS_REFER; + * - -EXDEV if the renaming or linking would be a privileged escalation + * (according to each layered policies), or if LANDLOCK_ACCESS_FS_REFER is + * not allowed by the source or the destination. + */ +static int check_access_path_dual( + const struct landlock_ruleset *const domain, + const struct path *const path, + const access_mask_t access_request_parent1, + layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS], + const struct dentry *const dentry_child1, + const access_mask_t access_request_parent2, + layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS], + const struct dentry *const dentry_child2) +{ + bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check, + child1_is_directory = true, child2_is_directory = true; + struct path walker_path; + access_mask_t access_masked_parent1, access_masked_parent2; + layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS], + _layer_masks_child2[LANDLOCK_NUM_ACCESS_FS]; + layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL, + (*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL; + + if (!access_request_parent1 && !access_request_parent2) return 0; + if (WARN_ON_ONCE(!domain || !path)) + return 0; + if (is_nouser_or_private(path->dentry)) + return 0; + if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1)) + return -EACCES; + + if (unlikely(layer_masks_parent2)) { + if (WARN_ON_ONCE(!dentry_child1)) + return -EACCES; + /* + * For a double request, first check for potential privilege + * escalation by looking at domain handled accesses (which are + * a superset of the meaningful requested accesses). + */ + access_masked_parent1 = access_masked_parent2 = + get_handled_accesses(domain); + is_dom_check = true; + } else { + if (WARN_ON_ONCE(dentry_child1 || dentry_child2)) + return -EACCES; + /* For a simple request, only check for requested accesses. 
*/ + access_masked_parent1 = access_request_parent1; + access_masked_parent2 = access_request_parent2; + is_dom_check = false; + } + + if (unlikely(dentry_child1)) { + unmask_layers(find_rule(domain, dentry_child1), + init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, + &_layer_masks_child1), + &_layer_masks_child1); + layer_masks_child1 = &_layer_masks_child1; + child1_is_directory = d_is_dir(dentry_child1); + } + if (unlikely(dentry_child2)) { + unmask_layers(find_rule(domain, dentry_child2), + init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, + &_layer_masks_child2), + &_layer_masks_child2); + layer_masks_child2 = &_layer_masks_child2; + child2_is_directory = d_is_dir(dentry_child2); + } walker_path = *path; path_get(&walker_path); @@ -316,11 +539,52 @@ static int check_access_path(const struct landlock_ruleset *const domain, */ while (true) { struct dentry *parent_dentry; + const struct landlock_rule *rule; + + /* + * If at least all accesses allowed on the destination are + * already allowed on the source, respectively if there is at + * least as much as restrictions on the destination than on the + * source, then we can safely refer files from the source to + * the destination without risking a privilege escalation. + * This also applies in the case of RENAME_EXCHANGE, which + * implies checks on both direction. This is crucial for + * standalone multilayered security policies. Furthermore, + * this helps avoid policy writers to shoot themselves in the + * foot. + */ + if (unlikely(is_dom_check && + no_more_access( + layer_masks_parent1, layer_masks_child1, + child1_is_directory, layer_masks_parent2, + layer_masks_child2, + child2_is_directory))) { + allowed_parent1 = scope_to_request( + access_request_parent1, layer_masks_parent1); + allowed_parent2 = scope_to_request( + access_request_parent2, layer_masks_parent2); + + /* Stops when all accesses are granted. */ + if (allowed_parent1 && allowed_parent2) + break; - allowed = unmask_layers(find_rule(domain, walker_path.dentry), - access_request, &layer_masks); - if (allowed) - /* Stops when a rule from each layer grants access. */ + /* + * Now, downgrades the remaining checks from domain + * handled accesses to requested accesses. + */ + is_dom_check = false; + access_masked_parent1 = access_request_parent1; + access_masked_parent2 = access_request_parent2; + } + + rule = find_rule(domain, walker_path.dentry); + allowed_parent1 = unmask_layers(rule, access_masked_parent1, + layer_masks_parent1); + allowed_parent2 = unmask_layers(rule, access_masked_parent2, + layer_masks_parent2); + + /* Stops when a rule from each layer grants access. */ + if (allowed_parent1 && allowed_parent2) break; jump_up: @@ -333,7 +597,6 @@ jump_up: * Stops at the real root. Denies access * because not all layers have granted access. */ - allowed = false; break; } } @@ -343,7 +606,8 @@ jump_up: * access to internal filesystems (e.g. nsfs, which is * reachable through /proc//ns/). */ - allowed = !!(walker_path.mnt->mnt_flags & MNT_INTERNAL); + allowed_parent1 = allowed_parent2 = + !!(walker_path.mnt->mnt_flags & MNT_INTERNAL); break; } parent_dentry = dget_parent(walker_path.dentry); @@ -351,7 +615,36 @@ jump_up: walker_path.dentry = parent_dentry; } path_put(&walker_path); - return allowed ? 0 : -EACCES; + + if (allowed_parent1 && allowed_parent2) + return 0; + + /* + * This prioritizes EACCES over EXDEV for all actions, including + * renames with RENAME_EXCHANGE. 
+ */ + if (likely(is_eacces(layer_masks_parent1, access_request_parent1) || + is_eacces(layer_masks_parent2, access_request_parent2))) + return -EACCES; + + /* + * Gracefully forbids reparenting if the destination directory + * hierarchy is not a superset of restrictions of the source directory + * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the + * source or the destination. + */ + return -EXDEV; +} + +static inline int check_access_path(const struct landlock_ruleset *const domain, + const struct path *const path, + access_mask_t access_request) +{ + layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; + + access_request = init_layer_masks(domain, access_request, &layer_masks); + return check_access_path_dual(domain, path, access_request, + &layer_masks, NULL, 0, NULL, NULL); } static inline int current_check_access_path(const struct path *const path, @@ -398,6 +691,206 @@ static inline access_mask_t maybe_remove(const struct dentry *const dentry) LANDLOCK_ACCESS_FS_REMOVE_FILE; } +/** + * collect_domain_accesses - Walk through a file path and collect accesses + * + * @domain: Domain to check against. + * @mnt_root: Last directory to check. + * @dir: Directory to start the walk from. + * @layer_masks_dom: Where to store the collected accesses. + * + * This helper is useful to begin a path walk from the @dir directory to a + * @mnt_root directory used as a mount point. This mount point is the common + * ancestor between the source and the destination of a renamed and linked + * file. While walking from @dir to @mnt_root, we record all the domain's + * allowed accesses in @layer_masks_dom. + * + * This is similar to check_access_path_dual() but much simpler because it only + * handles walking on the same mount point and only check one set of accesses. + * + * Returns: + * - true if all the domain access rights are allowed for @dir; + * - false if the walk reached @mnt_root. + */ +static bool collect_domain_accesses( + const struct landlock_ruleset *const domain, + const struct dentry *const mnt_root, struct dentry *dir, + layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS]) +{ + unsigned long access_dom; + bool ret = false; + + if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom)) + return true; + if (is_nouser_or_private(dir)) + return true; + + access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, + layer_masks_dom); + + dget(dir); + while (true) { + struct dentry *parent_dentry; + + /* Gets all layers allowing all domain accesses. */ + if (unmask_layers(find_rule(domain, dir), access_dom, + layer_masks_dom)) { + /* + * Stops when all handled accesses are allowed by at + * least one rule in each layer. + */ + ret = true; + break; + } + + /* We should not reach a root other than @mnt_root. */ + if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir))) + break; + + parent_dentry = dget_parent(dir); + dput(dir); + dir = parent_dentry; + } + dput(dir); + return ret; +} + +/** + * current_check_refer_path - Check if a rename or link action is allowed + * + * @old_dentry: File or directory requested to be moved or linked. + * @new_dir: Destination parent directory. + * @new_dentry: Destination file or directory. + * @removable: Sets to true if it is a rename operation. + * @exchange: Sets to true if it is a rename operation with RENAME_EXCHANGE. + * + * Because of its unprivileged constraints, Landlock relies on file hierarchies + * (and not only inodes) to tie access rights to files. 
Being able to link or + * rename a file hierarchy brings some challenges. Indeed, moving or linking a + * file (i.e. creating a new reference to an inode) can have an impact on the + * actions allowed for a set of files if it would change its parent directory + * (i.e. reparenting). + * + * To avoid trivial access right bypasses, Landlock first checks if the file or + * directory requested to be moved would gain new access rights inherited from + * its new hierarchy. Before returning any error, Landlock then checks that + * the parent source hierarchy and the destination hierarchy would allow the + * link or rename action. If it is not the case, an error with EACCES is + * returned to inform user space that there is no way to remove or create the + * requested source file type. If it should be allowed but the new inherited + * access rights would be greater than the source access rights, then the + * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables + * user space to abort the whole operation if there is no way to do it, or to + * manually copy the source to the destination if this remains allowed, e.g. + * because file creation is allowed on the destination directory but not direct + * linking. + * + * To achieve this goal, the kernel needs to compare two file hierarchies: the + * one identifying the source file or directory (including itself), and the + * destination one. This can be seen as a multilayer partial ordering problem. + * The kernel walks through these paths and collects in a matrix the access + * rights that are denied per layer. These matrices are then compared to see + * if the destination one has more (or the same) restrictions as the source + * one. If this is the case, the requested action will not return EXDEV, which + * doesn't mean the action is allowed. The parent hierarchy of the source + * (i.e. parent directory), and the destination hierarchy must also be checked + * to verify that they explicitly allow such action (i.e. referencing, + * creation and potentially removal rights). The kernel implementation is then + * required to rely on potentially four matrices of access rights: one for the + * source file or directory (i.e. the child), a potentially other one for the + * other source/destination (in case of RENAME_EXCHANGE), one for the source + * parent hierarchy and a last one for the destination hierarchy. These + * ephemeral matrices take some space on the stack, which limits the number of + * layers to a deemed reasonable number: 16. + * + * Returns: + * - 0 if access is allowed; + * - -EXDEV if @old_dentry would inherit new access rights from @new_dir; + * - -EACCES if file removal or creation is denied. 
+ */ +static int current_check_refer_path(struct dentry *const old_dentry, + const struct path *const new_dir, + struct dentry *const new_dentry, + const bool removable, const bool exchange) +{ + const struct landlock_ruleset *const dom = + landlock_get_current_domain(); + bool allow_parent1, allow_parent2; + access_mask_t access_request_parent1, access_request_parent2; + struct path mnt_dir; + layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS], + layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS]; + + if (!dom) + return 0; + if (WARN_ON_ONCE(dom->num_layers < 1)) + return -EACCES; + if (unlikely(d_is_negative(old_dentry))) + return -ENOENT; + if (exchange) { + if (unlikely(d_is_negative(new_dentry))) + return -ENOENT; + access_request_parent1 = + get_mode_access(d_backing_inode(new_dentry)->i_mode); + } else { + access_request_parent1 = 0; + } + access_request_parent2 = + get_mode_access(d_backing_inode(old_dentry)->i_mode); + if (removable) { + access_request_parent1 |= maybe_remove(old_dentry); + access_request_parent2 |= maybe_remove(new_dentry); + } + + /* The mount points are the same for old and new paths, cf. EXDEV. */ + if (old_dentry->d_parent == new_dir->dentry) { + /* + * The LANDLOCK_ACCESS_FS_REFER access right is not required + * for same-directory referer (i.e. no reparenting). + */ + access_request_parent1 = init_layer_masks( + dom, access_request_parent1 | access_request_parent2, + &layer_masks_parent1); + return check_access_path_dual(dom, new_dir, + access_request_parent1, + &layer_masks_parent1, NULL, 0, + NULL, NULL); + } + + /* Backward compatibility: no reparenting support. */ + if (!(get_handled_accesses(dom) & LANDLOCK_ACCESS_FS_REFER)) + return -EXDEV; + + access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER; + access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER; + + /* Saves the common mount point. */ + mnt_dir.mnt = new_dir->mnt; + mnt_dir.dentry = new_dir->mnt->mnt_root; + + /* new_dir->dentry is equal to new_dentry->d_parent */ + allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, + old_dentry->d_parent, + &layer_masks_parent1); + allow_parent2 = collect_domain_accesses( + dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2); + + if (allow_parent1 && allow_parent2) + return 0; + + /* + * To be able to compare source and destination domain access rights, + * take into account the @old_dentry access rights aggregated with its + * parent access rights. This will be useful to compare with the + * destination parent access rights. + */ + return check_access_path_dual(dom, &mnt_dir, access_request_parent1, + &layer_masks_parent1, old_dentry, + access_request_parent2, + &layer_masks_parent2, + exchange ? new_dentry : NULL); +} + /* Inode hooks */ static void hook_inode_free_security(struct inode *const inode) @@ -591,32 +1084,12 @@ static int hook_sb_pivotroot(const struct path *const old_path, /* Path hooks */ -/* - * Creating multiple links or renaming may lead to privilege escalations if not - * handled properly. Indeed, we must be sure that the source doesn't gain more - * privileges by being accessible from the destination. This is getting more - * complex when dealing with multiple layers. The whole picture can be seen as - * a multilayer partial ordering problem. A future version of Landlock will - * deal with that. 
- */ static int hook_path_link(struct dentry *const old_dentry, const struct path *const new_dir, struct dentry *const new_dentry) { - const struct landlock_ruleset *const dom = - landlock_get_current_domain(); - - if (!dom) - return 0; - /* The mount points are the same for old and new paths, cf. EXDEV. */ - if (old_dentry->d_parent != new_dir->dentry) - /* Gracefully forbids reparenting. */ - return -EXDEV; - if (unlikely(d_is_negative(old_dentry))) - return -ENOENT; - return check_access_path( - dom, new_dir, - get_mode_access(d_backing_inode(old_dentry)->i_mode)); + return current_check_refer_path(old_dentry, new_dir, new_dentry, false, + false); } static int hook_path_rename(const struct path *const old_dir, @@ -625,30 +1098,9 @@ static int hook_path_rename(const struct path *const old_dir, struct dentry *const new_dentry, const unsigned int flags) { - const struct landlock_ruleset *const dom = - landlock_get_current_domain(); - u32 exchange_access = 0; - - if (!dom) - return 0; - /* The mount points are the same for old and new paths, cf. EXDEV. */ - if (old_dir->dentry != new_dir->dentry) - /* Gracefully forbids reparenting. */ - return -EXDEV; - if (flags & RENAME_EXCHANGE) { - if (unlikely(d_is_negative(new_dentry))) - return -ENOENT; - exchange_access = - get_mode_access(d_backing_inode(new_dentry)->i_mode); - } - if (unlikely(d_is_negative(old_dentry))) - return -ENOENT; - /* RENAME_EXCHANGE is handled because directories are the same. */ - return check_access_path( - dom, old_dir, - maybe_remove(old_dentry) | maybe_remove(new_dentry) | - exchange_access | - get_mode_access(d_backing_inode(old_dentry)->i_mode)); + /* old_dir refers to old_dentry->d_parent and new_dir->mnt */ + return current_check_refer_path(old_dentry, new_dir, new_dentry, true, + !!(flags & RENAME_EXCHANGE)); } static int hook_path_mkdir(const struct path *const dir, diff --git a/security/landlock/limits.h b/security/landlock/limits.h index 17c2a2e7fe1e..b54184ab9439 100644 --- a/security/landlock/limits.h +++ b/security/landlock/limits.h @@ -18,7 +18,7 @@ #define LANDLOCK_MAX_NUM_LAYERS 16 #define LANDLOCK_MAX_NUM_RULES U32_MAX -#define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_MAKE_SYM +#define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_REFER #define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1) #define LANDLOCK_NUM_ACCESS_FS __const_hweight64(LANDLOCK_MASK_ACCESS_FS) diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 507d43827afe..735a0865ea11 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -129,7 +129,7 @@ static const struct file_operations ruleset_fops = { .write = fop_dummy_write, }; -#define LANDLOCK_ABI_VERSION 1 +#define LANDLOCK_ABI_VERSION 2 /** * sys_landlock_create_ruleset - Create a new ruleset diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c index 35f64832b869..da9290817866 100644 --- a/tools/testing/selftests/landlock/base_test.c +++ b/tools/testing/selftests/landlock/base_test.c @@ -75,7 +75,7 @@ TEST(abi_version) const struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE, }; - ASSERT_EQ(1, landlock_create_ruleset(NULL, 0, + ASSERT_EQ(2, landlock_create_ruleset(NULL, 0, LANDLOCK_CREATE_RULESET_VERSION)); ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c index a4fdcda62bde..69f9c7409198 100644 --- 
a/tools/testing/selftests/landlock/fs_test.c +++ b/tools/testing/selftests/landlock/fs_test.c @@ -401,7 +401,7 @@ TEST_F_FORK(layout1, inval) LANDLOCK_ACCESS_FS_WRITE_FILE | \ LANDLOCK_ACCESS_FS_READ_FILE) -#define ACCESS_LAST LANDLOCK_ACCESS_FS_MAKE_SYM +#define ACCESS_LAST LANDLOCK_ACCESS_FS_REFER #define ACCESS_ALL ( \ ACCESS_FILE | \ @@ -414,6 +414,7 @@ TEST_F_FORK(layout1, inval) LANDLOCK_ACCESS_FS_MAKE_SOCK | \ LANDLOCK_ACCESS_FS_MAKE_FIFO | \ LANDLOCK_ACCESS_FS_MAKE_BLOCK | \ + LANDLOCK_ACCESS_FS_MAKE_SYM | \ ACCESS_LAST) /* clang-format on */ -- cgit v1.2.3-59-g8ed1b From 97e03f521050c092919591e668107b3d69c5f426 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 23 May 2022 14:07:07 -0700 Subject: bpf: Add verifier support for dynptrs This patch adds the bulk of the verifier work for supporting dynamic pointers (dynptrs) in bpf. A bpf_dynptr is opaque to the bpf program. It is a 16-byte structure defined internally as: struct bpf_dynptr_kern { void *data; u32 size; u32 offset; } __aligned(8); The upper 8 bits of *size* is reserved (it contains extra metadata about read-only status and dynptr type). Consequently, a dynptr only supports memory less than 16 MB. There are different types of dynptrs (eg malloc, ringbuf, ...). In this patchset, the most basic one, dynptrs to a bpf program's local memory, is added. For now only local memory that is of reg type PTR_TO_MAP_VALUE is supported. In the verifier, dynptr state information will be tracked in stack slots. When the program passes in an uninitialized dynptr (ARG_PTR_TO_DYNPTR | MEM_UNINIT), the stack slots corresponding to the frame pointer where the dynptr resides at are marked STACK_DYNPTR. For helper functions that take in initialized dynptrs (eg bpf_dynptr_read + bpf_dynptr_write which are added later in this patchset), the verifier enforces that the dynptr has been initialized properly by checking that their corresponding stack slots have been marked as STACK_DYNPTR. The 6th patch in this patchset adds test cases that the verifier should successfully reject, such as for example attempting to use a dynptr after doing a direct write into it inside the bpf program. Signed-off-by: Joanne Koong Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20220523210712.3641569-2-joannelkoong@gmail.com --- include/linux/bpf.h | 28 ++++++ include/linux/bpf_verifier.h | 18 ++++ include/uapi/linux/bpf.h | 5 ++ kernel/bpf/verifier.c | 188 ++++++++++++++++++++++++++++++++++++++++- scripts/bpf_doc.py | 2 + tools/include/uapi/linux/bpf.h | 5 ++ 6 files changed, 243 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a9b1875212f6..b26c8176b9e0 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -392,10 +392,15 @@ enum bpf_type_flag { MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS), + /* DYNPTR points to memory local to the bpf program. */ + DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; +#define DYNPTR_TYPE_FLAG_MASK DYNPTR_TYPE_LOCAL + /* Max number of base types. */ #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) @@ -438,6 +443,7 @@ enum bpf_arg_type { ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ ARG_PTR_TO_KPTR, /* pointer to referenced kptr */ + ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. 
See bpf_type_flag for dynptr type */ __BPF_ARG_TYPE_MAX, /* Extended arg_types. */ @@ -2376,4 +2382,26 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 **bin_buf, u32 num_args); void bpf_bprintf_cleanup(void); +/* the implementation of the opaque uapi struct bpf_dynptr */ +struct bpf_dynptr_kern { + void *data; + /* Size represents the number of usable bytes of dynptr data. + * If for example the offset is at 4 for a local dynptr whose data is + * of type u64, the number of usable bytes is 4. + * + * The upper 8 bits are reserved. It is as follows: + * Bits 0 - 23 = size + * Bits 24 - 30 = dynptr type + * Bit 31 = whether dynptr is read-only + */ + u32 size; + u32 offset; +} __aligned(8); + +enum bpf_dynptr_type { + BPF_DYNPTR_TYPE_INVALID, + /* Points to memory that is local to the bpf program */ + BPF_DYNPTR_TYPE_LOCAL, +}; + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1f1e7f2ea967..af5b2135215e 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -72,6 +72,18 @@ struct bpf_reg_state { u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ + /* For dynptr stack slots */ + struct { + enum bpf_dynptr_type type; + /* A dynptr is 16 bytes so it takes up 2 stack slots. + * We need to track which slot is the first slot + * to protect against cases where the user may try to + * pass in an address starting at the second slot of the + * dynptr. + */ + bool first_slot; + } dynptr; + /* Max size from any of the above. */ struct { unsigned long raw1; @@ -174,9 +186,15 @@ enum bpf_stack_slot_type { STACK_SPILL, /* register spilled into stack */ STACK_MISC, /* BPF program wrote some data into this slot */ STACK_ZERO, /* BPF program wrote constant zero */ + /* A dynptr is stored in this stack slot. The type of dynptr + * is stored in bpf_stack_state->spilled_ptr.dynptr.type + */ + STACK_DYNPTR, }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ +#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern) +#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE) struct bpf_stack_state { struct bpf_reg_state spilled_ptr; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 56688bee20d9..610944cb3389 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6528,6 +6528,11 @@ struct bpf_timer { __u64 :64; } __attribute__((aligned(8))); +struct bpf_dynptr { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. 
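Before the verifier changes below, the stack-slot arithmetic deserves a worked example. This standalone C sketch is not part of the patch: it mirrors the get_spi() and is_spi_bounds_valid() helpers added in the kernel/bpf/verifier.c hunk that follows, using the constants from the bpf_verifier.h hunk above.

#include <assert.h>

#define BPF_REG_SIZE		8
#define BPF_DYNPTR_SIZE		16	/* sizeof(struct bpf_dynptr_kern) */
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

/* Mirrors the kernel's get_spi(): stack slot index for frame offset @off. */
static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

/* Mirrors is_spi_bounds_valid() for a stack of @allocated_slots slots. */
static int spi_bounds_valid(int spi, int nr_slots, int allocated_slots)
{
	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}

int main(void)
{
	/* A dynptr whose lowest byte sits at fp-16 spans slots 1 and 0. */
	assert(get_spi(-16) == 1);
	/* With 16 bytes of allocated stack (2 slots), both slots fit. */
	assert(spi_bounds_valid(get_spi(-16), BPF_DYNPTR_NR_SLOTS, 2));
	/* A dynptr at fp-8 would need a slot below slot 0: rejected. */
	assert(!spi_bounds_valid(get_spi(-8), BPF_DYNPTR_NR_SLOTS, 2));
	return 0;
}

Because a dynptr spans two slots, the verifier additionally records which slot is the first one (dynptr.first_slot), so a program cannot smuggle in an address that starts at the second slot.
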
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 45153cbc2bd6..1fd0b81c3fec 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -259,6 +259,7 @@ struct bpf_call_arg_meta { u32 ret_btf_id; u32 subprogno; struct bpf_map_value_off_desc *kptr_off_desc; + u8 uninit_dynptr_regno; }; struct btf *btf_vmlinux; @@ -581,6 +582,7 @@ static char slot_type_char[] = { [STACK_SPILL] = 'r', [STACK_MISC] = 'm', [STACK_ZERO] = '0', + [STACK_DYNPTR] = 'd', }; static void print_liveness(struct bpf_verifier_env *env, @@ -596,6 +598,25 @@ static void print_liveness(struct bpf_verifier_env *env, verbose(env, "D"); } +static int get_spi(s32 off) +{ + return (-off - 1) / BPF_REG_SIZE; +} + +static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) +{ + int allocated_slots = state->allocated_stack / BPF_REG_SIZE; + + /* We need to check that slots between [spi - nr_slots + 1, spi] are + * within [0, allocated_stack). + * + * Please note that the spi grows downwards. For example, a dynptr + * takes the size of two stack slots; the first slot will be at + * spi and the second slot will be at spi - 1. + */ + return spi - nr_slots + 1 >= 0 && spi < allocated_slots; +} + static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg) { @@ -647,6 +668,108 @@ static void mark_verifier_state_scratched(struct bpf_verifier_env *env) env->scratched_stack_slots = ~0ULL; } +static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) +{ + switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { + case DYNPTR_TYPE_LOCAL: + return BPF_DYNPTR_TYPE_LOCAL; + default: + return BPF_DYNPTR_TYPE_INVALID; + } +} + +static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + enum bpf_arg_type arg_type, int insn_idx) +{ + struct bpf_func_state *state = func(env, reg); + enum bpf_dynptr_type type; + int spi, i; + + spi = get_spi(reg->off); + + if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) + return -EINVAL; + + for (i = 0; i < BPF_REG_SIZE; i++) { + state->stack[spi].slot_type[i] = STACK_DYNPTR; + state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; + } + + type = arg_to_dynptr_type(arg_type); + if (type == BPF_DYNPTR_TYPE_INVALID) + return -EINVAL; + + state->stack[spi].spilled_ptr.dynptr.first_slot = true; + state->stack[spi].spilled_ptr.dynptr.type = type; + state->stack[spi - 1].spilled_ptr.dynptr.type = type; + + return 0; +} + +static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + struct bpf_func_state *state = func(env, reg); + int spi, i; + + spi = get_spi(reg->off); + + if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) + return -EINVAL; + + for (i = 0; i < BPF_REG_SIZE; i++) { + state->stack[spi].slot_type[i] = STACK_INVALID; + state->stack[spi - 1].slot_type[i] = STACK_INVALID; + } + + state->stack[spi].spilled_ptr.dynptr.first_slot = false; + state->stack[spi].spilled_ptr.dynptr.type = 0; + state->stack[spi - 1].spilled_ptr.dynptr.type = 0; + + return 0; +} + +static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + struct bpf_func_state *state = func(env, reg); + int spi = get_spi(reg->off); + int i; + + if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) + return true; + + for (i = 0; i < BPF_REG_SIZE; i++) { + if (state->stack[spi].slot_type[i] == STACK_DYNPTR || + state->stack[spi - 1].slot_type[i] == STACK_DYNPTR) + return false; + } + + return true; +} + +static bool 
is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + enum bpf_arg_type arg_type) +{ + struct bpf_func_state *state = func(env, reg); + int spi = get_spi(reg->off); + int i; + + if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || + !state->stack[spi].spilled_ptr.dynptr.first_slot) + return false; + + for (i = 0; i < BPF_REG_SIZE; i++) { + if (state->stack[spi].slot_type[i] != STACK_DYNPTR || + state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) + return false; + } + + /* ARG_PTR_TO_DYNPTR takes any type of dynptr */ + if (arg_type == ARG_PTR_TO_DYNPTR) + return true; + + return state->stack[spi].spilled_ptr.dynptr.type == arg_to_dynptr_type(arg_type); +} + /* The reg state of a pointer or a bounded scalar was saved when * it was spilled to the stack. */ @@ -5400,6 +5523,11 @@ static bool arg_type_is_release(enum bpf_arg_type type) return type & OBJ_RELEASE; } +static bool arg_type_is_dynptr(enum bpf_arg_type type) +{ + return base_type(type) == ARG_PTR_TO_DYNPTR; +} + static int int_ptr_type_to_size(enum bpf_arg_type type) { if (type == ARG_PTR_TO_INT) @@ -5539,6 +5667,7 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, [ARG_PTR_TO_TIMER] = &timer_types, [ARG_PTR_TO_KPTR] = &kptr_types, + [ARG_PTR_TO_DYNPTR] = &stack_ptr_types, }; static int check_reg_type(struct bpf_verifier_env *env, u32 regno, @@ -5628,8 +5757,13 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, bool fixed_off_ok = false; switch ((u32)type) { - case SCALAR_VALUE: /* Pointer types where reg offset is explicitly allowed: */ + case PTR_TO_STACK: + if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) { + verbose(env, "cannot pass in dynptr at an offset\n"); + return -EINVAL; + } + fallthrough; case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_MAP_KEY: @@ -5639,7 +5773,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, case PTR_TO_MEM | MEM_ALLOC: case PTR_TO_BUF: case PTR_TO_BUF | MEM_RDONLY: - case PTR_TO_STACK: + case SCALAR_VALUE: /* Some of the argument types nevertheless require a * zero register offset. */ @@ -5837,6 +5971,36 @@ skip_type_check: bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta); + } else if (arg_type_is_dynptr(arg_type)) { + if (arg_type & MEM_UNINIT) { + if (!is_dynptr_reg_valid_uninit(env, reg)) { + verbose(env, "Dynptr has to be an uninitialized dynptr\n"); + return -EINVAL; + } + + /* We only support one dynptr being uninitialized at the moment, + * which is sufficient for the helper functions we have right now. 
+ */ + if (meta->uninit_dynptr_regno) { + verbose(env, "verifier internal error: multiple uninitialized dynptr args\n"); + return -EFAULT; + } + + meta->uninit_dynptr_regno = regno; + } else if (!is_dynptr_reg_valid_init(env, reg, arg_type)) { + const char *err_extra = ""; + + switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { + case DYNPTR_TYPE_LOCAL: + err_extra = "local "; + break; + default: + break; + } + verbose(env, "Expected an initialized %sdynptr as arg #%d\n", + err_extra, arg + 1); + return -EINVAL; + } } else if (arg_type_is_alloc_size(arg_type)) { if (!tnum_is_const(reg->var_off)) { verbose(env, "R%d is not a known constant'\n", @@ -6970,9 +7134,27 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn regs = cur_regs(env); + if (meta.uninit_dynptr_regno) { + /* we write BPF_DW bits (8 bytes) at a time */ + for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) { + err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno, + i, BPF_DW, BPF_WRITE, -1, false); + if (err) + return err; + } + + err = mark_stack_slots_dynptr(env, ®s[meta.uninit_dynptr_regno], + fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1], + insn_idx); + if (err) + return err; + } + if (meta.release_regno) { err = -EINVAL; - if (meta.ref_obj_id) + if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) + err = unmark_stack_slots_dynptr(env, ®s[meta.release_regno]); + else if (meta.ref_obj_id) err = release_reference(env, meta.ref_obj_id); /* meta.ref_obj_id can only be 0 if register that is meant to be * released is NULL, which must be > R0. diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index d5452f7eb996..855b937e7585 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -634,6 +634,7 @@ class PrinterHelpers(Printer): 'struct file', 'struct bpf_timer', 'struct mptcp_sock', + 'struct bpf_dynptr', ] known_types = { '...', @@ -684,6 +685,7 @@ class PrinterHelpers(Printer): 'struct file', 'struct bpf_timer', 'struct mptcp_sock', + 'struct bpf_dynptr', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 56688bee20d9..610944cb3389 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6528,6 +6528,11 @@ struct bpf_timer { __u64 :64; } __attribute__((aligned(8))); +struct bpf_dynptr { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. -- cgit v1.2.3-59-g8ed1b From 263ae152e96253f40c2c276faad8629e096b3bad Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 23 May 2022 14:07:08 -0700 Subject: bpf: Add bpf_dynptr_from_mem for local dynptrs This patch adds a new api bpf_dynptr_from_mem: long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr); which initializes a dynptr to point to a bpf program's local memory. For now only local memory that is of reg type PTR_TO_MAP_VALUE is supported. 
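As a rough illustration, a program could use the new helper along these lines. This is a hedged sketch, not from this patch: the map, section, and program names are invented, and it assumes a libbpf toolchain whose generated helper definitions already declare bpf_dynptr_from_mem.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Scratch buffer kept in a single-slot array map, since only
 * PTR_TO_MAP_VALUE memory is accepted by bpf_dynptr_from_mem for now.
 */
struct scratch_val {
	__u8 buf[64];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct scratch_val);
} scratch SEC(".maps");

SEC("tp/syscalls/sys_enter_nanosleep")
int local_dynptr_example(void *ctx)
{
	struct bpf_dynptr ptr;
	__u32 key = 0;
	struct scratch_val *v = bpf_map_lookup_elem(&scratch, &key);

	if (!v)
		return 0;
	/* flags must be 0; an oversized size returns -E2BIG and the
	 * helper sets the dynptr to null.
	 */
	if (bpf_dynptr_from_mem(v->buf, sizeof(v->buf), 0, &ptr))
		return 0;
	/* ptr is now an initialized local dynptr over the map value;
	 * the read/write helpers added later in this series can
	 * operate on it.
	 */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Local dynptrs are not reference-counted, so no release call is needed; by contrast, the ringbuf dynptrs added later in this series must always be paired with a submit or discard.
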
Signed-off-by: Joanne Koong Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220523210712.3641569-3-joannelkoong@gmail.com --- include/uapi/linux/bpf.h | 12 ++++++++ kernel/bpf/helpers.c | 65 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 6 ++++ tools/include/uapi/linux/bpf.h | 12 ++++++++ 4 files changed, 95 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 610944cb3389..9be3644457dd 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5178,6 +5178,17 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. + * + * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr) + * Description + * Get a dynptr to local memory *data*. + * + * *data* must be a ptr to a map value. + * The maximum *size* supported is DYNPTR_MAX_SIZE. + * *flags* is currently unused. + * Return + * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, + * -EINVAL if flags is not 0. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5377,6 +5388,7 @@ union bpf_attr { FN(kptr_xchg), \ FN(map_lookup_percpu_elem), \ FN(skc_to_mptcp_sock), \ + FN(dynptr_from_mem), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index bad96131a510..d3e935c2e25e 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1412,6 +1412,69 @@ const struct bpf_func_proto bpf_kptr_xchg_proto = { .arg2_btf_id = BPF_PTR_POISON, }; +/* Since the upper 8 bits of dynptr->size is reserved, the + * maximum supported size is 2^24 - 1. + */ +#define DYNPTR_MAX_SIZE ((1UL << 24) - 1) +#define DYNPTR_TYPE_SHIFT 28 + +static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) +{ + ptr->size |= type << DYNPTR_TYPE_SHIFT; +} + +static int bpf_dynptr_check_size(u32 size) +{ + return size > DYNPTR_MAX_SIZE ? 
-E2BIG : 0; +} + +static void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, + enum bpf_dynptr_type type, u32 offset, u32 size) +{ + ptr->data = data; + ptr->offset = offset; + ptr->size = size; + bpf_dynptr_set_type(ptr, type); +} + +static void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) +{ + memset(ptr, 0, sizeof(*ptr)); +} + +BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) +{ + int err; + + err = bpf_dynptr_check_size(size); + if (err) + goto error; + + /* flags is currently unsupported */ + if (flags) { + err = -EINVAL; + goto error; + } + + bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); + + return 0; + +error: + bpf_dynptr_set_null(ptr); + return err; +} + +const struct bpf_func_proto bpf_dynptr_from_mem_proto = { + .func = bpf_dynptr_from_mem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, +}; + const struct bpf_func_proto bpf_get_current_task_proto __weak; const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; const struct bpf_func_proto bpf_probe_read_user_proto __weak; @@ -1466,6 +1529,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_loop_proto; case BPF_FUNC_strncmp: return &bpf_strncmp_proto; + case BPF_FUNC_dynptr_from_mem: + return &bpf_dynptr_from_mem_proto; default: break; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1fd0b81c3fec..b657d46f886e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7204,6 +7204,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, set_loop_callback_state); break; + case BPF_FUNC_dynptr_from_mem: + if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { + verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n", + reg_type_str(env, regs[BPF_REG_1].type)); + return -EACCES; + } } if (err) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 610944cb3389..9be3644457dd 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5178,6 +5178,17 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. + * + * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr) + * Description + * Get a dynptr to local memory *data*. + * + * *data* must be a ptr to a map value. + * The maximum *size* supported is DYNPTR_MAX_SIZE. + * *flags* is currently unused. + * Return + * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, + * -EINVAL if flags is not 0. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5377,6 +5388,7 @@ union bpf_attr { FN(kptr_xchg), \ FN(map_lookup_percpu_elem), \ FN(skc_to_mptcp_sock), \ + FN(dynptr_from_mem), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b From bc34dee65a65e9c920c420005b8a43f2a721a458 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 23 May 2022 14:07:09 -0700 Subject: bpf: Dynptr support for ring buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, our only way of writing dynamically-sized data into a ring buffer is through bpf_ringbuf_output but this incurs an extra memcpy cost. 
bpf_ringbuf_reserve + bpf_ringbuf_commit avoids this extra memcpy, but it can only safely support reservation sizes that are statically known since the verifier cannot guarantee that the bpf program won’t access memory outside the reserved space. The bpf_dynptr abstraction allows for dynamically-sized ring buffer reservations without the extra memcpy. There are 3 new APIs: long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr); void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags); void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags); These closely follow the functionalities of the original ringbuf APIs. For example, all ringbuffer dynptrs that have been reserved must be either submitted or discarded before the program exits. Signed-off-by: Joanne Koong Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20220523210712.3641569-4-joannelkoong@gmail.com --- include/linux/bpf.h | 15 +++++++- include/linux/bpf_verifier.h | 2 ++ include/uapi/linux/bpf.h | 35 +++++++++++++++++++ kernel/bpf/helpers.c | 14 +++++--- kernel/bpf/ringbuf.c | 78 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 52 ++++++++++++++++++++++++++-- tools/include/uapi/linux/bpf.h | 35 +++++++++++++++++++ 7 files changed, 223 insertions(+), 8 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b26c8176b9e0..c72321b6f306 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -395,11 +395,14 @@ enum bpf_type_flag { /* DYNPTR points to memory local to the bpf program. */ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS), + /* DYNPTR points to a ringbuf record. */ + DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; -#define DYNPTR_TYPE_FLAG_MASK DYNPTR_TYPE_LOCAL +#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF) /* Max number of base types. */ #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) @@ -2231,6 +2234,9 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; +extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; +extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; +extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto; extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; @@ -2402,6 +2408,13 @@ enum bpf_dynptr_type { BPF_DYNPTR_TYPE_INVALID, /* Points to memory that is local to the bpf program */ BPF_DYNPTR_TYPE_LOCAL, + /* Underlying data is a ringbuf record */ + BPF_DYNPTR_TYPE_RINGBUF, }; +void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, + enum bpf_dynptr_type type, u32 offset, u32 size); +void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); +int bpf_dynptr_check_size(u32 size); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index af5b2135215e..e8439f6cbe57 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -100,6 +100,8 @@ struct bpf_reg_state { * for the purpose of tracking that it's freed. 
* For PTR_TO_SOCKET this is used to share which pointers retain the * same reference to the socket, to determine proper reference freeing. + * For stack slots that are dynptrs, this is used to track references to + * the dynptr to determine proper reference freeing. */ u32 id; /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9be3644457dd..081a55540aa5 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5189,6 +5189,38 @@ union bpf_attr { * Return * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, * -EINVAL if flags is not 0. + * + * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr) + * Description + * Reserve *size* bytes of payload in a ring buffer *ringbuf* + * through the dynptr interface. *flags* must be 0. + * + * Please note that a corresponding bpf_ringbuf_submit_dynptr or + * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the + * reservation fails. This is enforced by the verifier. + * Return + * 0 on success, or a negative error in case of failure. + * + * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags) + * Description + * Submit reserved ring buffer sample, pointed to by *data*, + * through the dynptr interface. This is a no-op if the dynptr is + * invalid/null. + * + * For more information on *flags*, please see + * 'bpf_ringbuf_submit'. + * Return + * Nothing. Always succeeds. + * + * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags) + * Description + * Discard reserved ring buffer sample through the dynptr + * interface. This is a no-op if the dynptr is invalid/null. + * + * For more information on *flags*, please see + * 'bpf_ringbuf_discard'. + * Return + * Nothing. Always succeeds. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5389,6 +5421,9 @@ union bpf_attr { FN(map_lookup_percpu_elem), \ FN(skc_to_mptcp_sock), \ FN(dynptr_from_mem), \ + FN(ringbuf_reserve_dynptr), \ + FN(ringbuf_submit_dynptr), \ + FN(ringbuf_discard_dynptr), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index d3e935c2e25e..abb08999ff56 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1423,13 +1423,13 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ ptr->size |= type << DYNPTR_TYPE_SHIFT; } -static int bpf_dynptr_check_size(u32 size) +int bpf_dynptr_check_size(u32 size) { return size > DYNPTR_MAX_SIZE ? 
-E2BIG : 0; } -static void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, - enum bpf_dynptr_type type, u32 offset, u32 size) +void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, + enum bpf_dynptr_type type, u32 offset, u32 size) { ptr->data = data; ptr->offset = offset; @@ -1437,7 +1437,7 @@ static void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, bpf_dynptr_set_type(ptr, type); } -static void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) +void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) { memset(ptr, 0, sizeof(*ptr)); } @@ -1523,6 +1523,12 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_ringbuf_discard_proto; case BPF_FUNC_ringbuf_query: return &bpf_ringbuf_query_proto; + case BPF_FUNC_ringbuf_reserve_dynptr: + return &bpf_ringbuf_reserve_dynptr_proto; + case BPF_FUNC_ringbuf_submit_dynptr: + return &bpf_ringbuf_submit_dynptr_proto; + case BPF_FUNC_ringbuf_discard_dynptr: + return &bpf_ringbuf_discard_dynptr_proto; case BPF_FUNC_for_each_map_elem: return &bpf_for_each_map_elem_proto; case BPF_FUNC_loop: diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 311264ab80c4..ded4faeca192 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -475,3 +475,81 @@ const struct bpf_func_proto bpf_ringbuf_query_proto = { .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_ANYTHING, }; + +BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags, + struct bpf_dynptr_kern *, ptr) +{ + struct bpf_ringbuf_map *rb_map; + void *sample; + int err; + + if (unlikely(flags)) { + bpf_dynptr_set_null(ptr); + return -EINVAL; + } + + err = bpf_dynptr_check_size(size); + if (err) { + bpf_dynptr_set_null(ptr); + return err; + } + + rb_map = container_of(map, struct bpf_ringbuf_map, map); + + sample = __bpf_ringbuf_reserve(rb_map->rb, size); + if (!sample) { + bpf_dynptr_set_null(ptr); + return -EINVAL; + } + + bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size); + + return 0; +} + +const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = { + .func = bpf_ringbuf_reserve_dynptr, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT, +}; + +BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags) +{ + if (!ptr->data) + return 0; + + bpf_ringbuf_commit(ptr->data, flags, false /* discard */); + + bpf_dynptr_set_null(ptr); + + return 0; +} + +const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = { + .func = bpf_ringbuf_submit_dynptr, + .ret_type = RET_VOID, + .arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE, + .arg2_type = ARG_ANYTHING, +}; + +BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags) +{ + if (!ptr->data) + return 0; + + bpf_ringbuf_commit(ptr->data, flags, true /* discard */); + + bpf_dynptr_set_null(ptr); + + return 0; +} + +const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = { + .func = bpf_ringbuf_discard_dynptr, + .ret_type = RET_VOID, + .arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE, + .arg2_type = ARG_ANYTHING, +}; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b657d46f886e..8be140351966 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -187,6 +187,9 @@ struct bpf_verifier_stack_elem { POISON_POINTER_DELTA)) #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) +static int acquire_reference_state(struct 
bpf_verifier_env *env, int insn_idx); +static int release_reference(struct bpf_verifier_env *env, int ref_obj_id); + static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; @@ -673,17 +676,24 @@ static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { case DYNPTR_TYPE_LOCAL: return BPF_DYNPTR_TYPE_LOCAL; + case DYNPTR_TYPE_RINGBUF: + return BPF_DYNPTR_TYPE_RINGBUF; default: return BPF_DYNPTR_TYPE_INVALID; } } +static bool dynptr_type_refcounted(enum bpf_dynptr_type type) +{ + return type == BPF_DYNPTR_TYPE_RINGBUF; +} + static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, enum bpf_arg_type arg_type, int insn_idx) { struct bpf_func_state *state = func(env, reg); enum bpf_dynptr_type type; - int spi, i; + int spi, i, id; spi = get_spi(reg->off); @@ -703,6 +713,16 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ state->stack[spi].spilled_ptr.dynptr.type = type; state->stack[spi - 1].spilled_ptr.dynptr.type = type; + if (dynptr_type_refcounted(type)) { + /* The id is used to track proper releasing */ + id = acquire_reference_state(env, insn_idx); + if (id < 0) + return id; + + state->stack[spi].spilled_ptr.id = id; + state->stack[spi - 1].spilled_ptr.id = id; + } + return 0; } @@ -721,6 +741,13 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re state->stack[spi - 1].slot_type[i] = STACK_INVALID; } + /* Invalidate any slices associated with this dynptr */ + if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { + release_reference(env, state->stack[spi].spilled_ptr.id); + state->stack[spi].spilled_ptr.id = 0; + state->stack[spi - 1].spilled_ptr.id = 0; + } + state->stack[spi].spilled_ptr.dynptr.first_slot = false; state->stack[spi].spilled_ptr.dynptr.type = 0; state->stack[spi - 1].spilled_ptr.dynptr.type = 0; @@ -5859,7 +5886,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, skip_type_check: if (arg_type_is_release(arg_type)) { - if (!reg->ref_obj_id && !register_is_null(reg)) { + if (arg_type_is_dynptr(arg_type)) { + struct bpf_func_state *state = func(env, reg); + int spi = get_spi(reg->off); + + if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || + !state->stack[spi].spilled_ptr.id) { + verbose(env, "arg %d is an unacquired reference\n", regno); + return -EINVAL; + } + } else if (!reg->ref_obj_id && !register_is_null(reg)) { verbose(env, "R%d must be referenced when passed to release function\n", regno); return -EINVAL; @@ -5994,9 +6030,13 @@ skip_type_check: case DYNPTR_TYPE_LOCAL: err_extra = "local "; break; + case DYNPTR_TYPE_RINGBUF: + err_extra = "ringbuf "; + break; default: break; } + verbose(env, "Expected an initialized %sdynptr as arg #%d\n", err_extra, arg + 1); return -EINVAL; @@ -6122,7 +6162,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, case BPF_MAP_TYPE_RINGBUF: if (func_id != BPF_FUNC_ringbuf_output && func_id != BPF_FUNC_ringbuf_reserve && - func_id != BPF_FUNC_ringbuf_query) + func_id != BPF_FUNC_ringbuf_query && + func_id != BPF_FUNC_ringbuf_reserve_dynptr && + func_id != BPF_FUNC_ringbuf_submit_dynptr && + func_id != BPF_FUNC_ringbuf_discard_dynptr) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: @@ -6238,6 +6281,9 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, case BPF_FUNC_ringbuf_output: case BPF_FUNC_ringbuf_reserve: 
case BPF_FUNC_ringbuf_query: + case BPF_FUNC_ringbuf_reserve_dynptr: + case BPF_FUNC_ringbuf_submit_dynptr: + case BPF_FUNC_ringbuf_discard_dynptr: if (map->map_type != BPF_MAP_TYPE_RINGBUF) goto error; break; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9be3644457dd..081a55540aa5 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5189,6 +5189,38 @@ union bpf_attr { * Return * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, * -EINVAL if flags is not 0. + * + * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr) + * Description + * Reserve *size* bytes of payload in a ring buffer *ringbuf* + * through the dynptr interface. *flags* must be 0. + * + * Please note that a corresponding bpf_ringbuf_submit_dynptr or + * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the + * reservation fails. This is enforced by the verifier. + * Return + * 0 on success, or a negative error in case of failure. + * + * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags) + * Description + * Submit reserved ring buffer sample, pointed to by *data*, + * through the dynptr interface. This is a no-op if the dynptr is + * invalid/null. + * + * For more information on *flags*, please see + * 'bpf_ringbuf_submit'. + * Return + * Nothing. Always succeeds. + * + * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags) + * Description + * Discard reserved ring buffer sample through the dynptr + * interface. This is a no-op if the dynptr is invalid/null. + * + * For more information on *flags*, please see + * 'bpf_ringbuf_discard'. + * Return + * Nothing. Always succeeds. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5389,6 +5421,9 @@ union bpf_attr { FN(map_lookup_percpu_elem), \ FN(skc_to_mptcp_sock), \ FN(dynptr_from_mem), \ + FN(ringbuf_reserve_dynptr), \ + FN(ringbuf_submit_dynptr), \ + FN(ringbuf_discard_dynptr), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b From 13bbbfbea7598ea9f8d9c3d73bf053bb57f9c4b2 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 23 May 2022 14:07:10 -0700 Subject: bpf: Add bpf_dynptr_read and bpf_dynptr_write This patch adds two helper functions, bpf_dynptr_read and bpf_dynptr_write: long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset); long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len); The dynptr passed into these functions must be valid dynptrs that have been initialized. Signed-off-by: Joanne Koong Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220523210712.3641569-5-joannelkoong@gmail.com --- include/uapi/linux/bpf.h | 19 ++++++++++ kernel/bpf/helpers.c | 78 ++++++++++++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 19 ++++++++++ 3 files changed, 116 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 081a55540aa5..efe2505650e6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5221,6 +5221,23 @@ union bpf_attr { * 'bpf_ringbuf_discard'. * Return * Nothing. Always succeeds. + * + * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) + * Description + * Read *len* bytes from *src* into *dst*, starting from *offset* + * into *src*. 
+ * Return + * 0 on success, -E2BIG if *offset* + *len* exceeds the length + * of *src*'s data, -EINVAL if *src* is an invalid dynptr. + * + * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) + * Description + * Write *len* bytes from *src* into *dst*, starting from *offset* + * into *dst*. + * Return + * 0 on success, -E2BIG if *offset* + *len* exceeds the length + * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* + * is a read-only dynptr. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5424,6 +5441,8 @@ union bpf_attr { FN(ringbuf_reserve_dynptr), \ FN(ringbuf_submit_dynptr), \ FN(ringbuf_discard_dynptr), \ + FN(dynptr_read), \ + FN(dynptr_write), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index abb08999ff56..8cef3fb0d143 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1417,12 +1417,24 @@ const struct bpf_func_proto bpf_kptr_xchg_proto = { */ #define DYNPTR_MAX_SIZE ((1UL << 24) - 1) #define DYNPTR_TYPE_SHIFT 28 +#define DYNPTR_SIZE_MASK 0xFFFFFF +#define DYNPTR_RDONLY_BIT BIT(31) + +static bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr) +{ + return ptr->size & DYNPTR_RDONLY_BIT; +} static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) { ptr->size |= type << DYNPTR_TYPE_SHIFT; } +static u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr) +{ + return ptr->size & DYNPTR_SIZE_MASK; +} + int bpf_dynptr_check_size(u32 size) { return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; @@ -1442,6 +1454,16 @@ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) memset(ptr, 0, sizeof(*ptr)); } +static int bpf_dynptr_check_off_len(struct bpf_dynptr_kern *ptr, u32 offset, u32 len) +{ + u32 size = bpf_dynptr_get_size(ptr); + + if (len > size || offset > size - len) + return -E2BIG; + + return 0; +} + BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) { int err; @@ -1475,6 +1497,58 @@ const struct bpf_func_proto bpf_dynptr_from_mem_proto = { .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, }; +BPF_CALL_4(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, u32, offset) +{ + int err; + + if (!src->data) + return -EINVAL; + + err = bpf_dynptr_check_off_len(src, offset, len); + if (err) + return err; + + memcpy(dst, src->data + src->offset + offset, len); + + return 0; +} + +const struct bpf_func_proto bpf_dynptr_read_proto = { + .func = bpf_dynptr_read, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_PTR_TO_DYNPTR, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *, src, u32, len) +{ + int err; + + if (!dst->data || bpf_dynptr_is_rdonly(dst)) + return -EINVAL; + + err = bpf_dynptr_check_off_len(dst, offset, len); + if (err) + return err; + + memcpy(dst->data + dst->offset + offset, src, len); + + return 0; +} + +const struct bpf_func_proto bpf_dynptr_write_proto = { + .func = bpf_dynptr_write, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_DYNPTR, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE_OR_ZERO, +}; + const struct bpf_func_proto bpf_get_current_task_proto __weak; const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; const struct bpf_func_proto 
bpf_probe_read_user_proto __weak; @@ -1537,6 +1611,10 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_strncmp_proto; case BPF_FUNC_dynptr_from_mem: return &bpf_dynptr_from_mem_proto; + case BPF_FUNC_dynptr_read: + return &bpf_dynptr_read_proto; + case BPF_FUNC_dynptr_write: + return &bpf_dynptr_write_proto; default: break; } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 081a55540aa5..efe2505650e6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5221,6 +5221,23 @@ union bpf_attr { * 'bpf_ringbuf_discard'. * Return * Nothing. Always succeeds. + * + * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) + * Description + * Read *len* bytes from *src* into *dst*, starting from *offset* + * into *src*. + * Return + * 0 on success, -E2BIG if *offset* + *len* exceeds the length + * of *src*'s data, -EINVAL if *src* is an invalid dynptr. + * + * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) + * Description + * Write *len* bytes from *src* into *dst*, starting from *offset* + * into *dst*. + * Return + * 0 on success, -E2BIG if *offset* + *len* exceeds the length + * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* + * is a read-only dynptr. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5424,6 +5441,8 @@ union bpf_attr { FN(ringbuf_reserve_dynptr), \ FN(ringbuf_submit_dynptr), \ FN(ringbuf_discard_dynptr), \ + FN(dynptr_read), \ + FN(dynptr_write), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b From 34d4ef5775f776ec4b0d53a02d588bf3195cada6 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 23 May 2022 14:07:11 -0700 Subject: bpf: Add dynptr data slices This patch adds a new helper function void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len); which returns a pointer to the underlying data of a dynptr. *len* must be a statically known value. The bpf program may access the returned data slice as a normal buffer (eg can do direct reads and writes), since the verifier associates the length with the returned pointer, and enforces that no out of bounds accesses occur. Signed-off-by: Joanne Koong Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220523210712.3641569-6-joannelkoong@gmail.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 12 ++++++++++++ kernel/bpf/helpers.c | 28 ++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 23 +++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 12 ++++++++++++ 5 files changed, 76 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c72321b6f306..a7080c86fa76 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -488,6 +488,7 @@ enum bpf_return_type { RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM, + RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM, RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, /* This must be the last entry. 
Its purpose is to ensure the enum is diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index efe2505650e6..f4009dbdf62d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5238,6 +5238,17 @@ union bpf_attr { * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* * is a read-only dynptr. + * + * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) + * Description + * Get a pointer to the underlying dynptr data. + * + * *len* must be a statically known value. The returned data slice + * is invalidated whenever the dynptr is invalidated. + * Return + * Pointer to the underlying dynptr data, NULL if the dynptr is + * read-only, if the dynptr is invalid, or if the offset and length + * is out of bounds. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5443,6 +5454,7 @@ union bpf_attr { FN(ringbuf_discard_dynptr), \ FN(dynptr_read), \ FN(dynptr_write), \ + FN(dynptr_data), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 8cef3fb0d143..225806a02efb 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1549,6 +1549,32 @@ const struct bpf_func_proto bpf_dynptr_write_proto = { .arg4_type = ARG_CONST_SIZE_OR_ZERO, }; +BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) +{ + int err; + + if (!ptr->data) + return 0; + + err = bpf_dynptr_check_off_len(ptr, offset, len); + if (err) + return 0; + + if (bpf_dynptr_is_rdonly(ptr)) + return 0; + + return (unsigned long)(ptr->data + ptr->offset + offset); +} + +const struct bpf_func_proto bpf_dynptr_data_proto = { + .func = bpf_dynptr_data, + .gpl_only = false, + .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, + .arg1_type = ARG_PTR_TO_DYNPTR, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, +}; + const struct bpf_func_proto bpf_get_current_task_proto __weak; const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; const struct bpf_func_proto bpf_probe_read_user_proto __weak; @@ -1615,6 +1641,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_dynptr_read_proto; case BPF_FUNC_dynptr_write: return &bpf_dynptr_write_proto; + case BPF_FUNC_dynptr_data: + return &bpf_dynptr_data_proto; default: break; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8be140351966..aedac2ac02b9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5832,6 +5832,14 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, return __check_ptr_off_reg(env, reg, regno, fixed_off_ok); } +static u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + struct bpf_func_state *state = func(env, reg); + int spi = get_spi(reg->off); + + return state->stack[spi].spilled_ptr.id; +} + static int check_func_arg(struct bpf_verifier_env *env, u32 arg, struct bpf_call_arg_meta *meta, const struct bpf_func_proto *fn) @@ -7384,6 +7392,21 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn regs[BPF_REG_0].id = id; /* For release_reference() */ regs[BPF_REG_0].ref_obj_id = id; + } else if (func_id == BPF_FUNC_dynptr_data) { + int dynptr_id = 0, i; + + /* Find the id of the dynptr we're acquiring a reference to */ + for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { + if (arg_type_is_dynptr(fn->arg_type[i])) { + if (dynptr_id) { + verbose(env, "verifier internal error: multiple dynptr args in func\n"); + return 
-EFAULT; + } + dynptr_id = stack_slot_get_id(env, ®s[BPF_REG_1 + i]); + } + } + /* For release_reference() */ + regs[BPF_REG_0].ref_obj_id = dynptr_id; } do_refine_retval_range(regs, fn->ret_type, func_id, &meta); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index efe2505650e6..f4009dbdf62d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5238,6 +5238,17 @@ union bpf_attr { * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* * is a read-only dynptr. + * + * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) + * Description + * Get a pointer to the underlying dynptr data. + * + * *len* must be a statically known value. The returned data slice + * is invalidated whenever the dynptr is invalidated. + * Return + * Pointer to the underlying dynptr data, NULL if the dynptr is + * read-only, if the dynptr is invalid, or if the offset and length + * is out of bounds. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5443,6 +5454,7 @@ union bpf_attr { FN(ringbuf_discard_dynptr), \ FN(dynptr_read), \ FN(dynptr_write), \ + FN(dynptr_data), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-59-g8ed1b From e5499dd7253c8382d03f687f19a854adcc688357 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Wed, 25 May 2022 14:08:30 +0100 Subject: media: lirc: revert removal of unused feature flags Commit b2a90f4fcb14 ("media: lirc: remove unused lirc features") removed feature flags which were never implemented, but they are still used by the lirc daemon went built from source. Reinstate these symbols in order not to break the lirc build. Fixes: b2a90f4fcb14 ("media: lirc: remove unused lirc features") Link: https://lore.kernel.org/all/a0470450-ecfd-2918-e04a-7b57c1fd7694@kernel.org/ Reported-by: Jiri Slaby Cc: Mauro Carvalho Chehab Signed-off-by: Sean Young Signed-off-by: Linus Torvalds --- include/uapi/linux/lirc.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index 23b0f2c8ba81..8d7ca7c6af42 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -84,6 +84,13 @@ #define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) #define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) +/* + * Unused features. These features were never implemented, in tree or + * out of tree. These definitions are here so not to break the lircd build. + */ +#define LIRC_CAN_SET_REC_FILTER 0 +#define LIRC_CAN_NOTIFY_DECODE 0 + /*** IOCTL commands for lirc driver ***/ #define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) -- cgit v1.2.3-59-g8ed1b From caa28984163cb63ea0be4cb8dbf05defdc7303f9 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 25 May 2022 09:02:19 -0700 Subject: linux/types.h: reinstate "__bitwise__" macro for user space use Commit c724c866bb70 ("linux/types.h: remove unnecessary __bitwise__") was right that there are no users of __bitwise__ in the kernel, but it turns out there are user space users of it that do expect it. It is, after all, in the uapi directory, so user space usage is to be expected. Instead of reverting the commit completely, let's just clarify the situation so that it doesn't happen again, and have some in-code explanations for why that "__bitwise__" still exists. 
Reported-by: Jiri Slaby Cc: Bjorn Helgaas Link: https://lore.kernel.org/all/b5c0a68d-8387-4909-beea-f70ab9e6e3d5@kernel.org/ Signed-off-by: Linus Torvalds --- include/uapi/linux/types.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index c4dc597f3dcf..308433be33c2 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -26,6 +26,9 @@ #define __bitwise #endif +/* The kernel doesn't use this legacy form, but user space does */ +#define __bitwise__ __bitwise + typedef __u16 __bitwise __le16; typedef __u16 __bitwise __be16; typedef __u32 __bitwise __le32; -- cgit v1.2.3-59-g8ed1b From a7c41b4687f5902af70cd559806990930c8a307b Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Mon, 30 May 2022 21:15:20 +0800 Subject: io_uring: let IORING_OP_FILES_UPDATE support choosing fixed file slots One big issue with the file registration feature is that it needs user space apps to maintain free slot info about io_uring's fixed file table, which really is a burden for development. io_uring now supports choosing a free file slot for user space apps by using the IORING_FILE_INDEX_ALLOC flag in accept, open, and socket operations, but they need the app to use direct accept or direct open, which not all apps are prepared to use yet. To support apps that still need real fds, make the registration feature easier to use. Let IORING_OP_FILES_UPDATE support choosing fixed file slots, which will store the picked fixed file slots in the fd array and let the cqe return the number of slots allocated. Suggested-by: Hao Xu Signed-off-by: Xiaoguang Wang [axboe: move flag to uapi io_uring header, change goto to break, init] Signed-off-by: Jens Axboe --- fs/io_uring.c | 72 +++++++++++++++++++++++++++++++++++++------ include/uapi/linux/io_uring.h | 6 ++++ 2 files changed, 68 insertions(+), 10 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 11524ea86f1f..ed3416a7b2e9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -574,6 +574,7 @@ struct io_close { struct file *file; int fd; u32 file_slot; + u32 flags; }; struct io_timeout_data { @@ -1366,7 +1367,9 @@ static int io_req_prep_async(struct io_kiocb *req); static int io_install_fixed_file(struct io_kiocb *req, struct file *file, unsigned int issue_flags, u32 slot_index); -static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags); +static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags, + unsigned int offset); +static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags); static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer); static void io_eventfd_signal(struct io_ring_ctx *ctx); @@ -5947,14 +5950,18 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags) static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index) + if (sqe->off || sqe->addr || sqe->len || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); req->close.file_slot = READ_ONCE(sqe->file_index); - if (req->close.file_slot && req->close.fd) + req->close.flags = READ_ONCE(sqe->close_flags); + if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT) + return -EINVAL; + if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) && + req->close.file_slot && req->close.fd) return -EINVAL; return 0; @@ -5970,7 +5977,8 @@ static int
io_close(struct io_kiocb *req, unsigned int issue_flags) if (req->close.file_slot) { ret = io_close_fixed(req, issue_flags); - goto err; + if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT)) + goto err; } spin_lock(&files->file_lock); @@ -8003,6 +8011,41 @@ static int io_files_update_prep(struct io_kiocb *req, return 0; } +static int io_files_update_with_index_alloc(struct io_kiocb *req, + unsigned int issue_flags) +{ + __s32 __user *fds = u64_to_user_ptr(req->rsrc_update.arg); + unsigned int done; + struct file *file; + int ret, fd; + + for (done = 0; done < req->rsrc_update.nr_args; done++) { + if (copy_from_user(&fd, &fds[done], sizeof(fd))) { + ret = -EFAULT; + break; + } + + file = fget(fd); + if (!file) { + ret = -EBADF; + break; + } + ret = io_fixed_fd_install(req, issue_flags, file, + IORING_FILE_INDEX_ALLOC); + if (ret < 0) + break; + if (copy_to_user(&fds[done], &ret, sizeof(ret))) { + ret = -EFAULT; + __io_close_fixed(req, issue_flags, ret); + break; + } + } + + if (done) + return done; + return ret; +} + static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; @@ -8016,10 +8059,14 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) up.resv = 0; up.resv2 = 0; - io_ring_submit_lock(ctx, issue_flags); - ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, - &up, req->rsrc_update.nr_args); - io_ring_submit_unlock(ctx, issue_flags); + if (req->rsrc_update.offset == IORING_FILE_INDEX_ALLOC) { + ret = io_files_update_with_index_alloc(req, issue_flags); + } else { + io_ring_submit_lock(ctx, issue_flags); + ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, + &up, req->rsrc_update.nr_args); + io_ring_submit_unlock(ctx, issue_flags); + } if (ret < 0) req_set_fail(req); @@ -10183,9 +10230,9 @@ err: return ret; } -static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) +static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags, + unsigned int offset) { - unsigned int offset = req->close.file_slot - 1; struct io_ring_ctx *ctx = req->ctx; struct io_fixed_file *file_slot; struct file *file; @@ -10222,6 +10269,11 @@ out: return ret; } +static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) +{ + return __io_close_fixed(req, issue_flags, req->close.file_slot - 1); +} + static int __io_sqe_files_update(struct io_ring_ctx *ctx, struct io_uring_rsrc_update2 *up, unsigned nr_args) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 53e7dae92e42..776e0278f9dd 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -47,6 +47,7 @@ struct io_uring_sqe { __u32 unlink_flags; __u32 hardlink_flags; __u32 xattr_flags; + __u32 close_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -258,6 +259,11 @@ enum io_uring_op { */ #define IORING_ACCEPT_MULTISHOT (1U << 0) +/* + * close flags, store in sqe->close_flags + */ +#define IORING_CLOSE_FD_AND_FILE_SLOT (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3-59-g8ed1b From 3e0b8f529c10037ae0b369fc892e524eae5a5485 Mon Sep 17 00:00:00 2001 From: Arun Ajith S Date: Mon, 30 May 2022 10:14:14 +0000 Subject: net/ipv6: Expand and rename accept_unsolicited_na to accept_untracked_na RFC 9131 changes default behaviour of handling RX of NA messages when the corresponding entry is absent in the neighbour cache. 
The current implementation is limited to accepting just unsolicited NAs. However, the RFC is more generic and also accepts solicited NAs. Both types should result in adding a STALE entry in this case. Expand the accept_unsolicited_na behaviour to also accept solicited NAs, to be compliant with the RFC, and rename the sysctl knob to accept_untracked_na. Fixes: f9a2fb73318e ("net/ipv6: Introduce accept_unsolicited_na knob to implement router-side changes for RFC9131") Signed-off-by: Arun Ajith S Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20220530101414.65439-1-aajith@arista.com Signed-off-by: Paolo Abeni --- Documentation/networking/ip-sysctl.rst | 23 +++++------- include/linux/ipv6.h | 2 +- include/uapi/linux/ipv6.h | 2 +- net/ipv6/addrconf.c | 6 ++-- net/ipv6/ndisc.c | 42 +++++++++++++--------- .../selftests/net/ndisc_unsolicited_na_test.sh | 23 ++++++------ 6 files changed, 50 insertions(+), 48 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index b882d4238581..04216564a03c 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -2474,21 +2474,16 @@ drop_unsolicited_na - BOOLEAN By default this is turned off. -accept_unsolicited_na - BOOLEAN - Add a new neighbour cache entry in STALE state for routers on receiving an - unsolicited neighbour advertisement with target link-layer address option - specified. This is as per router-side behavior documented in RFC9131. - This has lower precedence than drop_unsolicited_na. +accept_untracked_na - BOOLEAN + Add a new neighbour cache entry in STALE state for routers on receiving a + neighbour advertisement (either solicited or unsolicited) with target + link-layer address option specified if no neighbour entry is already + present for the advertised IPv6 address. Without this knob, NAs received + for untracked addresses (absent in neighbour cache) are silently ignored. + + This is as per router-side behaviour documented in RFC9131. - ==== ====== ====== ============================================== - drop accept fwding behaviour - ---- ------ ------ ---------------------------------------------- - 1 X X Drop NA packet and don't pass up the stack - 0 0 X Pass NA packet up the stack, don't update NC - 0 1 0 Pass NA packet up the stack, don't update NC - 0 1 1 Pass NA packet up the stack, and add a STALE - NC entry - ==== ====== ====== ============================================== + This has lower precedence than drop_unsolicited_na.
This will optimize the return path for the initial off-link communication that is initiated by a directly connected host, by ensuring that diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 38c8203d52cb..37dfdcfcdd54 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -61,7 +61,7 @@ struct ipv6_devconf { __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; - __s32 accept_unsolicited_na; + __s32 accept_untracked_na; struct ipv6_stable_secret { bool initialized; struct in6_addr secret; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 549ddeaf788b..03cdbe798fe3 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -194,7 +194,7 @@ enum { DEVCONF_IOAM6_ID, DEVCONF_IOAM6_ID_WIDE, DEVCONF_NDISC_EVICT_NOCARRIER, - DEVCONF_ACCEPT_UNSOLICITED_NA, + DEVCONF_ACCEPT_UNTRACKED_NA, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ca0aa744593e..1b1932502e9e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5586,7 +5586,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_IOAM6_ID] = cnf->ioam6_id; array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide; array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier; - array[DEVCONF_ACCEPT_UNSOLICITED_NA] = cnf->accept_unsolicited_na; + array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na; } static inline size_t inet6_ifla6_size(void) @@ -7038,8 +7038,8 @@ static const struct ctl_table addrconf_sysctl[] = { .extra2 = (void *)SYSCTL_ONE, }, { - .procname = "accept_unsolicited_na", - .data = &ipv6_devconf.accept_unsolicited_na, + .procname = "accept_untracked_na", + .data = &ipv6_devconf.accept_untracked_na, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 254addad0dd3..b0dfe97ea4ee 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -979,7 +979,7 @@ static void ndisc_recv_na(struct sk_buff *skb) struct inet6_dev *idev = __in6_dev_get(dev); struct inet6_ifaddr *ifp; struct neighbour *neigh; - bool create_neigh; + u8 new_state; if (skb->len < sizeof(struct nd_msg)) { ND_PRINTK(2, warn, "NA: packet too short\n"); @@ -1000,7 +1000,7 @@ static void ndisc_recv_na(struct sk_buff *skb) /* For some 802.11 wireless deployments (and possibly other networks), * there will be a NA proxy and unsolicitd packets are attacks * and thus should not be accepted. - * drop_unsolicited_na takes precedence over accept_unsolicited_na + * drop_unsolicited_na takes precedence over accept_untracked_na */ if (!msg->icmph.icmp6_solicited && idev && idev->cnf.drop_unsolicited_na) @@ -1041,25 +1041,33 @@ static void ndisc_recv_na(struct sk_buff *skb) in6_ifa_put(ifp); return; } + + neigh = neigh_lookup(&nd_tbl, &msg->target, dev); + /* RFC 9131 updates original Neighbour Discovery RFC 4861. - * An unsolicited NA can now create a neighbour cache entry - * on routers if it has Target LL Address option. + * NAs with Target LL Address option without a corresponding + * entry in the neighbour cache can now create a STALE neighbour + * cache entry on routers. 
+ * + * entry accept fwding solicited behaviour + * ------- ------ ------ --------- ---------------------- + * present X X 0 Set state to STALE + * present X X 1 Set state to REACHABLE + * absent 0 X X Do nothing + * absent 1 0 X Do nothing + * absent 1 1 X Add a new STALE entry * - * drop accept fwding behaviour - * ---- ------ ------ ---------------------------------------------- - * 1 X X Drop NA packet and don't pass up the stack - * 0 0 X Pass NA packet up the stack, don't update NC - * 0 1 0 Pass NA packet up the stack, don't update NC - * 0 1 1 Pass NA packet up the stack, and add a STALE - * NC entry * Note that we don't do a (daddr == all-routers-mcast) check. */ - create_neigh = !msg->icmph.icmp6_solicited && lladdr && - idev && idev->cnf.forwarding && - idev->cnf.accept_unsolicited_na; - neigh = __neigh_lookup(&nd_tbl, &msg->target, dev, create_neigh); + new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE; + if (!neigh && lladdr && + idev && idev->cnf.forwarding && + idev->cnf.accept_untracked_na) { + neigh = neigh_create(&nd_tbl, &msg->target, dev); + new_state = NUD_STALE; + } - if (neigh) { + if (neigh && !IS_ERR(neigh)) { u8 old_flags = neigh->flags; struct net *net = dev_net(dev); @@ -1079,7 +1087,7 @@ static void ndisc_recv_na(struct sk_buff *skb) } ndisc_update(dev, neigh, lladdr, - msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE, + new_state, NEIGH_UPDATE_F_WEAK_OVERRIDE| (msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)| NEIGH_UPDATE_F_OVERRIDE_ISROUTER| diff --git a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh index f508657ee126..86e621b7b9c7 100755 --- a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh +++ b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh @@ -1,15 +1,14 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -# This test is for the accept_unsolicited_na feature to +# This test is for the accept_untracked_na feature to # enable RFC9131 behaviour. The following is the test-matrix. # drop accept fwding behaviour # ---- ------ ------ ---------------------------------------------- -# 1 X X Drop NA packet and don't pass up the stack -# 0 0 X Pass NA packet up the stack, don't update NC -# 0 1 0 Pass NA packet up the stack, don't update NC -# 0 1 1 Pass NA packet up the stack, and add a STALE -# NC entry +# 1 X X Don't update NC +# 0 0 X Don't update NC +# 0 1 0 Don't update NC +# 0 1 1 Add a STALE NC entry ret=0 # Kselftest framework requirement - SKIP code is 4. @@ -72,7 +71,7 @@ setup() set -e local drop_unsolicited_na=$1 - local accept_unsolicited_na=$2 + local accept_untracked_na=$2 local forwarding=$3 # Setup two namespaces and a veth tunnel across them. 
@@ -93,7 +92,7 @@ setup() ${IP_ROUTER_EXEC} sysctl -qw \ ${ROUTER_CONF}.drop_unsolicited_na=${drop_unsolicited_na} ${IP_ROUTER_EXEC} sysctl -qw \ - ${ROUTER_CONF}.accept_unsolicited_na=${accept_unsolicited_na} + ${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na} ${IP_ROUTER_EXEC} sysctl -qw ${ROUTER_CONF}.disable_ipv6=0 ${IP_ROUTER} addr add ${ROUTER_ADDR_WITH_MASK} dev ${ROUTER_INTF} @@ -144,13 +143,13 @@ link_up() { verify_ndisc() { local drop_unsolicited_na=$1 - local accept_unsolicited_na=$2 + local accept_untracked_na=$2 local forwarding=$3 neigh_show_output=$(${IP_ROUTER} neigh show \ to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale) if [ ${drop_unsolicited_na} -eq 0 ] && \ - [ ${accept_unsolicited_na} -eq 1 ] && \ + [ ${accept_untracked_na} -eq 1 ] && \ [ ${forwarding} -eq 1 ]; then # Neighbour entry expected to be present for 011 case [[ ${neigh_show_output} ]] @@ -179,14 +178,14 @@ test_unsolicited_na_combination() { test_unsolicited_na_common $1 $2 $3 test_msg=("test_unsolicited_na: " "drop_unsolicited_na=$1 " - "accept_unsolicited_na=$2 " + "accept_untracked_na=$2 " "forwarding=$3") log_test $? 0 "${test_msg[*]}" cleanup } test_unsolicited_na_combinations() { - # Args: drop_unsolicited_na accept_unsolicited_na forwarding + # Args: drop_unsolicited_na accept_untracked_na forwarding # Expect entry test_unsolicited_na_combination 0 1 1 -- cgit v1.2.3-59-g8ed1b From 13b00b135665c92065a27c0c39dd97e0f380bd4f Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 18 May 2022 16:38:00 +0300 Subject: vdpa: Add support for querying vendor statistics Allow reading the vendor statistics of a vdpa device. The specific statistics data are received from the upstream driver in the form of (attribute name, attribute value) pairs. An example of the statistics for a mlx5_vdpa device: received_desc - number of descriptors received by the virtqueue completed_desc - number of descriptors completed by the virtqueue A descriptor using indirect buffers is still counted as 1. In addition, N chained descriptors are counted correctly N times as one would expect. A new callback was added to vdpa_config_ops which provides the means for the vdpa driver to return statistics results. The interface allows for reading all the supported virtqueues, including the control virtqueue if it exists. Below are some examples taken from mlx5_vdpa which are introduced in the following patch: 1. Read statistics for the virtqueue at index 1 $ vdpa dev vstats show vdpa-a qidx 1 vdpa-a: queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836 2. Read statistics for the virtqueue at index 32 $ vdpa dev vstats show vdpa-a qidx 32 vdpa-a: queue_type control_vq queue_index 32 received_desc 62 completed_desc 62 3. Read statistics for the virtqueue at index 0 with json output $ vdpa -j dev vstats show vdpa-a qidx 0 {"vstats":{"vdpa-a":{ "queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\ "name":"completed_desc","value":417548}}} 4. Read statistics for the virtqueue at index 0 with pretty json output $ vdpa -jp dev vstats show vdpa-a qidx 0 { "vstats": { "vdpa-a": { "queue_type": "rx", "queue_index": 0, "name": "received_desc", "value": 417776, "name": "completed_desc", "value": 417548 } } } Signed-off-by: Eli Cohen Message-Id: <20220518133804.1075129-3-elic@nvidia.com> Signed-off-by: Michael S.
Tsirkin --- drivers/vdpa/vdpa.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/vdpa.h | 3 + include/uapi/linux/vdpa.h | 6 ++ 3 files changed, 171 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index fac89a0d8178..31b5eb2c0778 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -914,6 +914,108 @@ out: return err; } +static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg, + struct genl_info *info, u32 index) +{ + struct virtio_net_config config = {}; + u64 features; + u16 max_vqp; + u8 status; + int err; + + status = vdev->config->get_status(vdev); + if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { + NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete"); + return -EAGAIN; + } + vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config)); + + max_vqp = le16_to_cpu(config.max_virtqueue_pairs); + if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp)) + return -EMSGSIZE; + + features = vdev->config->get_driver_features(vdev); + if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, + features, VDPA_ATTR_PAD)) + return -EMSGSIZE; + + if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index)) + return -EMSGSIZE; + + err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack); + if (err) + return err; + + return 0; +} + +static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, + struct genl_info *info, u32 index) +{ + int err; + + mutex_lock(&vdev->cf_mutex); + if (!vdev->config->get_vendor_vq_stats) { + err = -EOPNOTSUPP; + goto out; + } + + err = vdpa_fill_stats_rec(vdev, msg, info, index); +out: + mutex_unlock(&vdev->cf_mutex); + return err; +} + +static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev, + struct sk_buff *msg, + struct genl_info *info, u32 index) +{ + u32 device_id; + void *hdr; + int err; + u32 portid = info->snd_portid; + u32 seq = info->snd_seq; + u32 flags = 0; + + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, + VDPA_CMD_DEV_VSTATS_GET); + if (!hdr) + return -EMSGSIZE; + + if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { + err = -EMSGSIZE; + goto undo_msg; + } + + device_id = vdev->config->get_device_id(vdev); + if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) { + err = -EMSGSIZE; + goto undo_msg; + } + + switch (device_id) { + case VIRTIO_ID_NET: + if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) { + NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value"); + err = -ERANGE; + break; + } + + err = vendor_stats_fill(vdev, msg, info, index); + break; + default: + err = -EOPNOTSUPP; + break; + } + genlmsg_end(msg, hdr); + + return err; + +undo_msg: + genlmsg_cancel(msg, hdr); + return err; +} + static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_device *vdev; @@ -995,6 +1097,60 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback * return msg->len; } +static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct vdpa_device *vdev; + struct sk_buff *msg; + const char *devname; + struct device *dev; + u32 index; + int err; + + if (!info->attrs[VDPA_ATTR_DEV_NAME]) + return -EINVAL; + + if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]) + return -EINVAL; + + devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]); + 
mutex_lock(&vdpa_dev_mutex); + dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); + if (!dev) { + NL_SET_ERR_MSG_MOD(info->extack, "device not found"); + err = -ENODEV; + goto dev_err; + } + vdev = container_of(dev, struct vdpa_device, dev); + if (!vdev->mdev) { + NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device"); + err = -EINVAL; + goto mdev_err; + } + err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index); + if (err) + goto mdev_err; + + err = genlmsg_reply(msg, info); + + put_device(dev); + mutex_unlock(&vdpa_dev_mutex); + + return err; + +mdev_err: + put_device(dev); +dev_err: + nlmsg_free(msg); + mutex_unlock(&vdpa_dev_mutex); + return err; +} + static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING }, [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, @@ -1035,6 +1191,12 @@ static const struct genl_ops vdpa_nl_ops[] = { .doit = vdpa_nl_cmd_dev_config_get_doit, .dumpit = vdpa_nl_cmd_dev_config_get_dumpit, }, + { + .cmd = VDPA_CMD_DEV_VSTATS_GET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = vdpa_nl_cmd_dev_stats_get_doit, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_family vdpa_nl_family __ro_after_init = { diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 8943a209202e..2ae8443331e1 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -276,6 +276,9 @@ struct vdpa_config_ops { const struct vdpa_vq_state *state); int (*get_vq_state)(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state); + int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx, + struct sk_buff *msg, + struct netlink_ext_ack *extack); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* vq irq is not expected to be changed once DRIVER_OK is set */ diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h index 1061d8d2d09d..25c55cab3d7c 100644 --- a/include/uapi/linux/vdpa.h +++ b/include/uapi/linux/vdpa.h @@ -18,6 +18,7 @@ enum vdpa_command { VDPA_CMD_DEV_DEL, VDPA_CMD_DEV_GET, /* can dump */ VDPA_CMD_DEV_CONFIG_GET, /* can dump */ + VDPA_CMD_DEV_VSTATS_GET, }; enum vdpa_attr { @@ -46,6 +47,11 @@ enum vdpa_attr { VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */ VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */ VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */ + + VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */ + VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */ + VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */ + /* new attributes must be added above here */ VDPA_ATTR_MAX, }; -- cgit v1.2.3-59-g8ed1b From 175d493c3c3e09a3abaa843068fae0f0ad42c47e Mon Sep 17 00:00:00 2001 From: Gautam Dawar Date: Wed, 30 Mar 2022 23:33:41 +0530 Subject: vhost: move the backend feature bits to vhost_types.h We should store feature bits in vhost_types.h, as has been done for e.g. VHOST_F_LOG_ALL. Signed-off-by: Jason Wang Signed-off-by: Gautam Dawar Message-Id: <20220330180436.24644-2-gdawar@xilinx.com> Signed-off-by: Michael S.
Tsirkin --- include/uapi/linux/vhost.h | 5 ----- include/uapi/linux/vhost_types.h | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 5d99e7c242a2..8f7b4a95d6f9 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -89,11 +89,6 @@ /* Set or get vhost backend capability */ -/* Use message type V2 */ -#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 -/* IOTLB can accept batching hints */ -#define VHOST_BACKEND_F_IOTLB_BATCH 0x2 - #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index f7f6a3a28977..76ee7016c501 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -153,4 +153,9 @@ struct vhost_vdpa_iova_range { /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ #define VHOST_NET_F_VIRTIO_NET_HDR 27 +/* Use message type V2 */ +#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 +/* IOTLB can accept batching hints */ +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2 + #endif -- cgit v1.2.3-59-g8ed1b From 91233ad711866f4e375742d84ef3ed6aab9daa96 Mon Sep 17 00:00:00 2001 From: Gautam Dawar Date: Wed, 30 Mar 2022 23:33:49 +0530 Subject: vhost: support ASID in IOTLB API This patch allows userspace to send ASID-based IOTLB messages to vhost. The idea is to use the reserved u32 field in the existing V2 IOTLB message. A vhost device should advertise this capability via the VHOST_BACKEND_F_IOTLB_ASID backend feature. Signed-off-by: Jason Wang Signed-off-by: Gautam Dawar Message-Id: <20220330180436.24644-10-gdawar@xilinx.com> Signed-off-by: Michael S.
Tsirkin --- drivers/vhost/vdpa.c | 5 ++++- drivers/vhost/vhost.c | 23 ++++++++++++++++++----- drivers/vhost/vhost.h | 4 ++-- include/uapi/linux/vhost_types.h | 6 +++++- 4 files changed, 29 insertions(+), 9 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 9202ff97ddb5..174c9e81df4e 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -870,7 +870,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, msg->perm); } -static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, +static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg) { struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev); @@ -879,6 +879,9 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb *iotlb = v->iotlb; int r = 0; + if (asid != 0) + return -EINVAL; + mutex_lock(&dev->mutex); r = vhost_dev_check_owner(dev); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d02173fb290c..d1e58f976f6e 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -468,7 +468,7 @@ void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight, bool use_worker, - int (*msg_handler)(struct vhost_dev *dev, + int (*msg_handler)(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg)) { struct vhost_virtqueue *vq; @@ -1090,11 +1090,14 @@ static bool umem_access_ok(u64 uaddr, u64 size, int access) return true; } -static int vhost_process_iotlb_msg(struct vhost_dev *dev, +static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg) { int ret = 0; + if (asid != 0) + return -EINVAL; + mutex_lock(&dev->mutex); vhost_dev_lock_vqs(dev); switch (msg->type) { @@ -1141,6 +1144,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, struct vhost_iotlb_msg msg; size_t offset; int type, ret; + u32 asid = 0; ret = copy_from_iter(&type, sizeof(type), from); if (ret != sizeof(type)) { @@ -1156,7 +1160,16 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, offset = offsetof(struct vhost_msg, iotlb) - sizeof(int); break; case VHOST_IOTLB_MSG_V2: - offset = sizeof(__u32); + if (vhost_backend_has_feature(dev->vqs[0], + VHOST_BACKEND_F_IOTLB_ASID)) { + ret = copy_from_iter(&asid, sizeof(asid), from); + if (ret != sizeof(asid)) { + ret = -EINVAL; + goto done; + } + offset = sizeof(__u16); + } else + offset = sizeof(__u32); break; default: ret = -EINVAL; @@ -1178,9 +1191,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, } if (dev->msg_handler) - ret = dev->msg_handler(dev, &msg); + ret = dev->msg_handler(dev, asid, &msg); else - ret = vhost_process_iotlb_msg(dev, &msg); + ret = vhost_process_iotlb_msg(dev, asid, &msg); if (ret) { ret = -EFAULT; goto done; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 638bb640d6b4..9f238d6c7b58 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -161,7 +161,7 @@ struct vhost_dev { int byte_weight; u64 kcov_handle; bool use_worker; - int (*msg_handler)(struct vhost_dev *dev, + int (*msg_handler)(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg); }; @@ -169,7 +169,7 @@ bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len); void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight, bool use_worker, - int (*msg_handler)(struct vhost_dev *dev, + int (*msg_handler)(struct vhost_dev *dev, u32 asid, 
struct vhost_iotlb_msg *msg)); long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index 76ee7016c501..634cee485abb 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -87,7 +87,7 @@ struct vhost_msg { struct vhost_msg_v2 { __u32 type; - __u32 reserved; + __u32 asid; union { struct vhost_iotlb_msg iotlb; __u8 padding[64]; @@ -157,5 +157,9 @@ struct vhost_vdpa_iova_range { #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 /* IOTLB can accept batching hints */ #define VHOST_BACKEND_F_IOTLB_BATCH 0x2 +/* IOTLB can accept address space identifier through V2 type of IOTLB + * message + */ +#define VHOST_BACKEND_F_IOTLB_ASID 0x3 #endif -- cgit v1.2.3-59-g8ed1b From 3ace88bd37436abc84906312146fe5158a469142 Mon Sep 17 00:00:00 2001 From: Gautam Dawar Date: Wed, 30 Mar 2022 23:33:51 +0530 Subject: vhost-vdpa: introduce uAPI to get the number of virtqueue groups Following the vDPA support for multiple address spaces, this patch introduces a uAPI for userspace to query the number of virtqueue groups supported by the vDPA device. Signed-off-by: Jason Wang Signed-off-by: Gautam Dawar Message-Id: <20220330180436.24644-12-gdawar@xilinx.com> Signed-off-by: Michael S.
Tsirkin --- drivers/vhost/vdpa.c | 3 +++ include/uapi/linux/vhost.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 92f78df0f685..a017011ad1f5 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -563,6 +563,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, r = copy_to_user(argp, &v->vdpa->ngroups, sizeof(v->vdpa->ngroups)); break; + case VHOST_VDPA_GET_AS_NUM: + r = copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)); + break; case VHOST_SET_LOG_BASE: case VHOST_SET_LOG_FD: r = -ENOIOCTLCMD; diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 61317c61d768..51322008901a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -154,4 +154,6 @@ /* Get the number of virtqueue groups. */ #define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32) +/* Get the number of address spaces. */ +#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int) #endif -- cgit v1.2.3-59-g8ed1b From 2d1fcb7758e49fd9caf150f3c70804b95b2ce80c Mon Sep 17 00:00:00 2001 From: Gautam Dawar Date: Wed, 30 Mar 2022 23:33:53 +0530 Subject: vhost-vdpa: uAPI to get virtqueue group id Following the support for virtqueue groups in vDPA, this patch introduces a uAPI to get the virtqueue group ID for a specific virtqueue in vhost-vdpa. Signed-off-by: Jason Wang Signed-off-by: Gautam Dawar Message-Id: <20220330180436.24644-14-gdawar@xilinx.com> Signed-off-by: Michael S. Tsirkin --- drivers/vhost/vdpa.c | 8 ++++++++ include/uapi/linux/vhost.h | 8 ++++++++ 2 files changed, 16 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index a017011ad1f5..aa5cacdc5263 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -465,6 +465,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, return -EFAULT; ops->set_vq_ready(vdpa, idx, s.num); return 0; + case VHOST_VDPA_GET_VRING_GROUP: + s.index = idx; + s.num = ops->get_vq_group(vdpa, idx); + if (s.num >= vdpa->ngroups) + return -EIO; + else if (copy_to_user(argp, &s, sizeof(s))) + return -EFAULT; + return 0; case VHOST_GET_VRING_BASE: r = ops->get_vq_state(v->vdpa, idx, &vq_state); if (r) diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 51322008901a..668914c87f74 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -156,4 +156,12 @@ /* Get the number of address spaces. */ #define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int) + +/* Get the group for a virtqueue: read index, write group in num, + * The virtqueue index is stored in the index field of + * vhost_vring_state. The group for this specific virtqueue is + * returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \ + struct vhost_vring_state) #endif -- cgit v1.2.3-59-g8ed1b From 84d7c8fd3aade2fe79313003ed06ede431ec2a6d Mon Sep 17 00:00:00 2001 From: Gautam Dawar Date: Wed, 30 Mar 2022 23:33:54 +0530 Subject: vhost-vdpa: introduce uAPI to set group ASID Following the vDPA support for associating an ASID with a specific virtqueue group, this patch adds a uAPI to set it from userspace. Signed-off-by: Jason Wang Signed-off-by: Gautam Dawar Message-Id: <20220330180436.24644-15-gdawar@xilinx.com> Signed-off-by: Michael S.
Tsirkin --- drivers/vhost/vdpa.c | 8 ++++++++ include/uapi/linux/vhost.h | 7 +++++++ 2 files changed, 15 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index aa5cacdc5263..6c7ee0f18892 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -473,6 +473,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, else if (copy_to_user(argp, &s, sizeof(s))) return -EFAULT; return 0; + case VHOST_VDPA_SET_GROUP_ASID: + if (copy_from_user(&s, argp, sizeof(s))) + return -EFAULT; + if (s.num >= vdpa->nas) + return -EINVAL; + if (!ops->set_group_asid) + return -EOPNOTSUPP; + return ops->set_group_asid(vdpa, idx, s.num); case VHOST_GET_VRING_BASE: r = ops->get_vq_state(v->vdpa, idx, &vq_state); if (r) diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 668914c87f74..cab645d4a645 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -164,4 +164,11 @@ */ #define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \ struct vhost_vring_state) +/* Set the ASID for a virtqueue group. The group index is stored in + * the index field of vhost_vring_state, the ASID associated with this + * group is stored at num field of vhost_vring_state. + */ +#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \ + struct vhost_vring_state) + #endif -- cgit v1.2.3-59-g8ed1b From 662ce1dc9caf493c309200edbe38d186f1ea20d0 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 1 Jun 2022 15:55:25 -0700 Subject: delayacct: track delays from write-protect copy Delay accounting does not track the delay of write-protect copy. When tasks trigger many write-protect copies (including COW and unsharing of anonymous pages[1]), they may spend a significant amount of time waiting for them. Tracking the delay of tasks in write-protect copy can help users evaluate the impact of using KSM or fork() or GUP.
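For illustration only (not part of the patch), the two new taskstats fields reduce to the same per-task average that getdelays prints; this sketch assumes a struct taskstats already fetched over the taskstats netlink socket, with delay totals accumulated in nanoseconds like the other delay counters:

    #include <linux/taskstats.h>

    /* hypothetical helper: average write-protect copy delay in ms */
    static double wpcopy_average_ms(const struct taskstats *t)
    {
            if (!t->wpcopy_count)
                    return 0.0;
            return (double)t->wpcopy_delay_total / t->wpcopy_count / 1e6;
    }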
Also update tools/accounting/getdelays.c: / # ./getdelays -dl -p 231 print delayacct stats ON listen forever PID 231 CPU count real total virtual total delay total delay average 6247 1859000000 2154070021 1674255063 0.268ms IO count delay total delay average 0 0 0ms SWAP count delay total delay average 0 0 0ms RECLAIM count delay total delay average 0 0 0ms THRASHING count delay total delay average 0 0 0ms COMPACT count delay total delay average 3 72758 0ms WPCOPY count delay total delay average 3635 271567604 0ms [1] commit 31cc5bc4af70("mm: support GUP-triggered unsharing of anonymous pages") Link: https://lkml.kernel.org/r/20220409014342.2505532-1-yang.yang29@zte.com.cn Signed-off-by: Yang Yang Reviewed-by: David Hildenbrand Reviewed-by: Jiang Xuexin Reviewed-by: Ran Xiaokai Reviewed-by: wangyong Cc: Jonathan Corbet Cc: Balbir Singh Cc: Mike Kravetz Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- Documentation/accounting/delay-accounting.rst | 5 ++++- include/linux/delayacct.h | 28 +++++++++++++++++++++++++++ include/uapi/linux/taskstats.h | 6 +++++- kernel/delayacct.c | 16 +++++++++++++++ mm/hugetlb.c | 8 ++++++++ mm/memory.c | 8 ++++++++ tools/accounting/getdelays.c | 8 +++++++- 7 files changed, 76 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst index 197fe319cbec..241d1a87f2cd 100644 --- a/Documentation/accounting/delay-accounting.rst +++ b/Documentation/accounting/delay-accounting.rst @@ -15,6 +15,7 @@ c) swapping in pages d) memory reclaim e) thrashing page cache f) direct compact +g) write-protect copy and makes these statistics available to userspace through the taskstats interface. @@ -48,7 +49,7 @@ this structure. See for a description of the fields pertaining to delay accounting. It will generally be in the form of counters returning the cumulative delay seen for cpu, sync block I/O, swapin, memory reclaim, thrash page -cache, direct compact etc. +cache, direct compact, write-protect copy etc. 
Taking the difference of two successive readings of a given counter (say cpu_delay_total) for a task will give the delay @@ -117,6 +118,8 @@ Get sum of delays, since system boot, for all pids with tgid 5:: 0 0 0ms COMPACT count delay total delay average 0 0 0ms + WPCOPY count delay total delay average + 0 0 0ms Get IO accounting for pid 1, it works only with -p:: diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6b16a6930a19..58aea2d7385c 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -45,9 +45,13 @@ struct task_delay_info { u64 compact_start; u64 compact_delay; /* wait for memory compact */ + u64 wpcopy_start; + u64 wpcopy_delay; /* wait for write-protect copy */ + u32 freepages_count; /* total count of memory reclaim */ u32 thrashing_count; /* total count of thrash waits */ u32 compact_count; /* total count of memory compact */ + u32 wpcopy_count; /* total count of write-protect copy */ }; #endif @@ -75,6 +79,8 @@ extern void __delayacct_swapin_start(void); extern void __delayacct_swapin_end(void); extern void __delayacct_compact_start(void); extern void __delayacct_compact_end(void); +extern void __delayacct_wpcopy_start(void); +extern void __delayacct_wpcopy_end(void); static inline void delayacct_tsk_init(struct task_struct *tsk) { @@ -191,6 +197,24 @@ static inline void delayacct_compact_end(void) __delayacct_compact_end(); } +static inline void delayacct_wpcopy_start(void) +{ + if (!static_branch_unlikely(&delayacct_key)) + return; + + if (current->delays) + __delayacct_wpcopy_start(); +} + +static inline void delayacct_wpcopy_end(void) +{ + if (!static_branch_unlikely(&delayacct_key)) + return; + + if (current->delays) + __delayacct_wpcopy_end(); +} + #else static inline void delayacct_init(void) {} @@ -225,6 +249,10 @@ static inline void delayacct_compact_start(void) {} static inline void delayacct_compact_end(void) {} +static inline void delayacct_wpcopy_start(void) +{} +static inline void delayacct_wpcopy_end(void) +{} #endif /* CONFIG_TASK_DELAY_ACCT */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index 736154171489..a7f5b11a8f1b 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -34,7 +34,7 @@ */ -#define TASKSTATS_VERSION 12 +#define TASKSTATS_VERSION 13 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -194,6 +194,10 @@ struct taskstats { __u64 ac_exe_dev; /* program binary device ID */ __u64 ac_exe_inode; /* program binary inode number */ /* v12 end */ + + /* v13: Delay waiting for write-protect copy */ + __u64 wpcopy_count; + __u64 wpcopy_delay_total; }; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 2c1e18f7c5cf..164ed9ef77a3 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -177,11 +177,14 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp; tmp = d->compact_delay_total + tsk->delays->compact_delay; d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp; + tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay; + d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 
 	d->blkio_count += tsk->delays->blkio_count;
 	d->swapin_count += tsk->delays->swapin_count;
 	d->freepages_count += tsk->delays->freepages_count;
 	d->thrashing_count += tsk->delays->thrashing_count;
 	d->compact_count += tsk->delays->compact_count;
+	d->wpcopy_count += tsk->delays->wpcopy_count;
 
 	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
 	return 0;
@@ -249,3 +252,16 @@ void __delayacct_compact_end(void)
 		      &current->delays->compact_delay,
 		      &current->delays->compact_count);
 }
+
+void __delayacct_wpcopy_start(void)
+{
+	current->delays->wpcopy_start = local_clock();
+}
+
+void __delayacct_wpcopy_end(void)
+{
+	delayacct_end(&current->delays->lock,
+		      &current->delays->wpcopy_start,
+		      &current->delays->wpcopy_delay,
+		      &current->delays->wpcopy_count);
+}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c468ac1d069..a57e1be41401 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include <linux/delayacct.h>
 
 #include
 #include
@@ -5230,6 +5231,8 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte = huge_ptep_get(ptep);
 	old_page = pte_page(pte);
 
+	delayacct_wpcopy_start();
+
 retry_avoidcopy:
 	/*
 	 * If no-one else is actually using this page, we're the exclusive
@@ -5240,6 +5243,8 @@ retry_avoidcopy:
 		page_move_anon_rmap(old_page, vma);
 		if (likely(!unshare))
 			set_huge_ptep_writable(vma, haddr, ptep);
+
+		delayacct_wpcopy_end();
 		return 0;
 	}
 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
@@ -5309,6 +5314,7 @@ retry_avoidcopy:
 			 * race occurs while re-acquiring page table
 			 * lock, and our job is done.
 			 */
+			delayacct_wpcopy_end();
 			return 0;
 		}
 
@@ -5367,6 +5373,8 @@ out_release_old:
 	put_page(old_page);
 
 	spin_lock(ptl); /* Caller expects lock to be held */
+
+	delayacct_wpcopy_end();
 	return ret;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 21dadf03f089..7a089145cad4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3090,6 +3090,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	int page_copied = 0;
 	struct mmu_notifier_range range;
 
+	delayacct_wpcopy_start();
+
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 
@@ -3114,6 +3116,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			put_page(new_page);
 			if (old_page)
 				put_page(old_page);
+
+			delayacct_wpcopy_end();
 			return 0;
 		}
 	}
@@ -3220,12 +3224,16 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			free_swap_cache(old_page);
 		put_page(old_page);
 	}
+
+	delayacct_wpcopy_end();
 	return (page_copied && !unshare) ? VM_FAULT_WRITE : 0;
 oom_free_new:
 	put_page(new_page);
 oom:
 	if (old_page)
 		put_page(old_page);
+
+	delayacct_wpcopy_end();
 	return VM_FAULT_OOM;
 }
 
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 11e86739456d..e83e6e47a21e 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -207,6 +207,8 @@ static void print_delayacct(struct taskstats *t)
 	       "THRASHING%12s%15s%15s\n"
 	       "      %15llu%15llu%15llums\n"
 	       "COMPACT  %12s%15s%15s\n"
+	       "      %15llu%15llu%15llums\n"
+	       "WPCOPY   %12s%15s%15s\n"
 	       "      %15llu%15llu%15llums\n",
 	       "count", "real total", "virtual total",
 	       "delay total", "delay average",
@@ -234,7 +236,11 @@ static void print_delayacct(struct taskstats *t)
 	       "count", "delay total", "delay average",
 	       (unsigned long long)t->compact_count,
 	       (unsigned long long)t->compact_delay_total,
-	       average_ms(t->compact_delay_total, t->compact_count));
+	       average_ms(t->compact_delay_total, t->compact_count),
+	       "count", "delay total", "delay average",
+	       (unsigned long long)t->wpcopy_count,
+	       (unsigned long long)t->wpcopy_delay_total,
+	       average_ms(t->wpcopy_delay_total, t->wpcopy_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
-- cgit v1.2.3-59-g8ed1b
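For illustration, here is a minimal userspace sketch (not part of the patch) of how the new v13 taskstats fields would be consumed once a struct taskstats has been fetched over the taskstats netlink interface. The helper mirrors the average computation in tools/accounting/getdelays.c; the netlink plumbing is assumed to exist elsewhere, and headers new enough to carry TASKSTATS_VERSION 13 are required:

#include <stdio.h>
#include <linux/taskstats.h>

/* Delay totals are accumulated in nanoseconds; convert to milliseconds. */
static double average_ms(unsigned long long total, unsigned long long count)
{
	return count ? (double)total / 1000000.0 / count : 0.0;
}

/* Print the write-protect copy statistics added in TASKSTATS_VERSION 13. */
static void print_wpcopy(const struct taskstats *t)
{
	printf("WPCOPY  count %llu  delay total %llu  delay average %.3fms\n",
	       (unsigned long long)t->wpcopy_count,
	       (unsigned long long)t->wpcopy_delay_total,
	       average_ms(t->wpcopy_delay_total, t->wpcopy_count));
}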
From 8d3398ba2a0d1e25690f830192b7834acab003ec Mon Sep 17 00:00:00 2001
From: Tobias Klauser
Date: Tue, 31 May 2022 11:43:45 +0200
Subject: socket: Don't use u8 type in uapi socket.h

Use plain 255 instead, which also avoids introducing an additional
header dependency on <linux/types.h>.

Fixes: 26859240e4ee ("txhash: Add socket option to control TX hash rethink behavior")
Signed-off-by: Tobias Klauser
Link: https://lore.kernel.org/r/20220531094345.13801-1-tklauser@distanz.ch
Signed-off-by: Jakub Kicinski
---
 include/uapi/linux/socket.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/uapi/linux')

diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index 51d6bb2f6765..d3fcd3b5ec53 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -31,7 +31,7 @@ struct __kernel_sockaddr_storage {
 
 #define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK)
 
-#define SOCK_TXREHASH_DEFAULT	((u8)-1)
+#define SOCK_TXREHASH_DEFAULT	255
 #define SOCK_TXREHASH_DISABLED	0
 #define SOCK_TXREHASH_ENABLED	1
 
-- cgit v1.2.3-59-g8ed1b
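As a usage sketch (again, not from the patch itself): the SOCK_TXREHASH_* values above are what an application passes to the SO_TXREHASH socket option introduced by the commit being fixed. Error handling is omitted, and the fallback definition of SO_TXREHASH is only needed where libc headers predate it:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/socket.h>	/* SOCK_TXREHASH_* */

#ifndef SO_TXREHASH
#define SO_TXREHASH 74		/* from asm-generic/socket.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = SOCK_TXREHASH_DISABLED;
	socklen_t len = sizeof(val);

	/* Opt this socket out of TX hash rethink. */
	setsockopt(fd, SOL_SOCKET, SO_TXREHASH, &val, sizeof(val));

	/* Read it back: 0 (disabled), 1 (enabled) or 255 (default). */
	getsockopt(fd, SOL_SOCKET, SO_TXREHASH, &val, &len);
	printf("txrehash: %d\n", val);
	return 0;
}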
From 8cc5b032240ae5220b62c689c20459d3e1825b2d Mon Sep 17 00:00:00 2001
From: Carlos Llamas
Date: Wed, 1 Jun 2022 01:00:17 +0000
Subject: binder: fix sender_euid type in uapi header

The {pid,uid}_t fields of struct binder_transaction were recently
replaced to use kernel types in commit 169adc2b6b3c ("android/binder.h:
add linux/android/binder(fs).h to UAPI compile-test coverage").

However, using __kernel_uid_t here breaks backwards compatibility in
architectures using 16 bits for this type, since glibc and some others
still expect a 32-bit uid_t. Instead, let's use __kernel_uid32_t which
avoids this compatibility problem.

Fixes: 169adc2b6b3c ("android/binder.h: add linux/android/binder(fs).h to UAPI compile-test coverage")
Reported-by: Christopher Ferris
Signed-off-by: Carlos Llamas
Acked-by: Todd Kjos
Signed-off-by: Arnd Bergmann
---
 include/uapi/linux/android/binder.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/uapi/linux')

diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 11157fae8a8e..688bcdaeed53 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -289,7 +289,7 @@ struct binder_transaction_data {
 
 	/* General information about the transaction. */
 	__u32		flags;
 	__kernel_pid_t	sender_pid;
-	__kernel_uid_t	sender_euid;
+	__kernel_uid32_t	sender_euid;
 	binder_size_t	data_size;	/* number of bytes of data */
 	binder_size_t	offsets_size;	/* number of bytes of offsets */
 
-- cgit v1.2.3-59-g8ed1b
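To make the 16-bit versus 32-bit point concrete, a hypothetical compile-time check in the spirit of this fix, asserting that the uapi field keeps a fixed 32-bit width on every ABI, could look like this (illustrative only, not code from the patch):

#include <assert.h>
#include <linux/android/binder.h>

/*
 * __kernel_uid32_t is always 32 bits, while __kernel_uid_t may be
 * 16 bits on some architectures; a 16-bit field here would shift the
 * offset of every later member as seen by a 32-bit-uid_t userspace.
 */
static_assert(sizeof(((struct binder_transaction_data *)0)->sender_euid) == 4,
	      "sender_euid must stay 32 bits wide in the uapi ABI");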
From 08145b087e4481458f6075f3af58021a3cf8a940 Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Tue, 31 May 2022 18:04:10 +0800
Subject: LoongArch: Add ELF-related definitions

Add ELF-related definitions for LoongArch, including: EM_LOONGARCH,
KEXEC_ARCH_LOONGARCH, AUDIT_ARCH_LOONGARCH32, AUDIT_ARCH_LOONGARCH64
and NT_LOONGARCH_*.

Reviewed-by: WANG Xuerui
Reviewed-by: Jiaxun Yang
Signed-off-by: Huacai Chen
---
 include/uapi/linux/audit.h  | 2 ++
 include/uapi/linux/elf-em.h | 1 +
 include/uapi/linux/elf.h    | 5 +++++
 include/uapi/linux/kexec.h  | 1 +
 scripts/sorttable.c         | 5 +++++
 5 files changed, 14 insertions(+)

(limited to 'include/uapi/linux')

diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 8eda133ca4c1..7c1dc818b1d5 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -439,6 +439,8 @@ enum {
 #define AUDIT_ARCH_UNICORE	(EM_UNICORE|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_XTENSA	(EM_XTENSA)
+#define AUDIT_ARCH_LOONGARCH32	(EM_LOONGARCH|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_LOONGARCH64	(EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 
 #define AUDIT_PERM_EXEC		1
 #define AUDIT_PERM_WRITE	2
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index f47e853546fa..ef38c2bc5ab7 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -51,6 +51,7 @@
 #define EM_RISCV	243	/* RISC-V */
 #define EM_BPF		247	/* Linux BPF - in-kernel virtual machine */
 #define EM_CSKY		252	/* C-SKY */
+#define EM_LOONGARCH	258	/* LoongArch */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
 
 /*
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c4abd09c3da9..2b9f5e9985e5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -438,6 +438,11 @@ typedef struct elf64_shdr {
 #define NT_MIPS_DSP	0x800	/* MIPS DSP ASE registers */
 #define NT_MIPS_FP_MODE	0x801	/* MIPS floating-point mode */
 #define NT_MIPS_MSA	0x802	/* MIPS SIMD registers */
+#define NT_LOONGARCH_CPUCFG	0xa00	/* LoongArch CPU config registers */
+#define NT_LOONGARCH_CSR	0xa01	/* LoongArch control and status registers */
+#define NT_LOONGARCH_LSX	0xa02	/* LoongArch Loongson SIMD Extension registers */
+#define NT_LOONGARCH_LASX	0xa03	/* LoongArch Loongson Advanced SIMD Extension registers */
+#define NT_LOONGARCH_LBT	0xa04	/* LoongArch Loongson Binary Translation registers */
 
 /* Note types with note name "GNU" */
 #define NT_GNU_PROPERTY_TYPE_0	5
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index fb7e2ef60825..981016e05cfa 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -43,6 +43,7 @@
 #define KEXEC_ARCH_MIPS		( 8 << 16)
 #define KEXEC_ARCH_AARCH64	(183 << 16)
 #define KEXEC_ARCH_RISCV	(243 << 16)
+#define KEXEC_ARCH_LOONGARCH	(258 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index d00504c5f530..fba40e99f354 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -60,6 +60,10 @@
 #define EM_RISCV	243
 #endif
 
+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH	258
+#endif
+
 static uint32_t (*r)(const uint32_t *);
 static uint16_t (*r2)(const uint16_t *);
 static uint64_t (*r8)(const uint64_t *);
@@ -313,6 +317,7 @@ static int do_file(char const *const fname, void *addr)
 	case EM_ARCOMPACT:
 	case EM_ARCV2:
 	case EM_ARM:
+	case EM_LOONGARCH:
 	case EM_MICROBLAZE:
 	case EM_MIPS:
 	case EM_XTENSA:
-- cgit v1.2.3-59-g8ed1b
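One common consumer of the new AUDIT_ARCH_* constants is a seccomp BPF filter, which pins down the syscall ABI before trusting syscall numbers. A minimal sketch (not part of this patch) of such an architecture check for LoongArch:

#include <stddef.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/* Kill the process unless the syscall ABI is 64-bit LoongArch. */
static struct sock_filter arch_check[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
		 offsetof(struct seccomp_data, arch)),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_LOONGARCH64, 1, 0),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};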
From b489a6e5871690735752f8875f411e4d0cd8e5df Mon Sep 17 00:00:00 2001
From: Maxim Mikityanskiy
Date: Wed, 8 Jun 2022 18:34:25 +0300
Subject: tls: Rename TLS_INFO_ZC_SENDFILE to TLS_INFO_ZC_RO_TX

To embrace possible future optimizations of TLS, rename zerocopy
sendfile definitions to more generic ones:

* setsockopt: TLS_TX_ZEROCOPY_SENDFILE -> TLS_TX_ZEROCOPY_RO
* sock_diag:  TLS_INFO_ZC_SENDFILE     -> TLS_INFO_ZC_RO_TX

RO stands for readonly and emphasizes that the application shouldn't
modify the data being transmitted with zerocopy to avoid potential
disconnection.

Fixes: c1318b39c7d3 ("tls: Add opt-in zerocopy mode of sendfile()")
Signed-off-by: Maxim Mikityanskiy
Link: https://lore.kernel.org/r/20220608153425.3151146-1-maximmi@nvidia.com
Signed-off-by: Jakub Kicinski
---
 include/uapi/linux/tls.h | 4 ++--
 net/tls/tls_main.c       | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'include/uapi/linux')

diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index ac39328eabe7..bb8f80812b0b 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -39,7 +39,7 @@
 /* TLS socket options */
 #define TLS_TX			1	/* Set transmit parameters */
 #define TLS_RX			2	/* Set receive parameters */
-#define TLS_TX_ZEROCOPY_SENDFILE	3	/* transmit zerocopy sendfile */
+#define TLS_TX_ZEROCOPY_RO	3	/* TX zerocopy (only sendfile now) */
 
 /* Supported versions */
 #define TLS_VERSION_MINOR(ver)	((ver) & 0xFF)
@@ -161,7 +161,7 @@ enum {
 	TLS_INFO_CIPHER,
 	TLS_INFO_TXCONF,
 	TLS_INFO_RXCONF,
-	TLS_INFO_ZC_SENDFILE,
+	TLS_INFO_ZC_RO_TX,
 	__TLS_INFO_MAX,
 };
 #define TLS_INFO_MAX (__TLS_INFO_MAX - 1)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index b91ddc110786..da176411c1b5 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -544,7 +544,7 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
 		rc = do_tls_getsockopt_conf(sk, optval, optlen,
 					    optname == TLS_TX);
 		break;
-	case TLS_TX_ZEROCOPY_SENDFILE:
+	case TLS_TX_ZEROCOPY_RO:
 		rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
 		break;
 	default:
@@ -731,7 +731,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
 					      optname == TLS_TX);
 		release_sock(sk);
 		break;
-	case TLS_TX_ZEROCOPY_SENDFILE:
+	case TLS_TX_ZEROCOPY_RO:
 		lock_sock(sk);
 		rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
 		release_sock(sk);
@@ -970,7 +970,7 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
 			goto nla_failure;
 
 		if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
-			err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE);
+			err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
 			if (err)
 				goto nla_failure;
 		}
@@ -994,7 +994,7 @@ static size_t tls_get_info_size(const struct sock *sk)
 		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
 		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
 		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
-		nla_total_size(0) +		/* TLS_INFO_ZC_SENDFILE */
+		nla_total_size(0) +		/* TLS_INFO_ZC_RO_TX */
 		0;
 
 	return size;
-- cgit v1.2.3-59-g8ed1b
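From the application side, the renamed option is used like any other SOL_TLS knob. A sketch under stated assumptions: the socket has already been upgraded to kTLS (TCP_ULP set to "tls" plus a TLS_TX crypto config), headers are new enough to define TLS_TX_ZEROCOPY_RO, and SOL_TLS is defined locally where libc headers lack it:

#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/*
 * Opt in to readonly TX zerocopy. The caller must not modify the
 * pages being transmitted until the data is acknowledged, hence
 * the _RO name.
 */
static int enable_tls_zc_ro(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_TLS, TLS_TX_ZEROCOPY_RO, &one, sizeof(one));
}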