From 8c3e34a4ff85142ca5dba3f18cbc2061899e2612 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 13 Jun 2016 12:16:05 +0100
Subject: rxrpc: Rename files matching ar-*.c to get rid of the "ar-" prefix

Rename files matching net/rxrpc/ar-*.c to get rid of the "ar-" prefix.  This
will aid splitting those files by making it easier to come up with new names.

Note that not all files are simply renamed from ar-X.c to X.c.  The following
exceptions are made:

 (*) ar-call.c -> call_object.c
     ar-ack.c -> call_event.c

     call_object.c is going to contain the core of the call object handling.
     Call event handling is all going to be in call_event.c.

 (*) ar-accept.c -> call_accept.c

     Incoming call handling is going to be here.

 (*) ar-connection.c -> conn_object.c
     ar-connevent.c -> conn_event.c

     The former file is going to have the basic connection object handling,
     but there will likely be some differentiation between client connections
     and service connections in additional files later.  The latter file will
     have all the connection-level event handling.

 (*) ar-local.c -> local_object.c

     This will have the local endpoint object handling code.  The local
     endpoint event handling code will later be split out into local_event.c.

 (*) ar-peer.c -> peer_object.c

     This will have the peer endpoint object handling code.  Peer event
     handling code will be placed in peer_event.c (for the moment, there is
     none).

 (*) ar-error.c -> peer_event.c

     This will become the peer event handling code, though for the moment it's
     actually driven from the local endpoint's perspective.

Note that I haven't renamed ar-transport.c to transport_object.c as the
intention is to delete it when the rxrpc_transport struct is excised.

The only file that actually has its contents changed is net/rxrpc/Makefile.

net/rxrpc/ar-internal.h will need its section marker comments updating, but
I'll do that in a separate patch to make it easier for git to follow the
history across the rename.  I may also want to rename ar-internal.h at some
point - but that would mean updating all the #includes and I'd rather do that
in a separate step.
Signed-off-by: David Howells +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +/* + * pass a connection-level abort onto all calls on that connection + */ +static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, + u32 abort_code) +{ + struct rxrpc_call *call; + struct rb_node *p; + + _enter("{%d},%x", conn->debug_id, abort_code); + + read_lock_bh(&conn->lock); + + for (p = rb_first(&conn->calls); p; p = rb_next(p)) { + call = rb_entry(p, struct rxrpc_call, conn_node); + write_lock(&call->state_lock); + if (call->state <= RXRPC_CALL_COMPLETE) { + call->state = state; + if (state == RXRPC_CALL_LOCALLY_ABORTED) { + call->local_abort = conn->local_abort; + set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events); + } else { + call->remote_abort = conn->remote_abort; + set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events); + } + rxrpc_queue_call(call); + } + write_unlock(&call->state_lock); + } + + read_unlock_bh(&conn->lock); + _leave(""); +} + +/* + * generate a connection-level abort + */ +static int rxrpc_abort_connection(struct rxrpc_connection *conn, + u32 error, u32 abort_code) +{ + struct rxrpc_wire_header whdr; + struct msghdr msg; + struct kvec iov[2]; + __be32 word; + size_t len; + u32 serial; + int ret; + + _enter("%d,,%u,%u", conn->debug_id, error, abort_code); + + /* generate a connection-level abort */ + spin_lock_bh(&conn->state_lock); + if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) { + conn->state = RXRPC_CONN_LOCALLY_ABORTED; + conn->error = error; + spin_unlock_bh(&conn->state_lock); + } else { + spin_unlock_bh(&conn->state_lock); + _leave(" = 0 [already dead]"); + return 0; + } + + rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); + + msg.msg_name = &conn->trans->peer->srx.transport; + msg.msg_namelen = conn->trans->peer->srx.transport_len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + whdr.epoch = htonl(conn->epoch); + whdr.cid = htonl(conn->cid); + whdr.callNumber = 0; + whdr.seq = 0; + whdr.type = RXRPC_PACKET_TYPE_ABORT; + whdr.flags = conn->out_clientflag; + whdr.userStatus = 0; + whdr.securityIndex = conn->security_ix; + whdr._rsvd = 0; + whdr.serviceId = htons(conn->service_id); + + word = htonl(conn->local_abort); + + iov[0].iov_base = &whdr; + iov[0].iov_len = sizeof(whdr); + iov[1].iov_base = &word; + iov[1].iov_len = sizeof(word); + + len = iov[0].iov_len + iov[1].iov_len; + + serial = atomic_inc_return(&conn->serial); + whdr.serial = htonl(serial); + _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + if (ret < 0) { + _debug("sendmsg failed: %d", ret); + return -EAGAIN; + } + + _leave(" = 0"); + return 0; +} + +/* + * mark a call as being on a now-secured channel + * - must be called with softirqs disabled + */ +static void rxrpc_call_is_secure(struct rxrpc_call *call) +{ + _enter("%p", call); + if (call) { + read_lock(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_EV_SECURED, &call->events)) + rxrpc_queue_call(call); + read_unlock(&call->state_lock); + } +} + +/* + * connection-level Rx packet processor + */ +static int rxrpc_process_event(struct rxrpc_connection *conn, + struct sk_buff *skb, + u32 *_abort_code) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + __be32 wtmp; + u32 abort_code; + int loop, ret; + + if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { + kleave(" = -ECONNABORTED [%u]", 
conn->state); + return -ECONNABORTED; + } + + _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial); + + switch (sp->hdr.type) { + case RXRPC_PACKET_TYPE_ABORT: + if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0) + return -EPROTO; + abort_code = ntohl(wtmp); + _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); + + conn->state = RXRPC_CONN_REMOTELY_ABORTED; + rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, + abort_code); + return -ECONNABORTED; + + case RXRPC_PACKET_TYPE_CHALLENGE: + return conn->security->respond_to_challenge(conn, skb, + _abort_code); + + case RXRPC_PACKET_TYPE_RESPONSE: + ret = conn->security->verify_response(conn, skb, _abort_code); + if (ret < 0) + return ret; + + ret = conn->security->init_connection_security(conn); + if (ret < 0) + return ret; + + conn->security->prime_packet_security(conn); + read_lock_bh(&conn->lock); + spin_lock(&conn->state_lock); + + if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) { + conn->state = RXRPC_CONN_SERVER; + for (loop = 0; loop < RXRPC_MAXCALLS; loop++) + rxrpc_call_is_secure(conn->channels[loop]); + } + + spin_unlock(&conn->state_lock); + read_unlock_bh(&conn->lock); + return 0; + + default: + _leave(" = -EPROTO [%u]", sp->hdr.type); + return -EPROTO; + } +} + +/* + * set up security and issue a challenge + */ +static void rxrpc_secure_connection(struct rxrpc_connection *conn) +{ + u32 abort_code; + int ret; + + _enter("{%d}", conn->debug_id); + + ASSERT(conn->security_ix != 0); + + if (!conn->key) { + _debug("set up security"); + ret = rxrpc_init_server_conn_security(conn); + switch (ret) { + case 0: + break; + case -ENOENT: + abort_code = RX_CALL_DEAD; + goto abort; + default: + abort_code = RXKADNOAUTH; + goto abort; + } + } + + if (conn->security->issue_challenge(conn) < 0) { + abort_code = RX_CALL_DEAD; + ret = -ENOMEM; + goto abort; + } + + _leave(""); + return; + +abort: + _debug("abort %d, %d", ret, abort_code); + rxrpc_abort_connection(conn, -ret, abort_code); + _leave(" [aborted]"); +} + +/* + * connection-level event processor + */ +void rxrpc_process_connection(struct work_struct *work) +{ + struct rxrpc_connection *conn = + container_of(work, struct rxrpc_connection, processor); + struct sk_buff *skb; + u32 abort_code = RX_PROTOCOL_ERROR; + int ret; + + _enter("{%d}", conn->debug_id); + + atomic_inc(&conn->usage); + + if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) { + rxrpc_secure_connection(conn); + rxrpc_put_connection(conn); + } + + /* go through the conn-level event packets, releasing the ref on this + * connection that each one has when we've finished with it */ + while ((skb = skb_dequeue(&conn->rx_queue))) { + ret = rxrpc_process_event(conn, skb, &abort_code); + switch (ret) { + case -EPROTO: + case -EKEYEXPIRED: + case -EKEYREJECTED: + goto protocol_error; + case -EAGAIN: + goto requeue_and_leave; + case -ECONNABORTED: + default: + rxrpc_put_connection(conn); + rxrpc_free_skb(skb); + break; + } + } + +out: + rxrpc_put_connection(conn); + _leave(""); + return; + +requeue_and_leave: + skb_queue_head(&conn->rx_queue, skb); + goto out; + +protocol_error: + if (rxrpc_abort_connection(conn, -ret, abort_code) < 0) + goto requeue_and_leave; + rxrpc_put_connection(conn); + rxrpc_free_skb(skb); + _leave(" [EPROTO]"); + goto out; +} + +/* + * put a packet up for transport-level abort + */ +void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) +{ + CHECK_SLAB_OKAY(&local->usage); + + if (!atomic_inc_not_zero(&local->usage)) { + printk("resurrected on 
reject\n"); + BUG(); + } + + skb_queue_tail(&local->reject_queue, skb); + rxrpc_queue_work(&local->rejecter); +} + +/* + * reject packets through the local endpoint + */ +void rxrpc_reject_packets(struct work_struct *work) +{ + union { + struct sockaddr sa; + struct sockaddr_in sin; + } sa; + struct rxrpc_skb_priv *sp; + struct rxrpc_wire_header whdr; + struct rxrpc_local *local; + struct sk_buff *skb; + struct msghdr msg; + struct kvec iov[2]; + size_t size; + __be32 code; + + local = container_of(work, struct rxrpc_local, rejecter); + rxrpc_get_local(local); + + _enter("%d", local->debug_id); + + iov[0].iov_base = &whdr; + iov[0].iov_len = sizeof(whdr); + iov[1].iov_base = &code; + iov[1].iov_len = sizeof(code); + size = sizeof(whdr) + sizeof(code); + + msg.msg_name = &sa; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + memset(&sa, 0, sizeof(sa)); + sa.sa.sa_family = local->srx.transport.family; + switch (sa.sa.sa_family) { + case AF_INET: + msg.msg_namelen = sizeof(sa.sin); + break; + default: + msg.msg_namelen = 0; + break; + } + + memset(&whdr, 0, sizeof(whdr)); + whdr.type = RXRPC_PACKET_TYPE_ABORT; + + while ((skb = skb_dequeue(&local->reject_queue))) { + sp = rxrpc_skb(skb); + switch (sa.sa.sa_family) { + case AF_INET: + sa.sin.sin_port = udp_hdr(skb)->source; + sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; + code = htonl(skb->priority); + + whdr.epoch = htonl(sp->hdr.epoch); + whdr.cid = htonl(sp->hdr.cid); + whdr.callNumber = htonl(sp->hdr.callNumber); + whdr.serviceId = htons(sp->hdr.serviceId); + whdr.flags = sp->hdr.flags; + whdr.flags ^= RXRPC_CLIENT_INITIATED; + whdr.flags &= RXRPC_CLIENT_INITIATED; + + kernel_sendmsg(local->socket, &msg, iov, 2, size); + break; + + default: + break; + } + + rxrpc_free_skb(skb); + rxrpc_put_local(local); + } + + rxrpc_put_local(local); + _leave(""); +} -- cgit v1.2.3-59-g8ed1b From 4f95dd78a77edc42454de55bb32332be293fb461 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 4 Apr 2016 14:00:35 +0100 Subject: rxrpc: Rework local endpoint management Rework the local RxRPC endpoint management. Local endpoint objects are maintained in a flat list as before. This should be okay as there shouldn't be more than one per open AF_RXRPC socket (there can be fewer as local endpoints can be shared if their local service ID is 0 and they share the same local transport parameters). Changes: (1) Local endpoints may now only be shared if they have local service ID 0 (ie. they're not being used for listening). This prevents a scenario where process A is listening of the Cache Manager port and process B contacts a fileserver - which may then attempt to send CM requests back to B. But if A and B are sharing a local endpoint, A will get the CM requests meant for B. (2) We use a mutex to handle lookups and don't provide RCU-only lookups since we only expect to access the list when opening a socket or destroying an endpoint. The local endpoint object is pointed to by the transport socket's sk_user_data for the life of the transport socket - allowing us to refer to it directly from the sk_data_ready and sk_error_report callbacks. (3) atomic_inc_not_zero() now exists and can be used to only share a local endpoint if the last reference hasn't yet gone. (4) We can remove rxrpc_local_lock - a spinlock that had to be taken with BH processing disabled given that we assume sk_user_data won't change under us. 
(5) The transport socket is shut down before we clear the sk_user_data pointer so that we can be sure that the transport socket's callbacks won't be invoked once the RCU destruction is scheduled. (6) Local endpoints have a work item that handles both destruction and event processing. The means that destruction doesn't then need to wait for event processing. The event queues can then be cleared after the transport socket is shut down. (7) Local endpoints are no longer available for resurrection beyond the life of the sockets that had them open. As soon as their last ref goes, they are scheduled for destruction and may not have their usage count moved from 0. Signed-off-by: David Howells --- net/rxrpc/af_rxrpc.c | 19 ++- net/rxrpc/ar-internal.h | 55 ++++---- net/rxrpc/call_accept.c | 25 +--- net/rxrpc/conn_event.c | 15 +- net/rxrpc/input.c | 29 +--- net/rxrpc/local_event.c | 10 +- net/rxrpc/local_object.c | 353 ++++++++++++++++++++++++++++------------------- 7 files changed, 276 insertions(+), 230 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index ba373caddbeb..c83c3c75d665 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -102,6 +102,8 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx, switch (srx->transport.family) { case AF_INET: + if (srx->transport_len < sizeof(struct sockaddr_in)) + return -EINVAL; _debug("INET: %x @ %pI4", ntohs(srx->transport.sin.sin_port), &srx->transport.sin.sin_addr); @@ -835,12 +837,27 @@ static void __exit af_rxrpc_exit(void) rxrpc_destroy_all_calls(); rxrpc_destroy_all_connections(); rxrpc_destroy_all_transports(); - rxrpc_destroy_all_locals(); ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); + /* We need to flush the scheduled work twice because the local endpoint + * records involve a work item in their destruction as they can only be + * destroyed from process context. However, a connection may have a + * work item outstanding - and this will pin the local endpoint record + * until the connection goes away. + * + * Peers don't pin locals and calls pin sockets - which prevents the + * module from being unloaded - so we should only need two flushes. 
+ */ _debug("flush scheduled work"); flush_workqueue(rxrpc_workqueue); + _debug("flush scheduled work 2"); + flush_workqueue(rxrpc_workqueue); + _debug("synchronise RCU"); + rcu_barrier(); + _debug("destroy locals"); + rxrpc_destroy_all_locals(); + remove_proc_entry("rxrpc_conns", init_net.proc_net); remove_proc_entry("rxrpc_calls", init_net.proc_net); destroy_workqueue(rxrpc_workqueue); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index fa50b09eaa63..c168268467cd 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -170,25 +170,26 @@ struct rxrpc_security { }; /* - * RxRPC local transport endpoint definition - * - matched by local port, address and protocol type + * RxRPC local transport endpoint description + * - owned by a single AF_RXRPC socket + * - pointed to by transport socket struct sk_user_data */ struct rxrpc_local { + struct rcu_head rcu; + atomic_t usage; + struct list_head link; struct socket *socket; /* my UDP socket */ - struct work_struct destroyer; /* endpoint destroyer */ - struct work_struct acceptor; /* incoming call processor */ - struct work_struct rejecter; /* packet reject writer */ - struct work_struct event_processor; /* endpoint event processor */ + struct work_struct processor; struct list_head services; /* services listening on this endpoint */ - struct list_head link; /* link in endpoint list */ struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */ struct sk_buff_head reject_queue; /* packets awaiting rejection */ struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */ + struct mutex conn_lock; /* Client connection creation lock */ spinlock_t lock; /* access lock */ rwlock_t services_lock; /* lock for services list */ - atomic_t usage; int debug_id; /* debug ID for printks */ + bool dead; struct sockaddr_rxrpc srx; /* local address */ }; @@ -487,7 +488,7 @@ extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *, /* * call_accept.c */ -void rxrpc_accept_incoming_calls(struct work_struct *); +void rxrpc_accept_incoming_calls(struct rxrpc_local *); struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long); int rxrpc_reject_call(struct rxrpc_sock *); @@ -527,7 +528,7 @@ void __exit rxrpc_destroy_all_calls(void); */ void rxrpc_process_connection(struct work_struct *); void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *); -void rxrpc_reject_packets(struct work_struct *); +void rxrpc_reject_packets(struct rxrpc_local *); /* * conn_object.c @@ -575,17 +576,32 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t, /* * local_event.c */ -extern void rxrpc_process_local_events(struct work_struct *); +extern void rxrpc_process_local_events(struct rxrpc_local *); /* * local_object.c */ -extern rwlock_t rxrpc_local_lock; - -struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *); -void rxrpc_put_local(struct rxrpc_local *); +struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *); +void __rxrpc_put_local(struct rxrpc_local *); void __exit rxrpc_destroy_all_locals(void); +static inline void rxrpc_get_local(struct rxrpc_local *local) +{ + atomic_inc(&local->usage); +} + +static inline +struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) +{ + return atomic_inc_not_zero(&local->usage) ? 
local : NULL; +} + +static inline void rxrpc_put_local(struct rxrpc_local *local) +{ + if (atomic_dec_and_test(&local->usage)) + __rxrpc_put_local(local); +} + /* * misc.c */ @@ -874,15 +890,6 @@ static inline void rxrpc_purge_queue(struct sk_buff_head *list) rxrpc_free_skb(skb); } -static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f) -{ - CHECK_SLAB_OKAY(&local->usage); - if (atomic_inc_return(&local->usage) == 1) - printk("resurrected (%s)\n", f); -} - -#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__) - #define rxrpc_get_call(CALL) \ do { \ CHECK_SLAB_OKAY(&(CALL)->usage); \ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index e5723f4dce89..50136c76ebd1 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -202,10 +202,8 @@ error_nofree: * accept incoming calls that need peer, transport and/or connection setting up * - the packets we get are all incoming client DATA packets that have seq == 1 */ -void rxrpc_accept_incoming_calls(struct work_struct *work) +void rxrpc_accept_incoming_calls(struct rxrpc_local *local) { - struct rxrpc_local *local = - container_of(work, struct rxrpc_local, acceptor); struct rxrpc_skb_priv *sp; struct sockaddr_rxrpc srx; struct rxrpc_sock *rx; @@ -215,21 +213,8 @@ void rxrpc_accept_incoming_calls(struct work_struct *work) _enter("%d", local->debug_id); - read_lock_bh(&rxrpc_local_lock); - if (atomic_read(&local->usage) > 0) - rxrpc_get_local(local); - else - local = NULL; - read_unlock_bh(&rxrpc_local_lock); - if (!local) { - _leave(" [local dead]"); - return; - } - -process_next_packet: skb = skb_dequeue(&local->accept_queue); if (!skb) { - rxrpc_put_local(local); _leave("\n"); return; } @@ -292,7 +277,7 @@ found_service: case -ECONNRESET: /* old calls are ignored */ case -ECONNABORTED: /* aborted calls are reaborted or ignored */ case 0: - goto process_next_packet; + return; case -ECONNREFUSED: goto invalid_service; case -EBUSY: @@ -308,18 +293,18 @@ backlog_full: busy: rxrpc_busy(local, &srx, &whdr); rxrpc_free_skb(skb); - goto process_next_packet; + return; invalid_service: skb->priority = RX_INVALID_OPERATION; rxrpc_reject_packet(local, skb); - goto process_next_packet; + return; /* can't change connection security type mid-flow */ security_mismatch: skb->priority = RX_PROTOCOL_ERROR; rxrpc_reject_packet(local, skb); - goto process_next_packet; + return; } /* diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 8bdd692d4862..00c92b614485 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -314,19 +314,14 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) { CHECK_SLAB_OKAY(&local->usage); - if (!atomic_inc_not_zero(&local->usage)) { - printk("resurrected on reject\n"); - BUG(); - } - skb_queue_tail(&local->reject_queue, skb); - rxrpc_queue_work(&local->rejecter); + rxrpc_queue_work(&local->processor); } /* * reject packets through the local endpoint */ -void rxrpc_reject_packets(struct work_struct *work) +void rxrpc_reject_packets(struct rxrpc_local *local) { union { struct sockaddr sa; @@ -334,16 +329,12 @@ void rxrpc_reject_packets(struct work_struct *work) } sa; struct rxrpc_skb_priv *sp; struct rxrpc_wire_header whdr; - struct rxrpc_local *local; struct sk_buff *skb; struct msghdr msg; struct kvec iov[2]; size_t size; __be32 code; - local = container_of(work, struct rxrpc_local, rejecter); - rxrpc_get_local(local); - _enter("%d", local->debug_id); iov[0].iov_base = &whdr; @@ -395,9 +386,7 @@ void 
rxrpc_reject_packets(struct work_struct *work) } rxrpc_free_skb(skb); - rxrpc_put_local(local); } - rxrpc_put_local(local); _leave(""); } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 3b405dbf3a05..47fb167af3e4 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -594,9 +594,8 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, { _enter("%p,%p", local, skb); - atomic_inc(&local->usage); skb_queue_tail(&local->event_queue, skb); - rxrpc_queue_work(&local->event_processor); + rxrpc_queue_work(&local->processor); } /* @@ -664,11 +663,15 @@ cant_find_conn: /* * handle data received on the local endpoint * - may be called in interrupt context + * + * The socket is locked by the caller and this prevents the socket from being + * shut down and the local endpoint from going away, thus sk_user_data will not + * be cleared until this function returns. */ void rxrpc_data_ready(struct sock *sk) { struct rxrpc_skb_priv *sp; - struct rxrpc_local *local; + struct rxrpc_local *local = sk->sk_user_data; struct sk_buff *skb; int ret; @@ -676,21 +679,8 @@ void rxrpc_data_ready(struct sock *sk) ASSERT(!irqs_disabled()); - read_lock_bh(&rxrpc_local_lock); - local = sk->sk_user_data; - if (local && atomic_read(&local->usage) > 0) - rxrpc_get_local(local); - else - local = NULL; - read_unlock_bh(&rxrpc_local_lock); - if (!local) { - _leave(" [local dead]"); - return; - } - skb = skb_recv_datagram(sk, 0, 1, &ret); if (!skb) { - rxrpc_put_local(local); if (ret == -EAGAIN) return; _debug("UDP socket error %d", ret); @@ -704,7 +694,6 @@ void rxrpc_data_ready(struct sock *sk) /* we'll probably need to checksum it (didn't call sock_recvmsg) */ if (skb_checksum_complete(skb)) { rxrpc_free_skb(skb); - rxrpc_put_local(local); __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); _leave(" [CSUM failed]"); return; @@ -769,7 +758,6 @@ void rxrpc_data_ready(struct sock *sk) } out: - rxrpc_put_local(local); return; cant_route_call: @@ -779,8 +767,7 @@ cant_route_call: if (sp->hdr.seq == 1) { _debug("first packet"); skb_queue_tail(&local->accept_queue, skb); - rxrpc_queue_work(&local->acceptor); - rxrpc_put_local(local); + rxrpc_queue_work(&local->processor); _leave(" [incoming]"); return; } @@ -793,13 +780,11 @@ cant_route_call: _debug("reject type %d",sp->hdr.type); rxrpc_reject_packet(local, skb); } - rxrpc_put_local(local); _leave(" [no call]"); return; bad_message: skb->priority = RX_PROTOCOL_ERROR; rxrpc_reject_packet(local, skb); - rxrpc_put_local(local); _leave(" [badmsg]"); } diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 194db2e6d548..31a3f86ef2f6 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -82,17 +82,15 @@ static void rxrpc_send_version_request(struct rxrpc_local *local, /* * Process event packets targetted at a local endpoint. 
*/ -void rxrpc_process_local_events(struct work_struct *work) +void rxrpc_process_local_events(struct rxrpc_local *local) { - struct rxrpc_local *local = container_of(work, struct rxrpc_local, event_processor); struct sk_buff *skb; char v; _enter(""); - atomic_inc(&local->usage); - - while ((skb = skb_dequeue(&local->event_queue))) { + skb = skb_dequeue(&local->event_queue); + if (skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); _debug("{%d},{%u}", local->debug_id, sp->hdr.type); @@ -111,10 +109,8 @@ void rxrpc_process_local_events(struct work_struct *work) break; } - rxrpc_put_local(local); rxrpc_free_skb(skb); } - rxrpc_put_local(local); _leave(""); } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index c1b8d745bf5e..009b321712bc 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -1,6 +1,6 @@ /* Local endpoint object management * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -17,40 +17,72 @@ #include #include #include +#include #include #include #include "ar-internal.h" -static LIST_HEAD(rxrpc_locals); -DEFINE_RWLOCK(rxrpc_local_lock); -static DECLARE_RWSEM(rxrpc_local_sem); -static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq); +static void rxrpc_local_processor(struct work_struct *); +static void rxrpc_local_rcu(struct rcu_head *); -static void rxrpc_destroy_local(struct work_struct *work); +static DEFINE_MUTEX(rxrpc_local_mutex); +static LIST_HEAD(rxrpc_local_endpoints); /* - * allocate a new local + * Compare a local to an address. Return -ve, 0 or +ve to indicate less than, + * same or greater than. + * + * We explicitly don't compare the RxRPC service ID as we want to reject + * conflicting uses by differing services. Further, we don't want to share + * addresses with different options (IPv6), so we don't compare those bits + * either. */ -static -struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) +static long rxrpc_local_cmp_key(const struct rxrpc_local *local, + const struct sockaddr_rxrpc *srx) +{ + long diff; + + diff = ((local->srx.transport_type - srx->transport_type) ?: + (local->srx.transport_len - srx->transport_len) ?: + (local->srx.transport.family - srx->transport.family)); + if (diff != 0) + return diff; + + switch (srx->transport.family) { + case AF_INET: + /* If the choice of UDP port is left up to the transport, then + * the endpoint record doesn't match. + */ + return ((u16 __force)local->srx.transport.sin.sin_port - + (u16 __force)srx->transport.sin.sin_port) ?: + memcmp(&local->srx.transport.sin.sin_addr, + &srx->transport.sin.sin_addr, + sizeof(struct in_addr)); + default: + BUG(); + } +} + +/* + * Allocate a new local endpoint. 
+ */ +static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx) { struct rxrpc_local *local; local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); if (local) { - INIT_WORK(&local->destroyer, &rxrpc_destroy_local); - INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls); - INIT_WORK(&local->rejecter, &rxrpc_reject_packets); - INIT_WORK(&local->event_processor, &rxrpc_process_local_events); - INIT_LIST_HEAD(&local->services); + atomic_set(&local->usage, 1); INIT_LIST_HEAD(&local->link); + INIT_WORK(&local->processor, rxrpc_local_processor); + INIT_LIST_HEAD(&local->services); init_rwsem(&local->defrag_sem); skb_queue_head_init(&local->accept_queue); skb_queue_head_init(&local->reject_queue); skb_queue_head_init(&local->event_queue); + mutex_init(&local->conn_lock); spin_lock_init(&local->lock); rwlock_init(&local->services_lock); - atomic_set(&local->usage, 1); local->debug_id = atomic_inc_return(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); } @@ -61,9 +93,9 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) /* * create the local socket - * - must be called with rxrpc_local_sem writelocked + * - must be called with rxrpc_local_mutex locked */ -static int rxrpc_create_local(struct rxrpc_local *local) +static int rxrpc_open_socket(struct rxrpc_local *local) { struct sock *sock; int ret, opt; @@ -82,10 +114,10 @@ static int rxrpc_create_local(struct rxrpc_local *local) if (local->srx.transport_len > sizeof(sa_family_t)) { _debug("bind"); ret = kernel_bind(local->socket, - (struct sockaddr *) &local->srx.transport, + (struct sockaddr *)&local->srx.transport, local->srx.transport_len); if (ret < 0) { - _debug("bind failed"); + _debug("bind failed %d", ret); goto error; } } @@ -108,10 +140,6 @@ static int rxrpc_create_local(struct rxrpc_local *local) goto error; } - write_lock_bh(&rxrpc_local_lock); - list_add(&local->link, &rxrpc_locals); - write_unlock_bh(&rxrpc_local_lock); - /* set the socket up */ sock = local->socket->sk; sock->sk_user_data = local; @@ -131,188 +159,227 @@ error: } /* - * create a new local endpoint using the specified UDP address + * Look up or create a new local endpoint using the specified local address. 
*/ -struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx) +struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) { struct rxrpc_local *local; + struct list_head *cursor; + const char *age; + long diff; int ret; - _enter("{%d,%u,%pI4+%hu}", - srx->transport_type, - srx->transport.family, - &srx->transport.sin.sin_addr, - ntohs(srx->transport.sin.sin_port)); - - down_write(&rxrpc_local_sem); + if (srx->transport.family == AF_INET) { + _enter("{%d,%u,%pI4+%hu}", + srx->transport_type, + srx->transport.family, + &srx->transport.sin.sin_addr, + ntohs(srx->transport.sin.sin_port)); + } else { + _enter("{%d,%u}", + srx->transport_type, + srx->transport.family); + return ERR_PTR(-EAFNOSUPPORT); + } - /* see if we have a suitable local local endpoint already */ - read_lock_bh(&rxrpc_local_lock); + mutex_lock(&rxrpc_local_mutex); - list_for_each_entry(local, &rxrpc_locals, link) { - _debug("CMP {%d,%u,%pI4+%hu}", - local->srx.transport_type, - local->srx.transport.family, - &local->srx.transport.sin.sin_addr, - ntohs(local->srx.transport.sin.sin_port)); + for (cursor = rxrpc_local_endpoints.next; + cursor != &rxrpc_local_endpoints; + cursor = cursor->next) { + local = list_entry(cursor, struct rxrpc_local, link); - if (local->srx.transport_type != srx->transport_type || - local->srx.transport.family != srx->transport.family) + diff = rxrpc_local_cmp_key(local, srx); + if (diff < 0) continue; + if (diff > 0) + break; + + /* Services aren't allowed to share transport sockets, so + * reject that here. It is possible that the object is dying - + * but it may also still have the local transport address that + * we want bound. + */ + if (srx->srx_service) { + local = NULL; + goto addr_in_use; + } - switch (srx->transport.family) { - case AF_INET: - if (local->srx.transport.sin.sin_port != - srx->transport.sin.sin_port) - continue; - if (memcmp(&local->srx.transport.sin.sin_addr, - &srx->transport.sin.sin_addr, - sizeof(struct in_addr)) != 0) - continue; - goto found_local; - - default: - BUG(); + /* Found a match. We replace a dying object. Attempting to + * bind the transport socket may still fail if we're attempting + * to use a local address that the dying object is still using. 
+ */ + if (!atomic_inc_not_zero(&local->usage)) { + cursor = cursor->next; + list_del_init(&local->link); + break; } - } - read_unlock_bh(&rxrpc_local_lock); + age = "old"; + goto found; + } - /* we didn't find one, so we need to create one */ local = rxrpc_alloc_local(srx); - if (!local) { - up_write(&rxrpc_local_sem); - return ERR_PTR(-ENOMEM); - } + if (!local) + goto nomem; - ret = rxrpc_create_local(local); - if (ret < 0) { - up_write(&rxrpc_local_sem); - kfree(local); - _leave(" = %d", ret); - return ERR_PTR(ret); - } + ret = rxrpc_open_socket(local); + if (ret < 0) + goto sock_error; + + list_add_tail(&local->link, cursor); + age = "new"; - up_write(&rxrpc_local_sem); +found: + mutex_unlock(&rxrpc_local_mutex); - _net("LOCAL new %d {%d,%u,%pI4+%hu}", + _net("LOCAL %s %d {%d,%u,%pI4+%hu}", + age, local->debug_id, local->srx.transport_type, local->srx.transport.family, &local->srx.transport.sin.sin_addr, ntohs(local->srx.transport.sin.sin_port)); - _leave(" = %p [new]", local); + _leave(" = %p", local); return local; -found_local: - rxrpc_get_local(local); - read_unlock_bh(&rxrpc_local_lock); - up_write(&rxrpc_local_sem); +nomem: + ret = -ENOMEM; +sock_error: + mutex_unlock(&rxrpc_local_mutex); + kfree(local); + _leave(" = %d", ret); + return ERR_PTR(ret); - _net("LOCAL old %d {%d,%u,%pI4+%hu}", - local->debug_id, - local->srx.transport_type, - local->srx.transport.family, - &local->srx.transport.sin.sin_addr, - ntohs(local->srx.transport.sin.sin_port)); +addr_in_use: + mutex_unlock(&rxrpc_local_mutex); + _leave(" = -EADDRINUSE"); + return ERR_PTR(-EADDRINUSE); +} - _leave(" = %p [reuse]", local); - return local; +/* + * A local endpoint reached its end of life. + */ +void __rxrpc_put_local(struct rxrpc_local *local) +{ + _enter("%d", local->debug_id); + rxrpc_queue_work(&local->processor); } /* - * release a local endpoint + * Destroy a local endpoint's socket and then hand the record to RCU to dispose + * of. + * + * Closing the socket cannot be done from bottom half context or RCU callback + * context because it might sleep. */ -void rxrpc_put_local(struct rxrpc_local *local) +static void rxrpc_local_destroyer(struct rxrpc_local *local) { - _enter("%p{u=%d}", local, atomic_read(&local->usage)); + struct socket *socket = local->socket; - ASSERTCMP(atomic_read(&local->usage), >, 0); + _enter("%d", local->debug_id); - /* to prevent a race, the decrement and the dequeue must be effectively - * atomic */ - write_lock_bh(&rxrpc_local_lock); - if (unlikely(atomic_dec_and_test(&local->usage))) { - _debug("destroy local"); - rxrpc_queue_work(&local->destroyer); + /* We can get a race between an incoming call packet queueing the + * processor again and the work processor starting the destruction + * process which will shut down the UDP socket. + */ + if (local->dead) { + _leave(" [already dead]"); + return; } - write_unlock_bh(&rxrpc_local_lock); - _leave(""); + local->dead = true; + + mutex_lock(&rxrpc_local_mutex); + list_del_init(&local->link); + mutex_unlock(&rxrpc_local_mutex); + + ASSERT(list_empty(&local->services)); + + if (socket) { + local->socket = NULL; + kernel_sock_shutdown(socket, SHUT_RDWR); + socket->sk->sk_user_data = NULL; + sock_release(socket); + } + + /* At this point, there should be no more packets coming in to the + * local endpoint. 
+ */ + rxrpc_purge_queue(&local->accept_queue); + rxrpc_purge_queue(&local->reject_queue); + rxrpc_purge_queue(&local->event_queue); + + _debug("rcu local %d", local->debug_id); + call_rcu(&local->rcu, rxrpc_local_rcu); } /* - * destroy a local endpoint + * Process events on an endpoint */ -static void rxrpc_destroy_local(struct work_struct *work) +static void rxrpc_local_processor(struct work_struct *work) { struct rxrpc_local *local = - container_of(work, struct rxrpc_local, destroyer); + container_of(work, struct rxrpc_local, processor); + bool again; - _enter("%p{%d}", local, atomic_read(&local->usage)); + _enter("%d", local->debug_id); - down_write(&rxrpc_local_sem); + do { + again = false; + if (atomic_read(&local->usage) == 0) + return rxrpc_local_destroyer(local); - write_lock_bh(&rxrpc_local_lock); - if (atomic_read(&local->usage) > 0) { - write_unlock_bh(&rxrpc_local_lock); - up_read(&rxrpc_local_sem); - _leave(" [resurrected]"); - return; - } + if (!skb_queue_empty(&local->accept_queue)) { + rxrpc_accept_incoming_calls(local); + again = true; + } - list_del(&local->link); - local->socket->sk->sk_user_data = NULL; - write_unlock_bh(&rxrpc_local_lock); + if (!skb_queue_empty(&local->reject_queue)) { + rxrpc_reject_packets(local); + again = true; + } - downgrade_write(&rxrpc_local_sem); + if (!skb_queue_empty(&local->event_queue)) { + rxrpc_process_local_events(local); + again = true; + } + } while (again); +} - ASSERT(list_empty(&local->services)); - ASSERT(!work_pending(&local->acceptor)); - ASSERT(!work_pending(&local->rejecter)); - ASSERT(!work_pending(&local->event_processor)); +/* + * Destroy a local endpoint after the RCU grace period expires. + */ +static void rxrpc_local_rcu(struct rcu_head *rcu) +{ + struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu); - /* finish cleaning up the local descriptor */ - rxrpc_purge_queue(&local->accept_queue); - rxrpc_purge_queue(&local->reject_queue); - rxrpc_purge_queue(&local->event_queue); - kernel_sock_shutdown(local->socket, SHUT_RDWR); - sock_release(local->socket); + _enter("%d", local->debug_id); - up_read(&rxrpc_local_sem); + ASSERT(!work_pending(&local->processor)); _net("DESTROY LOCAL %d", local->debug_id); kfree(local); - - if (list_empty(&rxrpc_locals)) - wake_up_all(&rxrpc_local_wq); - _leave(""); } /* - * preemptively destroy all local local endpoint rather than waiting for - * them to be destroyed + * Verify the local endpoint list is empty by this point. */ void __exit rxrpc_destroy_all_locals(void) { - DECLARE_WAITQUEUE(myself,current); + struct rxrpc_local *local; _enter(""); - /* we simply have to wait for them to go away */ - if (!list_empty(&rxrpc_locals)) { - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&rxrpc_local_wq, &myself); - - while (!list_empty(&rxrpc_locals)) { - schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } + if (list_empty(&rxrpc_local_endpoints)) + return; - remove_wait_queue(&rxrpc_local_wq, &myself); - set_current_state(TASK_RUNNING); + mutex_lock(&rxrpc_local_mutex); + list_for_each_entry(local, &rxrpc_local_endpoints, link) { + pr_err("AF_RXRPC: Leaked local %p {%d}\n", + local, atomic_read(&local->usage)); } - - _leave(""); + mutex_unlock(&rxrpc_local_mutex); + BUG(); } -- cgit v1.2.3-59-g8ed1b From 19ffa01c9c45861ad6b181323e0d36904298e326 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 4 Apr 2016 14:00:36 +0100 Subject: rxrpc: Use structs to hold connection params and protocol info Define and use a structure to hold connection parameters. 
This makes it easier to pass multiple connection parameters around. Define and use a structure to hold protocol information used to hash a connection for lookup on incoming packet. Most of these fields will be disposed of eventually, including the duplicate local pointer. Whilst we're at it rename "proto" to "family" when referring to a protocol family. Signed-off-by: David Howells --- net/rxrpc/af_rxrpc.c | 55 +++++++++++++++----------- net/rxrpc/ar-internal.h | 61 ++++++++++++++++++++++------ net/rxrpc/call_event.c | 6 +-- net/rxrpc/call_object.c | 44 +++++++++++---------- net/rxrpc/conn_event.c | 8 ++-- net/rxrpc/conn_object.c | 103 ++++++++++++++++++++++++++---------------------- net/rxrpc/input.c | 4 +- net/rxrpc/key.c | 2 +- net/rxrpc/output.c | 24 +++++++---- net/rxrpc/proc.c | 12 +++--- net/rxrpc/recvmsg.c | 2 +- net/rxrpc/rxkad.c | 62 ++++++++++++++--------------- net/rxrpc/security.c | 6 +-- 13 files changed, 226 insertions(+), 163 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 9dd160bb16d2..48b45a0280c0 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -97,7 +97,7 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx, srx->transport_len > len) return -EINVAL; - if (srx->transport.family != rx->proto) + if (srx->transport.family != rx->family) return -EAFNOSUPPORT; switch (srx->transport.family) { @@ -227,32 +227,30 @@ static int rxrpc_listen(struct socket *sock, int backlog) /* * find a transport by address */ -struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *rx, - struct sockaddr *addr, - int addr_len, int flags, - gfp_t gfp) +struct rxrpc_transport * +rxrpc_name_to_transport(struct rxrpc_conn_parameters *cp, + struct sockaddr *addr, + int addr_len, + gfp_t gfp) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; struct rxrpc_transport *trans; - struct rxrpc_peer *peer; - _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); - - ASSERT(rx->local != NULL); + _enter("%p,%d", addr, addr_len); - if (rx->srx.transport_type != srx->transport_type) + if (cp->local->srx.transport_type != srx->transport_type) return ERR_PTR(-ESOCKTNOSUPPORT); - if (rx->srx.transport.family != srx->transport.family) + if (cp->local->srx.transport.family != srx->transport.family) return ERR_PTR(-EAFNOSUPPORT); /* find a remote transport endpoint from the local one */ - peer = rxrpc_lookup_peer(rx->local, srx, gfp); - if (!peer) + cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); + if (!cp->peer) return ERR_PTR(-ENOMEM); /* find a transport */ - trans = rxrpc_get_transport(rx->local, peer, gfp); - rxrpc_put_peer(peer); + trans = rxrpc_get_transport(cp->local, cp->peer, gfp); + rxrpc_put_peer(cp->peer); _leave(" = %p", trans); return trans; } @@ -277,6 +275,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, unsigned long user_call_ID, gfp_t gfp) { + struct rxrpc_conn_parameters cp; struct rxrpc_conn_bundle *bundle; struct rxrpc_transport *trans; struct rxrpc_call *call; @@ -286,18 +285,26 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, lock_sock(&rx->sk); - trans = rxrpc_name_to_transport(rx, (struct sockaddr *)srx, - sizeof(*srx), 0, gfp); + if (!key) + key = rx->key; + if (key && !key->payload.data[0]) + key = NULL; /* a no-security key */ + + memset(&cp, 0, sizeof(cp)); + cp.local = rx->local; + cp.key = key; + cp.security_level = 0; + cp.exclusive = false; + cp.service_id = srx->srx_service; + + trans = rxrpc_name_to_transport(&cp, (struct sockaddr *)srx, + 
sizeof(*srx), gfp); if (IS_ERR(trans)) { call = ERR_CAST(trans); trans = NULL; goto out_notrans; } - - if (!key) - key = rx->key; - if (key && !key->payload.data[0]) - key = NULL; /* a no-security key */ + cp.peer = trans->peer; bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, gfp); if (IS_ERR(bundle)) { @@ -305,7 +312,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, goto out; } - call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID, gfp); + call = rxrpc_new_client_call(rx, &cp, trans, bundle, user_call_ID, gfp); rxrpc_put_bundle(trans, bundle); out: rxrpc_put_transport(trans); @@ -600,7 +607,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol, sk->sk_destruct = rxrpc_sock_destructor; rx = rxrpc_sk(sk); - rx->proto = protocol; + rx->family = protocol; rx->calls = RB_ROOT; INIT_LIST_HEAD(&rx->listen_link); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index c168268467cd..efe6673deb28 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -72,7 +72,7 @@ struct rxrpc_sock { #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT struct sockaddr_rxrpc srx; /* local address */ struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */ - sa_family_t proto; /* protocol created with */ + sa_family_t family; /* protocol family created with */ }; #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk) @@ -261,6 +261,34 @@ struct rxrpc_conn_bundle { u8 security_ix; /* security type */ }; +/* + * Keys for matching a connection. + */ +struct rxrpc_conn_proto { + unsigned long hash_key; + struct rxrpc_local *local; /* Representation of local endpoint */ + u32 epoch; /* epoch of this connection */ + u32 cid; /* connection ID */ + u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ + u8 addr_size; /* Size of the address */ + sa_family_t family; /* Transport protocol */ + __be16 port; /* Peer UDP/UDP6 port */ + union { /* Peer address */ + struct in_addr ipv4_addr; + struct in6_addr ipv6_addr; + u32 raw_addr[0]; + }; +}; + +struct rxrpc_conn_parameters { + struct rxrpc_local *local; /* Representation of local endpoint */ + struct rxrpc_peer *peer; /* Remote endpoint */ + struct key *key; /* Security details */ + bool exclusive; /* T if conn is exclusive */ + u16 service_id; /* Service ID for this connection */ + u32 security_level; /* Security level selected */ +}; + /* * RxRPC connection definition * - matched by { transport, service_id, conn_id, direction, key } @@ -269,6 +297,9 @@ struct rxrpc_conn_bundle { struct rxrpc_connection { struct rxrpc_transport *trans; /* transport session */ struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */ + struct rxrpc_conn_proto proto; + struct rxrpc_conn_parameters params; + struct work_struct processor; /* connection event processor */ struct rb_node node; /* node in transport's lookup tree */ struct list_head link; /* link in master connection list */ @@ -277,7 +308,6 @@ struct rxrpc_connection { struct sk_buff_head rx_queue; /* received conn-level packets */ struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */ const struct rxrpc_security *security; /* applied security module */ - struct key *key; /* security for this connection (client) */ struct key *server_key; /* security for this service */ struct crypto_skcipher *cipher; /* encryption handle */ struct rxrpc_crypt csum_iv; /* packet checksum base */ @@ -308,13 +338,8 @@ struct rxrpc_connection { u8 size_align; /* data size alignment (for 
security) */ u8 header_size; /* rxrpc + security header size */ u8 security_size; /* security header size */ - u32 security_level; /* security level negotiated */ u32 security_nonce; /* response re-use preventer */ - u32 epoch; /* epoch of this connection */ - u32 cid; /* connection ID */ - u16 service_id; /* service ID for this connection */ u8 security_ix; /* security type */ - u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ }; @@ -448,7 +473,7 @@ struct rxrpc_call { unsigned long hash_key; /* Full hash key */ u8 in_clientflag; /* Copy of conn->in_clientflag for hashing */ struct rxrpc_local *local; /* Local endpoint. Used for hashing. */ - sa_family_t proto; /* Frame protocol */ + sa_family_t family; /* Frame protocol */ u32 call_id; /* call ID on connection */ u32 cid; /* connection ID plus channel index */ u32 epoch; /* epoch of this connection */ @@ -481,9 +506,9 @@ extern u32 rxrpc_epoch; extern atomic_t rxrpc_debug_id; extern struct workqueue_struct *rxrpc_workqueue; -extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *, +extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_conn_parameters *, struct sockaddr *, - int, int, gfp_t); + int, gfp_t); /* * call_accept.c @@ -512,6 +537,7 @@ struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *, void *, sa_family_t, const void *); struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, + struct rxrpc_conn_parameters *, struct rxrpc_transport *, struct rxrpc_conn_bundle *, unsigned long, gfp_t); @@ -541,8 +567,9 @@ struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *, struct rxrpc_transport *, struct key *, u16, gfp_t); void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *); -int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *, - struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t); +int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *, + struct rxrpc_transport *, struct rxrpc_conn_bundle *, + struct rxrpc_call *, gfp_t); void rxrpc_put_connection(struct rxrpc_connection *); void __exit rxrpc_destroy_all_connections(void); struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, @@ -550,6 +577,16 @@ struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, extern struct rxrpc_connection * rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *); +static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn) +{ + return conn->out_clientflag; +} + +static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn) +{ + return conn->proto.in_clientflag; +} + /* * input.c */ diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index e610b106c913..1571dfb95aa3 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -842,7 +842,7 @@ void rxrpc_process_call(struct work_struct *work) msg.msg_controllen = 0; msg.msg_flags = 0; - whdr.epoch = htonl(call->conn->epoch); + whdr.epoch = htonl(call->conn->proto.epoch); whdr.cid = htonl(call->cid); whdr.callNumber = htonl(call->call_id); whdr.seq = 0; @@ -1264,7 +1264,7 @@ maybe_reschedule: if (call->state >= RXRPC_CALL_COMPLETE && !list_empty(&call->accept_link)) { _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }", - call, call->events, call->flags, call->conn->cid); + call, call->events, call->flags, 
call->conn->proto.cid); read_lock_bh(&call->state_lock); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && @@ -1282,7 +1282,7 @@ error: * this means there's a race between clearing the flag and setting the * work pending bit and the work item being processed again */ if (call->events && !work_pending(&call->processor)) { - _debug("jumpstart %x", call->conn->cid); + _debug("jumpstart %x", call->conn->proto.cid); rxrpc_queue_call(call); } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 8b4d47b3ccac..b7c6011c71bb 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -71,7 +71,7 @@ static unsigned long rxrpc_call_hashfunc( u32 call_id, u32 epoch, u16 service_id, - sa_family_t proto, + sa_family_t family, void *localptr, unsigned int addr_size, const u8 *peer_addr) @@ -92,7 +92,7 @@ static unsigned long rxrpc_call_hashfunc( key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT; key += cid & RXRPC_CHANNELMASK; key += in_clientflag; - key += proto; + key += family; /* Step through the peer address in 16-bit portions for speed */ for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++) key += *p; @@ -109,7 +109,7 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call) unsigned int addr_size = 0; _enter(""); - switch (call->proto) { + switch (call->family) { case AF_INET: addr_size = sizeof(call->peer_ip.ipv4_addr); break; @@ -121,7 +121,7 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call) } key = rxrpc_call_hashfunc(call->in_clientflag, call->cid, call->call_id, call->epoch, - call->service_id, call->proto, + call->service_id, call->family, call->conn->trans->local, addr_size, call->peer_ip.ipv6_addr); /* Store the full key in the call */ @@ -151,7 +151,7 @@ static void rxrpc_call_hash_del(struct rxrpc_call *call) struct rxrpc_call *rxrpc_find_call_hash( struct rxrpc_host_header *hdr, void *localptr, - sa_family_t proto, + sa_family_t family, const void *peer_addr) { unsigned long key; @@ -161,7 +161,7 @@ struct rxrpc_call *rxrpc_find_call_hash( u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED; _enter(""); - switch (proto) { + switch (family) { case AF_INET: addr_size = sizeof(call->peer_ip.ipv4_addr); break; @@ -174,7 +174,7 @@ struct rxrpc_call *rxrpc_find_call_hash( key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber, hdr->epoch, hdr->serviceId, - proto, localptr, addr_size, + family, localptr, addr_size, peer_addr); hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) { if (call->hash_key == key && @@ -182,7 +182,7 @@ struct rxrpc_call *rxrpc_find_call_hash( call->cid == hdr->cid && call->in_clientflag == in_clientflag && call->service_id == hdr->serviceId && - call->proto == proto && + call->family == family && call->local == localptr && memcmp(call->peer_ip.ipv6_addr, peer_addr, addr_size) == 0 && @@ -286,6 +286,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) */ static struct rxrpc_call *rxrpc_alloc_client_call( struct rxrpc_sock *rx, + struct rxrpc_conn_parameters *cp, struct rxrpc_transport *trans, struct rxrpc_conn_bundle *bundle, gfp_t gfp) @@ -307,16 +308,16 @@ static struct rxrpc_call *rxrpc_alloc_client_call( call->socket = rx; call->rx_data_post = 1; - ret = rxrpc_connect_call(rx, trans, bundle, call, gfp); + ret = rxrpc_connect_call(rx, cp, trans, bundle, call, gfp); if (ret < 0) { kmem_cache_free(rxrpc_call_jar, call); return ERR_PTR(ret); } /* Record copies of information for hashtable lookup */ - call->proto = rx->proto; - call->local = trans->local; - switch (call->proto) { + 
call->family = rx->family; + call->local = call->conn->params.local; + switch (call->family) { case AF_INET: call->peer_ip.ipv4_addr = trans->peer->srx.transport.sin.sin_addr.s_addr; @@ -327,9 +328,9 @@ static struct rxrpc_call *rxrpc_alloc_client_call( sizeof(call->peer_ip.ipv6_addr)); break; } - call->epoch = call->conn->epoch; - call->service_id = call->conn->service_id; - call->in_clientflag = call->conn->in_clientflag; + call->epoch = call->conn->proto.epoch; + call->service_id = call->conn->params.service_id; + call->in_clientflag = call->conn->proto.in_clientflag; /* Add the new call to the hashtable */ rxrpc_call_hash_add(call); @@ -349,6 +350,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call( * - called in process context with IRQs enabled */ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, + struct rxrpc_conn_parameters *cp, struct rxrpc_transport *trans, struct rxrpc_conn_bundle *bundle, unsigned long user_call_ID, @@ -361,7 +363,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, rx, trans->debug_id, bundle ? bundle->debug_id : -1, user_call_ID); - call = rxrpc_alloc_client_call(rx, trans, bundle, gfp); + call = rxrpc_alloc_client_call(rx, cp, trans, bundle, gfp); if (IS_ERR(call)) { _leave(" = %ld", PTR_ERR(call)); return call; @@ -524,9 +526,9 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, write_unlock_bh(&rxrpc_call_lock); /* Record copies of information for hashtable lookup */ - call->proto = rx->proto; + call->family = rx->family; call->local = conn->trans->local; - switch (call->proto) { + switch (call->family) { case AF_INET: call->peer_ip.ipv4_addr = conn->trans->peer->srx.transport.sin.sin_addr.s_addr; @@ -539,9 +541,9 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, default: break; } - call->epoch = conn->epoch; - call->service_id = conn->service_id; - call->in_clientflag = conn->in_clientflag; + call->epoch = conn->proto.epoch; + call->service_id = conn->params.service_id; + call->in_clientflag = conn->proto.in_clientflag; /* Add the new call to the hashtable */ rxrpc_call_hash_add(call); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 00c92b614485..51e280c662e0 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -94,8 +94,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, msg.msg_controllen = 0; msg.msg_flags = 0; - whdr.epoch = htonl(conn->epoch); - whdr.cid = htonl(conn->cid); + whdr.epoch = htonl(conn->proto.epoch); + whdr.cid = htonl(conn->proto.cid); whdr.callNumber = 0; whdr.seq = 0; whdr.type = RXRPC_PACKET_TYPE_ABORT; @@ -103,7 +103,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, whdr.userStatus = 0; whdr.securityIndex = conn->security_ix; whdr._rsvd = 0; - whdr.serviceId = htons(conn->service_id); + whdr.serviceId = htons(conn->params.service_id); word = htonl(conn->local_abort); @@ -220,7 +220,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) ASSERT(conn->security_ix != 0); - if (!conn->key) { + if (!conn->params.key) { _debug("set up security"); ret = rxrpc_init_server_conn_security(conn); switch (ret) { diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 8ecde4b77b55..c6787b6f459f 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -220,7 +220,7 @@ static void rxrpc_assign_connection_id(struct rxrpc_connection *conn) _enter(""); - epoch = conn->epoch; + epoch = conn->proto.epoch; write_lock_bh(&conn->trans->conn_lock); @@ -237,13 +237,13 @@ 
attempt_insertion: parent = *p; xconn = rb_entry(parent, struct rxrpc_connection, node); - if (epoch < xconn->epoch) + if (epoch < xconn->proto.epoch) p = &(*p)->rb_left; - else if (epoch > xconn->epoch) + else if (epoch > xconn->proto.epoch) p = &(*p)->rb_right; - else if (cid < xconn->cid) + else if (cid < xconn->proto.cid) p = &(*p)->rb_left; - else if (cid > xconn->cid) + else if (cid > xconn->proto.cid) p = &(*p)->rb_right; else goto id_exists; @@ -254,7 +254,7 @@ attempt_insertion: rb_link_node(&conn->node, parent, p); rb_insert_color(&conn->node, &conn->trans->client_conns); - conn->cid = cid; + conn->proto.cid = cid; write_unlock_bh(&conn->trans->conn_lock); _leave(" [CID %x]", cid); return; @@ -275,8 +275,8 @@ id_exists: goto attempt_insertion; xconn = rb_entry(parent, struct rxrpc_connection, node); - if (epoch < xconn->epoch || - cid < xconn->cid) + if (epoch < xconn->proto.epoch || + cid < xconn->proto.cid) goto attempt_insertion; } } @@ -318,8 +318,8 @@ static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn, * connect a call on an exclusive connection */ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, + struct rxrpc_conn_parameters *cp, struct rxrpc_transport *trans, - u16 service_id, struct rxrpc_call *call, gfp_t gfp) { @@ -340,19 +340,21 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, conn->trans = trans; conn->bundle = NULL; - conn->service_id = service_id; - conn->epoch = rxrpc_epoch; - conn->in_clientflag = 0; + conn->params = *cp; + conn->proto.local = cp->local; + conn->proto.epoch = rxrpc_epoch; + conn->proto.cid = 0; + conn->proto.in_clientflag = 0; + conn->proto.family = cp->peer->srx.transport.family; conn->out_clientflag = RXRPC_CLIENT_INITIATED; - conn->cid = 0; conn->state = RXRPC_CONN_CLIENT; conn->avail_calls = RXRPC_MAXCALLS - 1; - conn->security_level = rx->min_sec_level; - conn->key = key_get(rx->key); + + key_get(conn->params.key); ret = rxrpc_init_client_conn_security(conn); if (ret < 0) { - key_put(conn->key); + key_put(conn->params.key); kfree(conn); _leave(" = %d [key]", ret); return ret; @@ -389,7 +391,7 @@ found_channel: conn->channels[chan] = call; call->conn = conn; call->channel = chan; - call->cid = conn->cid | chan; + call->cid = conn->proto.cid | chan; call->call_id = ++conn->call_counter; _net("CONNECT client on conn %d chan %d as call %x", @@ -412,6 +414,7 @@ no_free_channels: * - called in process context with IRQs enabled */ int rxrpc_connect_call(struct rxrpc_sock *rx, + struct rxrpc_conn_parameters *cp, struct rxrpc_transport *trans, struct rxrpc_conn_bundle *bundle, struct rxrpc_call *call, @@ -425,8 +428,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, _enter("%p,%lx,", rx, call->user_call_ID); if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags)) - return rxrpc_connect_exclusive(rx, trans, bundle->service_id, - call, gfp); + return rxrpc_connect_exclusive(rx, cp, trans, call, gfp); spin_lock(&trans->client_lock); for (;;) { @@ -517,19 +519,21 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, candidate->trans = trans; candidate->bundle = bundle; - candidate->service_id = bundle->service_id; - candidate->epoch = rxrpc_epoch; - candidate->in_clientflag = 0; + candidate->params = *cp; + candidate->proto.local = cp->local; + candidate->proto.epoch = rxrpc_epoch; + candidate->proto.cid = 0; + candidate->proto.in_clientflag = 0; + candidate->proto.family = cp->peer->srx.transport.family; candidate->out_clientflag = RXRPC_CLIENT_INITIATED; - candidate->cid = 0; candidate->state = RXRPC_CONN_CLIENT; 
candidate->avail_calls = RXRPC_MAXCALLS; - candidate->security_level = rx->min_sec_level; - candidate->key = key_get(bundle->key); + + key_get(candidate->params.key); ret = rxrpc_init_client_conn_security(candidate); if (ret < 0) { - key_put(candidate->key); + key_put(candidate->params.key); kfree(candidate); _leave(" = %d [key]", ret); return ret; @@ -577,7 +581,7 @@ found_channel: conn->channels[chan] = call; call->conn = conn; call->channel = chan; - call->cid = conn->cid | chan; + call->cid = conn->proto.cid | chan; call->call_id = ++conn->call_counter; _net("CONNECT client on conn %d chan %d as call %x", @@ -626,15 +630,15 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, while (p) { conn = rb_entry(p, struct rxrpc_connection, node); - _debug("maybe %x", conn->cid); + _debug("maybe %x", conn->proto.cid); - if (epoch < conn->epoch) + if (epoch < conn->proto.epoch) p = p->rb_left; - else if (epoch > conn->epoch) + else if (epoch > conn->proto.epoch) p = p->rb_right; - else if (cid < conn->cid) + else if (cid < conn->proto.cid) p = p->rb_left; - else if (cid > conn->cid) + else if (cid > conn->proto.cid) p = p->rb_right; else goto found_extant_connection; @@ -650,14 +654,17 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, } candidate->trans = trans; - candidate->epoch = hdr->epoch; - candidate->cid = hdr->cid & RXRPC_CIDMASK; - candidate->service_id = hdr->serviceId; + candidate->proto.local = trans->local; + candidate->proto.epoch = hdr->epoch; + candidate->proto.cid = hdr->cid & RXRPC_CIDMASK; + candidate->proto.in_clientflag = RXRPC_CLIENT_INITIATED; + candidate->params.local = trans->local; + candidate->params.peer = trans->peer; + candidate->params.service_id = hdr->serviceId; candidate->security_ix = hdr->securityIndex; - candidate->in_clientflag = RXRPC_CLIENT_INITIATED; candidate->out_clientflag = 0; candidate->state = RXRPC_CONN_SERVER; - if (candidate->service_id) + if (candidate->params.service_id) candidate->state = RXRPC_CONN_SERVER_UNSECURED; write_lock_bh(&trans->conn_lock); @@ -668,13 +675,13 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, p = *pp; conn = rb_entry(p, struct rxrpc_connection, node); - if (epoch < conn->epoch) + if (epoch < conn->proto.epoch) pp = &(*pp)->rb_left; - else if (epoch > conn->epoch) + else if (epoch > conn->proto.epoch) pp = &(*pp)->rb_right; - else if (cid < conn->cid) + else if (cid < conn->proto.cid) pp = &(*pp)->rb_left; - else if (cid > conn->cid) + else if (cid > conn->proto.cid) pp = &(*pp)->rb_right; else goto found_extant_second; @@ -696,7 +703,7 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, new = "new"; success: - _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->cid); + _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid); _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); return conn; @@ -754,15 +761,15 @@ struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, while (p) { conn = rb_entry(p, struct rxrpc_connection, node); - _debug("maybe %x", conn->cid); + _debug("maybe %x", conn->proto.cid); - if (epoch < conn->epoch) + if (epoch < conn->proto.epoch) p = p->rb_left; - else if (epoch > conn->epoch) + else if (epoch > conn->proto.epoch) p = p->rb_right; - else if (cid < conn->cid) + else if (cid < conn->proto.cid) p = p->rb_left; - else if (cid > conn->cid) + else if (cid > conn->proto.cid) p = p->rb_right; else goto found; @@ -816,7 +823,7 @@ static void rxrpc_destroy_connection(struct rxrpc_connection *conn) 
rxrpc_purge_queue(&conn->rx_queue); conn->security->clear(conn); - key_put(conn->key); + key_put(conn->params.key); key_put(conn->server_key); rxrpc_put_transport(conn->trans); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index e11e4d785127..c030abd4d2d8 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -360,7 +360,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) case RXRPC_PACKET_TYPE_BUSY: _proto("Rx BUSY %%%u", sp->hdr.serial); - if (call->conn->out_clientflag) + if (rxrpc_conn_is_service(call->conn)) goto protocol_error; write_lock_bh(&call->state_lock); @@ -533,7 +533,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call, case RXRPC_CALL_COMPLETE: case RXRPC_CALL_CLIENT_FINAL_ACK: /* complete server call */ - if (call->conn->in_clientflag) + if (rxrpc_conn_is_service(call->conn)) goto dead_call; /* resend last packet of a completed call */ _debug("final ack again"); diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 4ad56fafe3a7..18c737a61d80 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -987,7 +987,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn, if (ret < 0) goto error; - conn->key = key; + conn->params.key = key; _leave(" = 0 [%d]", key_serial(key)); return 0; diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index e6fb3863b0bc..8c51745cccea 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -133,6 +133,7 @@ static struct rxrpc_call * rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, unsigned long user_call_ID) { + struct rxrpc_conn_parameters cp; struct rxrpc_conn_bundle *bundle; struct rxrpc_transport *trans; struct rxrpc_call *call; @@ -146,23 +147,32 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, if (!msg->msg_name) return ERR_PTR(-EDESTADDRREQ); - trans = rxrpc_name_to_transport(rx, msg->msg_name, msg->msg_namelen, 0, + key = rx->key; + if (key && !rx->key->payload.data[0]) + key = NULL; + + memset(&cp, 0, sizeof(cp)); + cp.local = rx->local; + cp.key = rx->key; + cp.security_level = rx->min_sec_level; + cp.exclusive = test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); + cp.service_id = srx->srx_service; + trans = rxrpc_name_to_transport(&cp, msg->msg_name, msg->msg_namelen, GFP_KERNEL); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } + cp.peer = trans->peer; - key = rx->key; - if (key && !rx->key->payload.data[0]) - key = NULL; - bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, GFP_KERNEL); + bundle = rxrpc_get_bundle(rx, trans, cp.key, srx->srx_service, + GFP_KERNEL); if (IS_ERR(bundle)) { ret = PTR_ERR(bundle); goto out_trans; } - call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID, + call = rxrpc_new_client_call(rx, &cp, trans, bundle, user_call_ID, GFP_KERNEL); rxrpc_put_bundle(trans, bundle); rxrpc_put_transport(trans); @@ -664,7 +674,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, seq = atomic_inc_return(&call->sequence); - sp->hdr.epoch = conn->epoch; + sp->hdr.epoch = conn->proto.epoch; sp->hdr.cid = call->cid; sp->hdr.callNumber = call->call_id; sp->hdr.seq = seq; diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 225163bc658d..bbee05850801 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -74,10 +74,10 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) " %-8.8s %08x %lx\n", lbuff, rbuff, - call->conn->service_id, + call->conn->params.service_id, call->cid, call->call_id, - call->conn->in_clientflag ? "Svc" : "Clt", + rxrpc_conn_is_service(call->conn) ? 
"Svc" : "Clt", atomic_read(&call->usage), rxrpc_call_states[call->state], call->remote_abort ?: call->local_abort, @@ -157,13 +157,13 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) " %s %08x %08x %08x\n", lbuff, rbuff, - conn->service_id, - conn->cid, + conn->params.service_id, + conn->proto.cid, conn->call_counter, - conn->in_clientflag ? "Svc" : "Clt", + rxrpc_conn_is_service(conn) ? "Svc" : "Clt", atomic_read(&conn->usage), rxrpc_conn_states[conn->state], - key_serial(conn->key), + key_serial(conn->params.key), atomic_read(&conn->serial), atomic_read(&conn->hi_serial)); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 59706b9f2f7a..c5bac4e0db71 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -205,7 +205,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, /* we transferred the whole data packet */ if (sp->hdr.flags & RXRPC_LAST_PACKET) { _debug("last"); - if (call->conn->out_clientflag) { + if (rxrpc_conn_is_client(call->conn)) { /* last byte of reply received */ ret = copied; goto terminal_message; diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 36a634027d9d..134c2713ae23 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -58,9 +58,9 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn) struct rxrpc_key_token *token; int ret; - _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); + _enter("{%d},{%x}", conn->debug_id, key_serial(conn->params.key)); - token = conn->key->payload.data[0]; + token = conn->params.key->payload.data[0]; conn->security_ix = token->security_index; ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); @@ -74,7 +74,7 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn) sizeof(token->kad->session_key)) < 0) BUG(); - switch (conn->security_level) { + switch (conn->params.security_level) { case RXRPC_SECURITY_PLAIN: break; case RXRPC_SECURITY_AUTH: @@ -115,14 +115,14 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn) _enter(""); - if (!conn->key) + if (!conn->params.key) return; - token = conn->key->payload.data[0]; + token = conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); - tmpbuf.x[0] = htonl(conn->epoch); - tmpbuf.x[1] = htonl(conn->cid); + tmpbuf.x[0] = htonl(conn->proto.epoch); + tmpbuf.x[1] = htonl(conn->proto.cid); tmpbuf.x[2] = 0; tmpbuf.x[3] = htonl(conn->security_ix); @@ -220,7 +220,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, rxkhdr.checksum = 0; /* encrypt from the session key */ - token = call->conn->key->payload.data[0]; + token = call->conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); @@ -277,13 +277,13 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, sp = rxrpc_skb(skb); _enter("{%d{%x}},{#%u},%zu,", - call->debug_id, key_serial(call->conn->key), sp->hdr.seq, - data_size); + call->debug_id, key_serial(call->conn->params.key), + sp->hdr.seq, data_size); if (!call->conn->cipher) return 0; - ret = key_validate(call->conn->key); + ret = key_validate(call->conn->params.key); if (ret < 0) return ret; @@ -312,7 +312,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, y = 1; /* zero checksums are not permitted */ sp->hdr.cksum = y; - switch (call->conn->security_level) { + switch (call->conn->params.security_level) { case RXRPC_SECURITY_PLAIN: ret = 0; break; @@ -446,7 +446,7 @@ static int 
rxkad_verify_packet_encrypt(const struct rxrpc_call *call, skb_to_sgvec(skb, sg, 0, skb->len); /* decrypt from the session key */ - token = call->conn->key->payload.data[0]; + token = call->conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); skcipher_request_set_tfm(req, call->conn->cipher); @@ -516,7 +516,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, sp = rxrpc_skb(skb); _enter("{%d{%x}},{#%u}", - call->debug_id, key_serial(call->conn->key), sp->hdr.seq); + call->debug_id, key_serial(call->conn->params.key), sp->hdr.seq); if (!call->conn->cipher) return 0; @@ -557,7 +557,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, return -EPROTO; } - switch (call->conn->security_level) { + switch (call->conn->params.security_level) { case RXRPC_SECURITY_PLAIN: ret = 0; break; @@ -589,9 +589,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) u32 serial; int ret; - _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); + _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); - ret = key_validate(conn->key); + ret = key_validate(conn->params.key); if (ret < 0) return ret; @@ -608,8 +608,8 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) msg.msg_controllen = 0; msg.msg_flags = 0; - whdr.epoch = htonl(conn->epoch); - whdr.cid = htonl(conn->cid); + whdr.epoch = htonl(conn->proto.epoch); + whdr.cid = htonl(conn->proto.cid); whdr.callNumber = 0; whdr.seq = 0; whdr.type = RXRPC_PACKET_TYPE_CHALLENGE; @@ -617,7 +617,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) whdr.userStatus = 0; whdr.securityIndex = conn->security_ix; whdr._rsvd = 0; - whdr.serviceId = htons(conn->service_id); + whdr.serviceId = htons(conn->params.service_id); iov[0].iov_base = &whdr; iov[0].iov_len = sizeof(whdr); @@ -771,14 +771,14 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, u32 version, nonce, min_level, abort_code; int ret; - _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); + _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); - if (!conn->key) { + if (!conn->params.key) { _leave(" = -EPROTO [no key]"); return -EPROTO; } - ret = key_validate(conn->key); + ret = key_validate(conn->params.key); if (ret < 0) { *_abort_code = RXKADEXPIRED; return ret; @@ -801,20 +801,20 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, goto protocol_error; abort_code = RXKADLEVELFAIL; - if (conn->security_level < min_level) + if (conn->params.security_level < min_level) goto protocol_error; - token = conn->key->payload.data[0]; + token = conn->params.key->payload.data[0]; /* build the response packet */ memset(&resp, 0, sizeof(resp)); resp.version = htonl(RXKAD_VERSION); - resp.encrypted.epoch = htonl(conn->epoch); - resp.encrypted.cid = htonl(conn->cid); + resp.encrypted.epoch = htonl(conn->proto.epoch); + resp.encrypted.cid = htonl(conn->proto.cid); resp.encrypted.securityIndex = htonl(conn->security_ix); resp.encrypted.inc_nonce = htonl(nonce + 1); - resp.encrypted.level = htonl(conn->security_level); + resp.encrypted.level = htonl(conn->params.security_level); resp.kvno = htonl(token->kad->kvno); resp.ticket_len = htonl(token->kad->ticket_len); @@ -1096,9 +1096,9 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, rxkad_decrypt_response(conn, &response, &session_key); abort_code = RXKADSEALEDINCON; - if (ntohl(response.encrypted.epoch) != conn->epoch) + if (ntohl(response.encrypted.epoch) != conn->proto.epoch) goto 
protocol_error_free; - if (ntohl(response.encrypted.cid) != conn->cid) + if (ntohl(response.encrypted.cid) != conn->proto.cid) goto protocol_error_free; if (ntohl(response.encrypted.securityIndex) != conn->security_ix) goto protocol_error_free; @@ -1122,7 +1122,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, level = ntohl(response.encrypted.level); if (level > RXRPC_SECURITY_ENCRYPT) goto protocol_error_free; - conn->security_level = level; + conn->params.security_level = level; /* create a key to hold the security data and expiration time - after * this the connection security can be handled in exactly the same way diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c index d223253b22fa..40955d0f2693 100644 --- a/net/rxrpc/security.c +++ b/net/rxrpc/security.c @@ -76,7 +76,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) { const struct rxrpc_security *sec; struct rxrpc_key_token *token; - struct key *key = conn->key; + struct key *key = conn->params.key; int ret; _enter("{%d},{%x}", conn->debug_id, key_serial(key)); @@ -121,7 +121,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) _enter(""); - sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); + sprintf(kdesc, "%u:%u", conn->params.service_id, conn->security_ix); sec = rxrpc_security_lookup(conn->security_ix); if (!sec) { @@ -132,7 +132,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) /* find the service */ read_lock_bh(&local->services_lock); list_for_each_entry(rx, &local->services, listen_link) { - if (rx->srx.srx_service == conn->service_id) + if (rx->srx.srx_service == conn->params.service_id) goto found_service; } -- cgit v1.2.3-59-g8ed1b From 85f32278bd98fa89dff528b0baea4ae6eea4cc5d Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 4 Apr 2016 14:00:36 +0100 Subject: rxrpc: Replace conn->trans->{local,peer} with conn->params.{local,peer} Replace accesses of conn->trans->{local,peer} with conn->params.{local,peer} thus making it easier for a future commit to remove the rxrpc_transport struct. This also reduces the number of memory accesses involved. 
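To make the shape of that change concrete, here is a small self-contained C sketch; the struct names and fields are simplified stand-ins, not the real rxrpc definitions. Caching the local and peer pointers in the connection's parameter block turns a two-step pointer chase through the transport into a single dereference off the connection.

#include <stdio.h>

/* Simplified stand-ins for the rxrpc objects; illustrative only. */
struct peer      { unsigned int maxdata; };
struct local     { int socket_fd; };
struct transport { struct local *local; struct peer *peer; };

struct conn_parameters {
	struct local *local;	/* cached when the connection is set up */
	struct peer  *peer;
};

struct connection {
	struct transport       *trans;	/* old path: conn->trans->peer */
	struct conn_parameters  params;	/* new path: conn->params.peer */
};

int main(void)
{
	struct peer p = { .maxdata = 1412 };
	struct local l = { .socket_fd = 3 };
	struct transport t = { .local = &l, .peer = &p };
	struct connection c = { .trans = &t, .params = { .local = &l, .peer = &p } };

	/* Old form: two dependent loads (conn->trans, then trans->peer). */
	printf("via transport: maxdata = %u\n", c.trans->peer->maxdata);

	/* New form: the peer pointer is reached directly from the connection. */
	printf("via params:    maxdata = %u\n", c.params.peer->maxdata);
	return 0;
}

The hunks below perform this substitution mechanically across call_event.c, call_object.c, conn_event.c and the other files in the diffstat.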
Signed-off-by: David Howells --- net/rxrpc/call_event.c | 12 ++++++------ net/rxrpc/call_object.c | 28 ++++++++++++++-------------- net/rxrpc/conn_event.c | 6 +++--- net/rxrpc/input.c | 2 +- net/rxrpc/output.c | 2 +- net/rxrpc/proc.c | 22 ++++++++++------------ net/rxrpc/recvmsg.c | 4 ++-- net/rxrpc/rxkad.c | 12 ++++++------ net/rxrpc/security.c | 2 +- 9 files changed, 44 insertions(+), 46 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 1571dfb95aa3..b43faf573ed3 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -545,7 +545,7 @@ static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU)); - peer = call->conn->trans->peer; + peer = call->conn->params.peer; if (mtu < peer->maxdata) { spin_lock_bh(&peer->lock); peer->maxdata = mtu; @@ -836,8 +836,8 @@ void rxrpc_process_call(struct work_struct *work) /* there's a good chance we're going to have to send a message, so set * one up in advance */ - msg.msg_name = &call->conn->trans->peer->srx.transport; - msg.msg_namelen = call->conn->trans->peer->srx.transport_len; + msg.msg_name = &call->conn->params.peer->srx.transport; + msg.msg_namelen = call->conn->params.peer->srx.transport_len; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -1151,8 +1151,8 @@ send_ACK_with_skew: ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - ntohl(ack.serial)); send_ACK: - mtu = call->conn->trans->peer->if_mtu; - mtu -= call->conn->trans->peer->hdrsize; + mtu = call->conn->params.peer->if_mtu; + mtu -= call->conn->params.peer->hdrsize; ackinfo.maxMTU = htonl(mtu); ackinfo.rwind = htonl(rxrpc_rx_window_size); @@ -1206,7 +1206,7 @@ send_message_2: len += iov[1].iov_len; } - ret = kernel_sendmsg(call->conn->trans->local->socket, + ret = kernel_sendmsg(call->conn->params.local->socket, &msg, iov, ioc, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index b7c6011c71bb..5c2dceaf6a9c 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -122,7 +122,7 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call) key = rxrpc_call_hashfunc(call->in_clientflag, call->cid, call->call_id, call->epoch, call->service_id, call->family, - call->conn->trans->local, addr_size, + call->conn->params.local, addr_size, call->peer_ip.ipv6_addr); /* Store the full key in the call */ call->hash_key = key; @@ -320,11 +320,11 @@ static struct rxrpc_call *rxrpc_alloc_client_call( switch (call->family) { case AF_INET: call->peer_ip.ipv4_addr = - trans->peer->srx.transport.sin.sin_addr.s_addr; + call->conn->params.peer->srx.transport.sin.sin_addr.s_addr; break; case AF_INET6: memcpy(call->peer_ip.ipv6_addr, - trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, + call->conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, sizeof(call->peer_ip.ipv6_addr)); break; } @@ -334,9 +334,9 @@ static struct rxrpc_call *rxrpc_alloc_client_call( /* Add the new call to the hashtable */ rxrpc_call_hash_add(call); - spin_lock(&call->conn->trans->peer->lock); - hlist_add_head(&call->error_link, &call->conn->trans->peer->error_targets); - spin_unlock(&call->conn->trans->peer->lock); + spin_lock(&call->conn->params.peer->lock); + hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets); + spin_unlock(&call->conn->params.peer->lock); call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime; 
add_timer(&call->lifetimer); @@ -517,9 +517,9 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, atomic_inc(&conn->usage); write_unlock_bh(&conn->lock); - spin_lock(&conn->trans->peer->lock); - hlist_add_head(&call->error_link, &conn->trans->peer->error_targets); - spin_unlock(&conn->trans->peer->lock); + spin_lock(&conn->params.peer->lock); + hlist_add_head(&call->error_link, &conn->params.peer->error_targets); + spin_unlock(&conn->params.peer->lock); write_lock_bh(&rxrpc_call_lock); list_add_tail(&call->link, &rxrpc_calls); @@ -527,15 +527,15 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, /* Record copies of information for hashtable lookup */ call->family = rx->family; - call->local = conn->trans->local; + call->local = conn->params.local; switch (call->family) { case AF_INET: call->peer_ip.ipv4_addr = - conn->trans->peer->srx.transport.sin.sin_addr.s_addr; + conn->params.peer->srx.transport.sin.sin_addr.s_addr; break; case AF_INET6: memcpy(call->peer_ip.ipv6_addr, - conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, + conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, sizeof(call->peer_ip.ipv6_addr)); break; default: @@ -813,9 +813,9 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call) } if (call->conn) { - spin_lock(&call->conn->trans->peer->lock); + spin_lock(&call->conn->params.peer->lock); hlist_del_init(&call->error_link); - spin_unlock(&call->conn->trans->peer->lock); + spin_unlock(&call->conn->params.peer->lock); write_lock_bh(&call->conn->lock); rb_erase(&call->conn_node, &call->conn->calls); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 51e280c662e0..a022439f6f5a 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -88,8 +88,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); - msg.msg_name = &conn->trans->peer->srx.transport; - msg.msg_namelen = conn->trans->peer->srx.transport_len; + msg.msg_name = &conn->params.peer->srx.transport; + msg.msg_namelen = conn->params.peer->srx.transport_len; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -118,7 +118,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, whdr.serial = htonl(serial); _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index c030abd4d2d8..6af7f40c5030 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -560,7 +560,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call, dead_call: if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { skb->priority = RX_CALL_DEAD; - rxrpc_reject_packet(call->conn->trans->local, skb); + rxrpc_reject_packet(call->conn->params.local, skb); goto unlock; } free_unlock: diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 8c51745cccea..becbaa7c0a7c 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -583,7 +583,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, goto maybe_error; } - max = call->conn->trans->peer->maxdata; + max = call->conn->params.peer->maxdata; max -= call->conn->security_size; max &= ~(call->conn->size_align - 1UL); diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index bbee05850801..9863270691d7 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -46,7 
+46,7 @@ static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) static int rxrpc_call_seq_show(struct seq_file *seq, void *v) { - struct rxrpc_transport *trans; + struct rxrpc_connection *conn; struct rxrpc_call *call; char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; @@ -59,15 +59,15 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) } call = list_entry(v, struct rxrpc_call, link); - trans = call->conn->trans; + conn = call->conn; sprintf(lbuff, "%pI4:%u", - &trans->local->srx.transport.sin.sin_addr, - ntohs(trans->local->srx.transport.sin.sin_port)); + &conn->params.local->srx.transport.sin.sin_addr, + ntohs(conn->params.local->srx.transport.sin.sin_port)); sprintf(rbuff, "%pI4:%u", - &trans->peer->srx.transport.sin.sin_addr, - ntohs(trans->peer->srx.transport.sin.sin_port)); + &conn->params.peer->srx.transport.sin.sin_addr, + ntohs(conn->params.peer->srx.transport.sin.sin_port)); seq_printf(seq, "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" @@ -129,7 +129,6 @@ static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) { struct rxrpc_connection *conn; - struct rxrpc_transport *trans; char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; if (v == &rxrpc_connections) { @@ -142,15 +141,14 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) } conn = list_entry(v, struct rxrpc_connection, link); - trans = conn->trans; sprintf(lbuff, "%pI4:%u", - &trans->local->srx.transport.sin.sin_addr, - ntohs(trans->local->srx.transport.sin.sin_port)); + &conn->params.local->srx.transport.sin.sin_addr, + ntohs(conn->params.local->srx.transport.sin.sin_port)); sprintf(rbuff, "%pI4:%u", - &trans->peer->srx.transport.sin.sin_addr, - ntohs(trans->peer->srx.transport.sin.sin_port)); + &conn->params.peer->srx.transport.sin.sin_addr, + ntohs(conn->params.peer->srx.transport.sin.sin_port)); seq_printf(seq, "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index c5bac4e0db71..a3fa2ed85d63 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -147,9 +147,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (!continue_call) { if (msg->msg_name) { size_t len = - sizeof(call->conn->trans->peer->srx); + sizeof(call->conn->params.peer->srx); memcpy(msg->msg_name, - &call->conn->trans->peer->srx, len); + &call->conn->params.peer->srx, len); msg->msg_namelen = len; } sock_recv_timestamp(msg, &rx->sk, skb); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 134c2713ae23..23c05ec6fa28 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -602,8 +602,8 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) challenge.min_level = htonl(0); challenge.__padding = 0; - msg.msg_name = &conn->trans->peer->srx.transport.sin; - msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_name = &conn->params.peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->params.peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -630,7 +630,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) whdr.serial = htonl(serial); _proto("Tx CHALLENGE %%%u", serial); - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; @@ -657,8 +657,8 @@ static int rxkad_send_response(struct 
rxrpc_connection *conn, _enter(""); - msg.msg_name = &conn->trans->peer->srx.transport.sin; - msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_name = &conn->params.peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->params.peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -684,7 +684,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, whdr.serial = htonl(serial); _proto("Tx RESPONSE %%%u", serial); - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c index 40955d0f2693..814d285ff802 100644 --- a/net/rxrpc/security.c +++ b/net/rxrpc/security.c @@ -113,7 +113,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) { const struct rxrpc_security *sec; - struct rxrpc_local *local = conn->trans->local; + struct rxrpc_local *local = conn->params.local; struct rxrpc_sock *rx; struct key *key; key_ref_t kref; -- cgit v1.2.3-59-g8ed1b From 5627cc8b961e4b07d5d649d9bd01ac929dcc1a95 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 4 Apr 2016 14:00:38 +0100 Subject: rxrpc: Provide more refcount helper functions Provide refcount helper functions for connections so that the code doesn't touch local or connection usage counts directly. Also make it such that local and peer put functions can take a NULL pointer. Signed-off-by: David Howells --- net/rxrpc/af_rxrpc.c | 7 ++----- net/rxrpc/ar-internal.h | 15 +++++++++++++-- net/rxrpc/call_accept.c | 2 +- net/rxrpc/call_object.c | 2 +- net/rxrpc/conn_event.c | 2 +- net/rxrpc/conn_object.c | 12 ++++++------ net/rxrpc/input.c | 2 +- net/rxrpc/local_object.c | 2 +- 8 files changed, 26 insertions(+), 18 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index b29bb50af5de..57dcbfc061e4 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -674,11 +674,8 @@ static int rxrpc_release_sock(struct sock *sk) flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); - if (rx->local) { - rxrpc_put_local(rx->local); - rx->local = NULL; - } - + rxrpc_put_local(rx->local); + rx->local = NULL; key_put(rx->key); rx->key = NULL; key_put(rx->securities); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index cfbd028aa551..c0ed5e7f22ef 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -597,6 +597,17 @@ static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn) return conn->proto.in_clientflag; } +static inline void rxrpc_get_connection(struct rxrpc_connection *conn) +{ + atomic_inc(&conn->usage); +} + +static inline +struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *conn) +{ + return atomic_inc_not_zero(&conn->usage) ? 
conn : NULL; +} + /* * input.c */ @@ -645,7 +656,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) static inline void rxrpc_put_local(struct rxrpc_local *local) { - if (atomic_dec_and_test(&local->usage)) + if (local && atomic_dec_and_test(&local->usage)) __rxrpc_put_local(local); } @@ -702,7 +713,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) extern void __rxrpc_put_peer(struct rxrpc_peer *peer); static inline void rxrpc_put_peer(struct rxrpc_peer *peer) { - if (atomic_dec_and_test(&peer->usage)) + if (peer && atomic_dec_and_test(&peer->usage)) __rxrpc_put_peer(peer); } diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 5a70dc4e28c6..833ad0622b61 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -141,7 +141,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, _debug("await conn sec"); list_add_tail(&call->accept_link, &rx->secureq); call->conn->state = RXRPC_CONN_SERVER_CHALLENGING; - atomic_inc(&call->conn->usage); + rxrpc_get_connection(call->conn); set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events); rxrpc_queue_conn(call->conn); } else { diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index d83f2cbb80a9..45849a66bc56 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -515,7 +515,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, rb_insert_color(&call->conn_node, &conn->calls); conn->channels[call->channel] = call; sock_hold(&rx->sk); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); write_unlock_bh(&conn->lock); spin_lock(&conn->params.peer->lock); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index a022439f6f5a..bf6971555eac 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -263,7 +263,7 @@ void rxrpc_process_connection(struct work_struct *work) _enter("{%d}", conn->debug_id); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) { rxrpc_secure_connection(conn); diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 312b75091d29..1754f2e2e16b 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -333,7 +333,7 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, * channel. 
*/ chan = 0; - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); conn->avail_calls = RXRPC_MAXCALLS - 1; conn->channels[chan] = call; conn->call_counter = 1; @@ -392,7 +392,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, conn->channels[1] == NULL || conn->channels[2] == NULL || conn->channels[3] == NULL); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); break; } @@ -412,7 +412,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, conn->channels[1] == NULL && conn->channels[2] == NULL && conn->channels[3] == NULL); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); list_move(&conn->bundle_link, &bundle->avail_conns); break; } @@ -629,7 +629,7 @@ found_extant_connection: read_unlock_bh(&trans->conn_lock); goto security_mismatch; } - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); read_unlock_bh(&trans->conn_lock); goto success; @@ -639,7 +639,7 @@ found_extant_second: write_unlock_bh(&trans->conn_lock); goto security_mismatch; } - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); write_unlock_bh(&trans->conn_lock); kfree(candidate); goto success; @@ -698,7 +698,7 @@ struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, return NULL; found: - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); read_unlock_bh(&trans->conn_lock); _leave(" = %p", conn); return conn; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index cf540efa9c17..799aec18aa7b 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -580,7 +580,7 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, { _enter("%p,%p", conn, skb); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); skb_queue_tail(&conn->rx_queue, skb); rxrpc_queue_conn(conn); } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 009b321712bc..5703b0d18ed4 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -209,7 +209,7 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) * bind the transport socket may still fail if we're attempting * to use a local address that the dying object is still using. */ - if (!atomic_inc_not_zero(&local->usage)) { + if (!rxrpc_get_local_maybe(local)) { cursor = cursor->next; list_del_init(&local->link); break; -- cgit v1.2.3-59-g8ed1b From a263629da519b2064588377416e067727e2cbdf9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 26 Jun 2016 14:55:24 -0700 Subject: rxrpc: Avoid using stack memory in SG lists in rxkad rxkad uses stack memory in SG lists which would not work if stacks were allocated from vmalloc memory. In fact, in most cases this isn't even necessary as the stack memory ends up getting copied over to kmalloc memory. This patch eliminates all the unnecessary stack memory uses by supplying the final destination directly to the crypto API. In two instances where a temporary buffer is actually needed we also switch use a scratch area in the rxrpc_call struct (only one DATA packet will be being secured or verified at a time). Finally there is no need to split a split-page buffer into two SG entries so code dealing with that has been removed. 
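The core of the change, reduced to a hedged kernel-style sketch rather than the real rxkad functions (the function name and parameters here are made up; the crypto calls are the same ones used in the diff below): data handed to the crypto layer through a scatterlist must be backed by kmalloc/linear-map memory, so the temporary block is allocated rather than declared on the stack, and since the operation is done in place a single SG entry covering the buffer is enough.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative sketch only, not the actual rxkad code. */
static int example_prime_packet_security(struct crypto_skcipher *cipher,
					 const u8 *session_key, u32 epoch, u32 cid)
{
	SKCIPHER_REQUEST_ON_STACK(req, cipher);
	struct scatterlist sg;
	u8 iv[8];
	__be32 *tmpbuf;
	size_t tmpsize = 4 * sizeof(__be32);

	/* The buffer mapped into the SG list is kmalloc'ed, never a stack
	 * variable, so it remains usable even if stacks are vmalloc'ed.
	 */
	tmpbuf = kmalloc(tmpsize, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	memcpy(iv, session_key, sizeof(iv));
	tmpbuf[0] = htonl(epoch);
	tmpbuf[1] = htonl(cid);
	tmpbuf[2] = 0;
	tmpbuf[3] = 0;

	/* One SG entry, encrypting in place: src == dst. */
	sg_init_one(&sg, tmpbuf, tmpsize);
	skcipher_request_set_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv);
	crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);

	kfree(tmpbuf);
	return 0;
}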
Signed-off-by: Herbert Xu Signed-off-by: Andy Lutomirski Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 8 +-- net/rxrpc/conn_event.c | 5 +- net/rxrpc/conn_object.c | 6 +- net/rxrpc/insecure.c | 7 ++- net/rxrpc/rxkad.c | 150 ++++++++++++++++-------------------------------- 5 files changed, 65 insertions(+), 111 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 702db72196fb..796368d1fb25 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -141,17 +141,16 @@ struct rxrpc_security { int (*init_connection_security)(struct rxrpc_connection *); /* prime a connection's packet security */ - void (*prime_packet_security)(struct rxrpc_connection *); + int (*prime_packet_security)(struct rxrpc_connection *); /* impose security on a packet */ - int (*secure_packet)(const struct rxrpc_call *, + int (*secure_packet)(struct rxrpc_call *, struct sk_buff *, size_t, void *); /* verify the security on a received packet */ - int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *, - u32 *); + int (*verify_packet)(struct rxrpc_call *, struct sk_buff *, u32 *); /* issue a challenge */ int (*issue_challenge)(struct rxrpc_connection *); @@ -399,6 +398,7 @@ struct rxrpc_call { struct sk_buff_head rx_oos_queue; /* packets received out of sequence */ struct sk_buff *tx_pending; /* Tx socket buffer being filled */ wait_queue_head_t tx_waitq; /* wait for Tx window space to become available */ + __be32 crypto_buf[2]; /* Temporary packet crypto buffer */ unsigned long user_call_ID; /* user-defined call ID */ unsigned long creation_jif; /* time of call creation */ unsigned long flags; diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index bf6971555eac..6a3c96707831 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -188,7 +188,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, if (ret < 0) return ret; - conn->security->prime_packet_security(conn); + ret = conn->security->prime_packet_security(conn); + if (ret < 0) + return ret; + read_lock_bh(&conn->lock); spin_lock(&conn->state_lock); diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 2c2456ff2853..c2c0926af546 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -138,7 +138,9 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) if (ret < 0) goto error_1; - conn->security->prime_packet_security(conn); + ret = conn->security->prime_packet_security(conn); + if (ret < 0) + goto error_2; write_lock(&rxrpc_connection_lock); list_add_tail(&conn->link, &rxrpc_connections); @@ -152,6 +154,8 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) _leave(" = %p", conn); return conn; +error_2: + conn->security->clear(conn); error_1: rxrpc_put_client_connection_id(conn); error_0: diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c index e571403613c1..c21ad213b337 100644 --- a/net/rxrpc/insecure.c +++ b/net/rxrpc/insecure.c @@ -17,11 +17,12 @@ static int none_init_connection_security(struct rxrpc_connection *conn) return 0; } -static void none_prime_packet_security(struct rxrpc_connection *conn) +static int none_prime_packet_security(struct rxrpc_connection *conn) { + return 0; } -static int none_secure_packet(const struct rxrpc_call *call, +static int none_secure_packet(struct rxrpc_call *call, struct sk_buff *skb, size_t data_size, void *sechdr) @@ -29,7 +30,7 @@ static int none_secure_packet(const struct rxrpc_call *call, return 0; } -static 
int none_verify_packet(const struct rxrpc_call *call, +static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, u32 *_abort_code) { diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 23c05ec6fa28..3acc7c1241d4 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -103,43 +103,43 @@ error: * prime the encryption state with the invariant parts of a connection's * description */ -static void rxkad_prime_packet_security(struct rxrpc_connection *conn) +static int rxkad_prime_packet_security(struct rxrpc_connection *conn) { struct rxrpc_key_token *token; SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); - struct scatterlist sg[2]; + struct scatterlist sg; struct rxrpc_crypt iv; - struct { - __be32 x[4]; - } tmpbuf __attribute__((aligned(16))); /* must all be in same page */ + __be32 *tmpbuf; + size_t tmpsize = 4 * sizeof(__be32); _enter(""); if (!conn->params.key) - return; + return 0; + + tmpbuf = kmalloc(tmpsize, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; token = conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); - tmpbuf.x[0] = htonl(conn->proto.epoch); - tmpbuf.x[1] = htonl(conn->proto.cid); - tmpbuf.x[2] = 0; - tmpbuf.x[3] = htonl(conn->security_ix); - - sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); - sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); + tmpbuf[0] = htonl(conn->proto.epoch); + tmpbuf[1] = htonl(conn->proto.cid); + tmpbuf[2] = 0; + tmpbuf[3] = htonl(conn->security_ix); + sg_init_one(&sg, tmpbuf, tmpsize); skcipher_request_set_tfm(req, conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x); - + skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); crypto_skcipher_encrypt(req); skcipher_request_zero(req); - memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv)); - ASSERTCMP((u32 __force)conn->csum_iv.n[0], ==, (u32 __force)tmpbuf.x[2]); - - _leave(""); + memcpy(&conn->csum_iv, tmpbuf + 2, sizeof(conn->csum_iv)); + kfree(tmpbuf); + _leave(" = 0"); + return 0; } /* @@ -152,12 +152,9 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, { struct rxrpc_skb_priv *sp; SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); + struct rxkad_level1_hdr hdr; struct rxrpc_crypt iv; - struct scatterlist sg[2]; - struct { - struct rxkad_level1_hdr hdr; - __be32 first; /* first four bytes of data and padding */ - } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ + struct scatterlist sg; u16 check; sp = rxrpc_skb(skb); @@ -167,24 +164,19 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, check = sp->hdr.seq ^ sp->hdr.callNumber; data_size |= (u32)check << 16; - tmpbuf.hdr.data_size = htonl(data_size); - memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first)); + hdr.data_size = htonl(data_size); + memcpy(sechdr, &hdr, sizeof(hdr)); /* start the encryption afresh */ memset(&iv, 0, sizeof(iv)); - sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); - sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); - + sg_init_one(&sg, sechdr, 8); skcipher_request_set_tfm(req, call->conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x); - + skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); crypto_skcipher_encrypt(req); skcipher_request_zero(req); - memcpy(sechdr, &tmpbuf, sizeof(tmpbuf)); - _leave(" = 0"); return 0; } @@ -198,8 +190,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, void *sechdr) { const struct 
rxrpc_key_token *token; - struct rxkad_level2_hdr rxkhdr - __attribute__((aligned(8))); /* must be all on one page */ + struct rxkad_level2_hdr rxkhdr; struct rxrpc_skb_priv *sp; SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_crypt iv; @@ -218,18 +209,16 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, rxkhdr.data_size = htonl(data_size | (u32)check << 16); rxkhdr.checksum = 0; + memcpy(sechdr, &rxkhdr, sizeof(rxkhdr)); /* encrypt from the session key */ token = call->conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); - sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr)); - skcipher_request_set_tfm(req, call->conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(rxkhdr), iv.x); - + skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x); crypto_skcipher_encrypt(req); /* we want to encrypt the skbuff in-place */ @@ -243,9 +232,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, sg_init_table(sg, nsg); skb_to_sgvec(skb, sg, 0, len); - skcipher_request_set_crypt(req, sg, sg, len, iv.x); - crypto_skcipher_encrypt(req); _leave(" = 0"); @@ -259,7 +246,7 @@ out: /* * checksum an RxRPC packet header */ -static int rxkad_secure_packet(const struct rxrpc_call *call, +static int rxkad_secure_packet(struct rxrpc_call *call, struct sk_buff *skb, size_t data_size, void *sechdr) @@ -267,10 +254,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, struct rxrpc_skb_priv *sp; SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_crypt iv; - struct scatterlist sg[2]; - struct { - __be32 x[2]; - } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ + struct scatterlist sg; u32 x, y; int ret; @@ -293,20 +277,17 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, /* calculate the security checksum */ x = call->channel << (32 - RXRPC_CIDSHIFT); x |= sp->hdr.seq & 0x3fffffff; - tmpbuf.x[0] = htonl(sp->hdr.callNumber); - tmpbuf.x[1] = htonl(x); - - sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); - sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); + call->crypto_buf[0] = htonl(sp->hdr.callNumber); + call->crypto_buf[1] = htonl(x); + sg_init_one(&sg, call->crypto_buf, 8); skcipher_request_set_tfm(req, call->conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x); - + skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); crypto_skcipher_encrypt(req); skcipher_request_zero(req); - y = ntohl(tmpbuf.x[1]); + y = ntohl(call->crypto_buf[1]); y = (y >> 16) & 0xffff; if (y == 0) y = 1; /* zero checksums are not permitted */ @@ -367,7 +348,6 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, skcipher_request_set_tfm(req, call->conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, 8, iv.x); - crypto_skcipher_decrypt(req); skcipher_request_zero(req); @@ -452,7 +432,6 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, skcipher_request_set_tfm(req, call->conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, skb->len, iv.x); - crypto_skcipher_decrypt(req); skcipher_request_zero(req); if (sg != _sg) @@ -498,17 +477,14 @@ nomem: /* * verify the security on a received packet */ -static int rxkad_verify_packet(const struct rxrpc_call *call, +static 
int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, u32 *_abort_code) { SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_skb_priv *sp; struct rxrpc_crypt iv; - struct scatterlist sg[2]; - struct { - __be32 x[2]; - } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ + struct scatterlist sg; u16 cksum; u32 x, y; int ret; @@ -533,20 +509,17 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, /* validate the security checksum */ x = call->channel << (32 - RXRPC_CIDSHIFT); x |= sp->hdr.seq & 0x3fffffff; - tmpbuf.x[0] = htonl(call->call_id); - tmpbuf.x[1] = htonl(x); - - sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); - sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); + call->crypto_buf[0] = htonl(call->call_id); + call->crypto_buf[1] = htonl(x); + sg_init_one(&sg, call->crypto_buf, 8); skcipher_request_set_tfm(req, call->conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x); - + skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); crypto_skcipher_encrypt(req); skcipher_request_zero(req); - y = ntohl(tmpbuf.x[1]); + y = ntohl(call->crypto_buf[1]); cksum = (y >> 16) & 0xffff; if (cksum == 0) cksum = 1; /* zero checksums are not permitted */ @@ -709,29 +682,6 @@ static void rxkad_calc_response_checksum(struct rxkad_response *response) response->encrypted.checksum = htonl(csum); } -/* - * load a scatterlist with a potentially split-page buffer - */ -static void rxkad_sg_set_buf2(struct scatterlist sg[2], - void *buf, size_t buflen) -{ - int nsg = 1; - - sg_init_table(sg, 2); - - sg_set_buf(&sg[0], buf, buflen); - if (sg[0].offset + buflen > PAGE_SIZE) { - /* the buffer was split over two pages */ - sg[0].length = PAGE_SIZE - sg[0].offset; - sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); - nsg++; - } - - sg_mark_end(&sg[nsg - 1]); - - ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); -} - /* * encrypt the response packet */ @@ -741,17 +691,16 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn, { SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); struct rxrpc_crypt iv; - struct scatterlist sg[2]; + struct scatterlist sg[1]; /* continue encrypting from where we left off */ memcpy(&iv, s2->session_key, sizeof(iv)); - rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); - + sg_init_table(sg, 1); + sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted)); skcipher_request_set_tfm(req, conn->cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); - crypto_skcipher_encrypt(req); skcipher_request_zero(req); } @@ -887,10 +836,8 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, } sg_init_one(&sg[0], ticket, ticket_len); - skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, ticket_len, iv.x); - crypto_skcipher_decrypt(req); skcipher_request_free(req); @@ -1001,7 +948,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn, const struct rxrpc_crypt *session_key) { SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci); - struct scatterlist sg[2]; + struct scatterlist sg[1]; struct rxrpc_crypt iv; _enter(",,%08x%08x", @@ -1016,12 +963,11 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn, memcpy(&iv, session_key, sizeof(iv)); - rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); - + sg_init_table(sg, 1); + sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted)); 
skcipher_request_set_tfm(req, rxkad_ci); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); - crypto_skcipher_decrypt(req); skcipher_request_zero(req); -- cgit v1.2.3-59-g8ed1b From 5acbee4648789ba1fe9e7942280fb1966c76bd6f Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 27 Jun 2016 10:32:02 +0100 Subject: rxrpc: Provide queuing helper functions Provide queueing helper functions so that the queueing of local and connection objects can be fixed later. The issue is that a ref on the object needs to be passed to the work queue, but the act of queueing the object may fail because the object is already queued. Testing the queuedness of an object before hand doesn't work because there can be a race with someone else trying to queue it. What will have to be done is to adjust the refcount depending on the result of the queue operation. Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 12 +++++++++++- net/rxrpc/conn_event.c | 2 +- net/rxrpc/input.c | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 796368d1fb25..45aef3ef7609 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -35,7 +35,6 @@ struct rxrpc_crypt { queue_delayed_work(rxrpc_workqueue, (WS), (D)) #define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor) -#define rxrpc_queue_conn(CONN) rxrpc_queue_work(&(CONN)->processor) struct rxrpc_connection; @@ -566,6 +565,12 @@ static inline void rxrpc_get_connection(struct rxrpc_connection *conn) atomic_inc(&conn->usage); } + +static inline void rxrpc_queue_conn(struct rxrpc_connection *conn) +{ + rxrpc_queue_work(&conn->processor); +} + /* * input.c */ @@ -618,6 +623,11 @@ static inline void rxrpc_put_local(struct rxrpc_local *local) __rxrpc_put_local(local); } +static inline void rxrpc_queue_local(struct rxrpc_local *local) +{ + rxrpc_queue_work(&local->processor); +} + /* * misc.c */ diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 6a3c96707831..d7e183c6b5df 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -318,7 +318,7 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) CHECK_SLAB_OKAY(&local->usage); skb_queue_tail(&local->reject_queue, skb); - rxrpc_queue_work(&local->processor); + rxrpc_queue_local(local); } /* diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 5f26cae43069..fe7ff339d7e5 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -595,7 +595,7 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, _enter("%p,%p", local, skb); skb_queue_tail(&local->event_queue, skb); - rxrpc_queue_work(&local->processor); + rxrpc_queue_local(local); } /* -- cgit v1.2.3-59-g8ed1b From bba304db34ec3ca0d13e7f48e5a4e9896536cacc Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 27 Jun 2016 10:32:02 +0100 Subject: rxrpc: Turn connection #defines into enums and put outside struct def Turn the connection event and state #define lists into enums and move outside of the struct definition. Whilst we're at it, change _SERVER to _SERVICE in those identifiers and add EV_ into the event name to distinguish them from flags and states. Also add a symbol indicating the number of states and use that in the state text array. 
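The same pattern in miniature, as a standalone C example with generic names rather than the rxrpc identifiers: the trailing sentinel both documents how many states exist and lets the compiler size the name table, so a newly added state can never index past the end of the array.

#include <stdio.h>

/* Generic illustration of the #define -> enum conversion; these are not the
 * rxrpc identifiers.
 */
enum example_conn_state {
	EXAMPLE_CONN_UNUSED,
	EXAMPLE_CONN_CLIENT,
	EXAMPLE_CONN_SERVICE,
	EXAMPLE_CONN_ABORTED,
	EXAMPLE_CONN__NR_STATES		/* sentinel, always kept last */
};

/* Sized by the sentinel, so adding a state automatically grows the table
 * instead of risking an out-of-bounds read in the /proc formatter.
 */
static const char *const example_conn_states[EXAMPLE_CONN__NR_STATES] = {
	[EXAMPLE_CONN_UNUSED]  = "Unused  ",
	[EXAMPLE_CONN_CLIENT]  = "Client  ",
	[EXAMPLE_CONN_SERVICE] = "SvSecure",
	[EXAMPLE_CONN_ABORTED] = "Aborted ",
};

struct example_conn {
	/* An 8-bit enum bitfield keeps the struct compact, as the patch below
	 * does for the connection state.
	 */
	enum example_conn_state state : 8;
};

int main(void)
{
	struct example_conn conn = { .state = EXAMPLE_CONN_SERVICE };

	printf("state %d is %s\n", conn.state, example_conn_states[conn.state]);
	return 0;
}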
Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 42 ++++++++++++++++++++++++++++++------------ net/rxrpc/call_accept.c | 6 +++--- net/rxrpc/conn_event.c | 6 +++--- net/rxrpc/conn_object.c | 4 ++-- net/rxrpc/proc.c | 18 +++++++++--------- 5 files changed, 47 insertions(+), 29 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 45aef3ef7609..3f0d0479a4da 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -253,6 +253,35 @@ struct rxrpc_conn_parameters { u32 security_level; /* Security level selected */ }; +/* + * Bits in the connection flags. + */ +enum rxrpc_conn_flag { + RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */ +}; + +/* + * Events that can be raised upon a connection. + */ +enum rxrpc_conn_event { + RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */ +}; + +/* + * The connection protocol state. + */ +enum rxrpc_conn_proto_state { + RXRPC_CONN_UNUSED, /* Connection not yet attempted */ + RXRPC_CONN_CLIENT, /* Client connection */ + RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */ + RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */ + RXRPC_CONN_SERVICE, /* Service secured connection */ + RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */ + RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */ + RXRPC_CONN_NETWORK_ERROR, /* Conn terminated by network error */ + RXRPC_CONN__NR_STATES +}; + /* * RxRPC connection definition * - matched by { local, peer, epoch, conn_id, direction } @@ -279,23 +308,12 @@ struct rxrpc_connection { struct crypto_skcipher *cipher; /* encryption handle */ struct rxrpc_crypt csum_iv; /* packet checksum base */ unsigned long flags; -#define RXRPC_CONN_HAS_IDR 0 /* - Has a client conn ID assigned */ unsigned long events; -#define RXRPC_CONN_CHALLENGE 0 /* send challenge packet */ unsigned long put_time; /* Time at which last put */ rwlock_t lock; /* access lock */ spinlock_t state_lock; /* state-change lock */ atomic_t usage; - enum { /* current state of connection */ - RXRPC_CONN_UNUSED, /* - connection not yet attempted */ - RXRPC_CONN_CLIENT, /* - client connection */ - RXRPC_CONN_SERVER_UNSECURED, /* - server unsecured connection */ - RXRPC_CONN_SERVER_CHALLENGING, /* - server challenging for security */ - RXRPC_CONN_SERVER, /* - server secured connection */ - RXRPC_CONN_REMOTELY_ABORTED, /* - conn aborted by peer */ - RXRPC_CONN_LOCALLY_ABORTED, /* - conn aborted locally */ - RXRPC_CONN_NETWORK_ERROR, /* - conn terminated by network error */ - } state; + enum rxrpc_conn_proto_state state : 8; /* current state of connection */ u32 local_abort; /* local abort code */ u32 remote_abort; /* remote abort code */ int error; /* local error incurred */ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 202e053a3c6d..1c0860df150e 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -128,12 +128,12 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, spin_lock(&call->conn->state_lock); if (sp->hdr.securityIndex > 0 && - call->conn->state == RXRPC_CONN_SERVER_UNSECURED) { + call->conn->state == RXRPC_CONN_SERVICE_UNSECURED) { _debug("await conn sec"); list_add_tail(&call->accept_link, &rx->secureq); - call->conn->state = RXRPC_CONN_SERVER_CHALLENGING; + call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING; rxrpc_get_connection(call->conn); - set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events); + set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events); rxrpc_queue_conn(call->conn); 
} else { _debug("conn ready"); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index d7e183c6b5df..b9c39b83eddb 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -195,8 +195,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, read_lock_bh(&conn->lock); spin_lock(&conn->state_lock); - if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) { - conn->state = RXRPC_CONN_SERVER; + if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { + conn->state = RXRPC_CONN_SERVICE; for (loop = 0; loop < RXRPC_MAXCALLS; loop++) rxrpc_call_is_secure(conn->channels[loop]); } @@ -268,7 +268,7 @@ void rxrpc_process_connection(struct work_struct *work) rxrpc_get_connection(conn); - if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) { + if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) { rxrpc_secure_connection(conn); rxrpc_put_connection(conn); } diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index c2c0926af546..0e022dfab034 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -399,9 +399,9 @@ struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local, candidate->params.service_id = sp->hdr.serviceId; candidate->security_ix = sp->hdr.securityIndex; candidate->out_clientflag = 0; - candidate->state = RXRPC_CONN_SERVER; + candidate->state = RXRPC_CONN_SERVICE; if (candidate->params.service_id) - candidate->state = RXRPC_CONN_SERVER_UNSECURED; + candidate->state = RXRPC_CONN_SERVICE_UNSECURED; write_lock_bh(&peer->conn_lock); diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 500cdcdc843c..2a25ab425b6f 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -14,15 +14,15 @@ #include #include "ar-internal.h" -static const char *const rxrpc_conn_states[] = { - [RXRPC_CONN_UNUSED] = "Unused ", - [RXRPC_CONN_CLIENT] = "Client ", - [RXRPC_CONN_SERVER_UNSECURED] = "SvUnsec ", - [RXRPC_CONN_SERVER_CHALLENGING] = "SvChall ", - [RXRPC_CONN_SERVER] = "SvSecure", - [RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort", - [RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort", - [RXRPC_CONN_NETWORK_ERROR] = "NetError", +static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = { + [RXRPC_CONN_UNUSED] = "Unused ", + [RXRPC_CONN_CLIENT] = "Client ", + [RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ", + [RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ", + [RXRPC_CONN_SERVICE] = "SvSecure", + [RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort", + [RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort", + [RXRPC_CONN_NETWORK_ERROR] = "NetError", }; /* -- cgit v1.2.3-59-g8ed1b From 2c4579e4b1d5a6219522c6e970500b2fd43fe1f8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 27 Jun 2016 10:32:03 +0100 Subject: rxrpc: Move usage count getting into rxrpc_queue_conn() Rather than calling rxrpc_get_connection() manually before calling rxrpc_queue_conn(), do it inside the queue wrapper. This allows us to do some important fixes: (1) If the usage count is 0, do nothing. This prevents connections from being reanimated once they're dead. (2) If rxrpc_queue_work() fails because the work item is already queued, retract the usage count increment which would otherwise be lost. (3) Don't take a ref on the connection in the work function. By passing the ref through the work item, this is unnecessary. Doing it in the work function is too late anyway. Previously, connection-directed packets held a ref on the connection, but that's not really the best idea. 
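Fixes (1) and (2) are easiest to see as a small userspace model, using stdatomic stand-ins rather than the kernel primitives: the ref taken by the queueing wrapper travels with the pending work item, and it is dropped again on the spot if the item turns out to be already queued, so nothing is leaked and a dead object is never resurrected.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace sketch of the refcount-passing pattern; the struct and helpers
 * are stand-ins, not the kernel implementation.
 */
struct conn {
	atomic_int usage;	/* object refcount */
	atomic_bool queued;	/* stands in for the work item's pending bit */
};

/* Mimics queue_work(): returns false if the item was already pending. */
static bool fake_queue_work(struct conn *conn)
{
	return !atomic_exchange(&conn->queued, true);
}

/* Mimics atomic_inc_not_zero(): only take a ref if the object is still live. */
static bool get_conn_maybe(struct conn *conn)
{
	int old = atomic_load(&conn->usage);

	while (old != 0)
		if (atomic_compare_exchange_weak(&conn->usage, &old, old + 1))
			return true;
	return false;
}

static void put_conn(struct conn *conn)
{
	if (atomic_fetch_sub(&conn->usage, 1) == 1)
		printf("conn destroyed\n");
}

/* The queueing wrapper: the ref travels with the work item; if the item was
 * already queued (or the object is already dead), the extra ref is retracted.
 */
static void queue_conn(struct conn *conn)
{
	if (get_conn_maybe(conn) && !fake_queue_work(conn))
		put_conn(conn);
}

int main(void)
{
	struct conn c = { .usage = 1, .queued = false };

	queue_conn(&c);		/* queues the work; usage goes 1 -> 2 */
	queue_conn(&c);		/* already queued: the extra ref is dropped */
	printf("usage now %d\n", atomic_load(&c.usage));
	return 0;
}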
Another useful change: (*) We don't need to take a refcount on the connection in the data_ready handler unless we invoke the connection's work item; we're using RCU there, so the refcount is otherwise redundant. Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 9 ++++++++- net/rxrpc/call_accept.c | 1 - net/rxrpc/conn_event.c | 8 +------- net/rxrpc/input.c | 1 - 4 files changed, 9 insertions(+), 10 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 6583a8399c89..9fc89cdc6ae3 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -584,10 +584,17 @@ static inline void rxrpc_get_connection(struct rxrpc_connection *conn) atomic_inc(&conn->usage); } +static inline +struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *conn) +{ + return atomic_inc_not_zero(&conn->usage) ? conn : NULL; +} static inline void rxrpc_queue_conn(struct rxrpc_connection *conn) { - rxrpc_queue_work(&conn->processor); + if (rxrpc_get_connection_maybe(conn) && + !rxrpc_queue_work(&conn->processor)) + rxrpc_put_connection(conn); } /* diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 1c0860df150e..5367dbe9b96f 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -132,7 +132,6 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, _debug("await conn sec"); list_add_tail(&call->accept_link, &rx->secureq); call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING; - rxrpc_get_connection(call->conn); set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events); rxrpc_queue_conn(call->conn); } else { diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index b9c39b83eddb..9ceddd3fd5db 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -266,12 +266,8 @@ void rxrpc_process_connection(struct work_struct *work) _enter("{%d}", conn->debug_id); - rxrpc_get_connection(conn); - - if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) { + if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) rxrpc_secure_connection(conn); - rxrpc_put_connection(conn); - } /* go through the conn-level event packets, releasing the ref on this * connection that each one has when we've finished with it */ @@ -286,7 +282,6 @@ void rxrpc_process_connection(struct work_struct *work) goto requeue_and_leave; case -ECONNABORTED: default: - rxrpc_put_connection(conn); rxrpc_free_skb(skb); break; } @@ -304,7 +299,6 @@ requeue_and_leave: protocol_error: if (rxrpc_abort_connection(conn, -ret, abort_code) < 0) goto requeue_and_leave; - rxrpc_put_connection(conn); rxrpc_free_skb(skb); _leave(" [EPROTO]"); goto out; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index fe7ff339d7e5..b993f2dc5a09 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -580,7 +580,6 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, { _enter("%p,%p", conn, skb); - rxrpc_get_connection(conn); skb_queue_tail(&conn->rx_queue, skb); rxrpc_queue_conn(conn); } -- cgit v1.2.3-59-g8ed1b From dee46364ce6fd0815ad9da625783eda21ccf7b06 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 27 Jun 2016 17:11:19 +0100 Subject: rxrpc: Add RCU destruction for connections and calls Add RCU destruction for connections and calls as the RCU lookup from the transport socket data_ready handler is going to come along shortly.
Whilst we're at it, move the cleanup workqueue flushing and RCU barrierage into the destruction code for the objects that need it (locals and connections) and add the extra RCU barrier required for connection cleanup. Signed-off-by: David Howells --- net/rxrpc/af_rxrpc.c | 19 ------------------- net/rxrpc/ar-internal.h | 4 +++- net/rxrpc/call_object.c | 18 +++++++++++++++--- net/rxrpc/conn_event.c | 5 ++++- net/rxrpc/conn_object.c | 31 +++++++++++++++++++++++++++---- net/rxrpc/local_object.c | 19 +++++++++++-------- 6 files changed, 60 insertions(+), 36 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index d5073eb02498..d6e4e3b69dc3 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -788,26 +788,7 @@ static void __exit af_rxrpc_exit(void) proto_unregister(&rxrpc_proto); rxrpc_destroy_all_calls(); rxrpc_destroy_all_connections(); - ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); - - /* We need to flush the scheduled work twice because the local endpoint - * records involve a work item in their destruction as they can only be - * destroyed from process context. However, a connection may have a - * work item outstanding - and this will pin the local endpoint record - * until the connection goes away. - * - * Peers don't pin locals and calls pin sockets - which prevents the - * module from being unloaded - so we should only need two flushes. - */ - _debug("flush scheduled work"); - flush_workqueue(rxrpc_workqueue); - _debug("flush scheduled work 2"); - flush_workqueue(rxrpc_workqueue); - _debug("synchronise RCU"); - rcu_barrier(); - _debug("destroy locals"); - rxrpc_destroy_client_conn_ids(); rxrpc_destroy_all_locals(); remove_proc_entry("rxrpc_conns", init_net.proc_net); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 9fc89cdc6ae3..b401fa9d7963 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -292,9 +292,10 @@ struct rxrpc_connection { struct rxrpc_conn_parameters params; spinlock_t channel_lock; - struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* active calls */ + struct rxrpc_call __rcu *channels[RXRPC_MAXCALLS]; /* active calls */ wait_queue_head_t channel_wq; /* queue to wait for channel to become available */ + struct rcu_head rcu; struct work_struct processor; /* connection event processor */ union { struct rb_node client_node; /* Node in local->client_conns */ @@ -398,6 +399,7 @@ enum rxrpc_call_state { * - matched by { connection, call_id } */ struct rxrpc_call { + struct rcu_head rcu; struct rxrpc_connection *conn; /* connection carrying call */ struct rxrpc_sock *socket; /* socket responsible */ struct timer_list lifetimer; /* lifetime remaining on call */ diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index b43d89c89744..2c6c57c0d52c 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -480,7 +480,8 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, write_lock_bh(&conn->lock); /* set the channel for this call */ - call = conn->channels[candidate->channel]; + call = rcu_dereference_protected(conn->channels[candidate->channel], + lockdep_is_held(&conn->lock)); _debug("channel[%u] is %p", candidate->channel, call); if (call && call->call_id == sp->hdr.callNumber) { /* already set; must've been a duplicate packet */ @@ -544,7 +545,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, candidate = NULL; rb_link_node(&call->conn_node, parent, p); rb_insert_color(&call->conn_node, &conn->calls); - 
conn->channels[call->channel] = call; + rcu_assign_pointer(conn->channels[call->channel], call); sock_hold(&rx->sk); rxrpc_get_connection(conn); write_unlock_bh(&conn->lock); @@ -794,6 +795,17 @@ void __rxrpc_put_call(struct rxrpc_call *call) _leave(""); } +/* + * Final call destruction under RCU. + */ +static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) +{ + struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); + + rxrpc_purge_queue(&call->rx_queue); + kmem_cache_free(rxrpc_call_jar, call); +} + /* * clean up a call */ @@ -849,7 +861,7 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call) rxrpc_purge_queue(&call->rx_queue); ASSERT(skb_queue_empty(&call->rx_oos_queue)); sock_put(&call->socket->sk); - kmem_cache_free(rxrpc_call_jar, call); + call_rcu(&call->rcu, rxrpc_rcu_destroy_call); } /* diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 9ceddd3fd5db..f6ca8c5c4496 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -198,7 +198,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { conn->state = RXRPC_CONN_SERVICE; for (loop = 0; loop < RXRPC_MAXCALLS; loop++) - rxrpc_call_is_secure(conn->channels[loop]); + rxrpc_call_is_secure( + rcu_dereference_protected( + conn->channels[loop], + lockdep_is_held(&conn->lock))); } spin_unlock(&conn->state_lock); diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 99d18107421f..0165a629388b 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -542,7 +542,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) spin_lock(&conn->channel_lock); - if (conn->channels[chan] == call) { + if (rcu_access_pointer(conn->channels[chan]) == call) { rcu_assign_pointer(conn->channels[chan], NULL); atomic_inc(&conn->avail_chans); wake_up(&conn->channel_wq); @@ -580,9 +580,12 @@ void rxrpc_put_connection(struct rxrpc_connection *conn) /* * destroy a virtual connection */ -static void rxrpc_destroy_connection(struct rxrpc_connection *conn) +static void rxrpc_destroy_connection(struct rcu_head *rcu) { - _enter("%p{%d}", conn, atomic_read(&conn->usage)); + struct rxrpc_connection *conn = + container_of(rcu, struct rxrpc_connection, rcu); + + _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage)); ASSERTCMP(atomic_read(&conn->usage), ==, 0); @@ -677,7 +680,8 @@ static void rxrpc_connection_reaper(struct work_struct *work) list_del_init(&conn->link); ASSERTCMP(atomic_read(&conn->usage), ==, 0); - rxrpc_destroy_connection(conn); + skb_queue_purge(&conn->rx_queue); + call_rcu(&conn->rcu, rxrpc_destroy_connection); } _leave(""); @@ -689,11 +693,30 @@ static void rxrpc_connection_reaper(struct work_struct *work) */ void __exit rxrpc_destroy_all_connections(void) { + struct rxrpc_connection *conn, *_p; + bool leak = false; + _enter(""); rxrpc_connection_expiry = 0; cancel_delayed_work(&rxrpc_connection_reap); rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); + flush_workqueue(rxrpc_workqueue); + + write_lock(&rxrpc_connection_lock); + list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) { + pr_err("AF_RXRPC: Leaked conn %p {%d}\n", + conn, atomic_read(&conn->usage)); + leak = true; + } + write_unlock(&rxrpc_connection_lock); + BUG_ON(leak); + + /* Make sure the local and peer records pinned by any dying connections + * are released. 
+ */ + rcu_barrier(); + rxrpc_destroy_client_conn_ids(); _leave(""); } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 3ab7764f7cd8..a753796fbe8f 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -374,14 +374,17 @@ void __exit rxrpc_destroy_all_locals(void) _enter(""); - if (list_empty(&rxrpc_local_endpoints)) - return; + flush_workqueue(rxrpc_workqueue); - mutex_lock(&rxrpc_local_mutex); - list_for_each_entry(local, &rxrpc_local_endpoints, link) { - pr_err("AF_RXRPC: Leaked local %p {%d}\n", - local, atomic_read(&local->usage)); + if (!list_empty(&rxrpc_local_endpoints)) { + mutex_lock(&rxrpc_local_mutex); + list_for_each_entry(local, &rxrpc_local_endpoints, link) { + pr_err("AF_RXRPC: Leaked local %p {%d}\n", + local, atomic_read(&local->usage)); + } + mutex_unlock(&rxrpc_local_mutex); + BUG(); } - mutex_unlock(&rxrpc_local_mutex); - BUG(); + + rcu_barrier(); } -- cgit v1.2.3-59-g8ed1b From a1399f8bb0331a1f50c76c4cac738fe57679b9bb Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 27 Jun 2016 14:39:44 +0100 Subject: rxrpc: Call channels should have separate call number spaces Each channel on a connection has a separate, independent number space from which to allocate callNumber values. It is entirely possible, for example, to have a connection with four active calls, each with call number 1. Note that the callNumber values for any particular channel don't have to start at 1, but they are supposed to increment monotonically for that channel from a client's perspective and may not be reused once the call number is transmitted (until the epoch cycles all the way back round). Currently, however, call numbers are allocated on a per-connection basis and, further, are held in an rb-tree. The rb-tree is redundant as the four channel pointers in the rxrpc_connection struct are entirely capable of pointing to all the calls currently in progress on a connection. To this end, make the following changes: (1) Handle call number allocation independently per channel. (2) Get rid of the conn->calls rb-tree. This is overkill as a connection may have a maximum of four calls in progress at any one time. Use the pointers in the channels[] array instead, indexed by the channel number from the packet. (3) For each channel, save the result of the last call that was in progress on that channel in conn->channels[] so that the final ACK or ABORT packet can be replayed if necessary. Any call earlier than that is just ignored. If we've seen the next call number in a packet, the last one is most definitely defunct. (4) When generating a RESPONSE packet for a connection, the call number counter for each channel must be included in it. (5) When parsing a RESPONSE packet for a connection, the call number counters contained therein should be used to set the minimum expected call numbers on each channel. To do in future commits: (1) Replay terminal packets based on the last call stored in conn->channels[]. (2) Connections should be retired before the callNumber space on any channel runs out. (3) A server is expected to disregard or reject any new incoming call that has a call number less than the current call number counter. The call number counter for that channel must be advanced to the new call number. 
Note that the server cannot just require that the next call that it sees on a channel be exactly the call number counter + 1 because then there's a scenario that could cause a problem: The client transmits a packet to initiate a connection, the network goes out, the server sends an ACK (which gets lost), the client sends an ABORT (which also gets lost); the network then reconnects, the client then reuses the call number for the next call (it doesn't know the server already saw the call number), but the server thinks it already has the first packet of this call (it doesn't know that the client doesn't know that it saw the call number the first time). Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 14 +++++---- net/rxrpc/call_object.c | 60 +++++++++++++++----------------------- net/rxrpc/conn_event.c | 24 ++++++++------- net/rxrpc/conn_object.c | 77 +++++++++++++++++++------------------------------ net/rxrpc/proc.c | 5 ++-- net/rxrpc/rxkad.c | 41 +++++++++++++++++--------- 6 files changed, 104 insertions(+), 117 deletions(-) (limited to 'net/rxrpc/conn_event.c') diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index b401fa9d7963..b697654340a8 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -292,7 +292,14 @@ struct rxrpc_connection { struct rxrpc_conn_parameters params; spinlock_t channel_lock; - struct rxrpc_call __rcu *channels[RXRPC_MAXCALLS]; /* active calls */ + + struct rxrpc_channel { + struct rxrpc_call __rcu *call; /* Active call */ + u32 call_id; /* ID of current call */ + u32 call_counter; /* Call ID counter */ + u32 last_call; /* ID of last call */ + u32 last_result; /* Result of last call (0/abort) */ + } channels[RXRPC_MAXCALLS]; wait_queue_head_t channel_wq; /* queue to wait for channel to become available */ struct rcu_head rcu; @@ -302,7 +309,6 @@ struct rxrpc_connection { struct rb_node service_node; /* Node in peer->service_conns */ }; struct list_head link; /* link in master connection list */ - struct rb_root calls; /* calls on this connection */ struct sk_buff_head rx_queue; /* received conn-level packets */ const struct rxrpc_security *security; /* applied security module */ struct key *server_key; /* security for this service */ @@ -311,7 +317,6 @@ struct rxrpc_connection { unsigned long flags; unsigned long events; unsigned long put_time; /* Time at which last put */ - rwlock_t lock; /* access lock */ spinlock_t state_lock; /* state-change lock */ atomic_t usage; enum rxrpc_conn_proto_state state : 8; /* current state of connection */ @@ -319,7 +324,6 @@ struct rxrpc_connection { u32 remote_abort; /* remote abort code */ int error; /* local error incurred */ int debug_id; /* debug ID for printks */ - unsigned int call_counter; /* call ID counter */ atomic_t serial; /* packet serial number counter */ atomic_t hi_serial; /* highest serial number received */ atomic_t avail_chans; /* number of channels available */ @@ -412,7 +416,6 @@ struct rxrpc_call { struct hlist_node error_link; /* link in error distribution list */ struct list_head accept_link; /* calls awaiting acceptance */ struct rb_node sock_node; /* node in socket call tree */ - struct rb_node conn_node; /* node in connection call tree */ struct sk_buff_head rx_queue; /* received packets */ struct sk_buff_head rx_oos_queue; /* packets received out of sequence */ struct sk_buff *tx_pending; /* Tx socket buffer being filled */ @@ -564,6 +567,7 @@ int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, struct rxrpc_connection 
*rxrpc_find_connection(struct rxrpc_local *, struct rxrpc_peer *, struct sk_buff *); +void __rxrpc_disconnect_call(struct rxrpc_call *); void rxrpc_disconnect_call(struct rxrpc_call *); void rxrpc_put_connection(struct rxrpc_connection *); void __exit rxrpc_destroy_all_connections(void); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 2c6c57c0d52c..3f278721269e 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -456,8 +456,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_call *call, *candidate; - struct rb_node **p, *parent; - u32 call_id; + u32 call_id, chan; _enter(",%d", conn->debug_id); @@ -467,21 +466,23 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, if (!candidate) return ERR_PTR(-EBUSY); + chan = sp->hdr.cid & RXRPC_CHANNELMASK; candidate->socket = rx; candidate->conn = conn; candidate->cid = sp->hdr.cid; candidate->call_id = sp->hdr.callNumber; - candidate->channel = sp->hdr.cid & RXRPC_CHANNELMASK; + candidate->channel = chan; candidate->rx_data_post = 0; candidate->state = RXRPC_CALL_SERVER_ACCEPTING; if (conn->security_ix > 0) candidate->state = RXRPC_CALL_SERVER_SECURING; - write_lock_bh(&conn->lock); + spin_lock(&conn->channel_lock); /* set the channel for this call */ - call = rcu_dereference_protected(conn->channels[candidate->channel], - lockdep_is_held(&conn->lock)); + call = rcu_dereference_protected(conn->channels[chan].call, + lockdep_is_held(&conn->channel_lock)); + _debug("channel[%u] is %p", candidate->channel, call); if (call && call->call_id == sp->hdr.callNumber) { /* already set; must've been a duplicate packet */ @@ -510,9 +511,9 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, call->debug_id, rxrpc_call_states[call->state]); if (call->state >= RXRPC_CALL_COMPLETE) { - conn->channels[call->channel] = NULL; + __rxrpc_disconnect_call(call); } else { - write_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); kmem_cache_free(rxrpc_call_jar, candidate); _leave(" = -EBUSY"); return ERR_PTR(-EBUSY); @@ -522,33 +523,22 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, /* check the call number isn't duplicate */ _debug("check dup"); call_id = sp->hdr.callNumber; - p = &conn->calls.rb_node; - parent = NULL; - while (*p) { - parent = *p; - call = rb_entry(parent, struct rxrpc_call, conn_node); - - /* The tree is sorted in order of the __be32 value without - * turning it into host order. - */ - if (call_id < call->call_id) - p = &(*p)->rb_left; - else if (call_id > call->call_id) - p = &(*p)->rb_right; - else - goto old_call; - } + + /* We just ignore calls prior to the current call ID. Terminated calls + * are handled via the connection. 
+ */ + if (call_id <= conn->channels[chan].call_counter) + goto old_call; /* TODO: Just drop packet */ /* make the call available */ _debug("new call"); call = candidate; candidate = NULL; - rb_link_node(&call->conn_node, parent, p); - rb_insert_color(&call->conn_node, &conn->calls); - rcu_assign_pointer(conn->channels[call->channel], call); + conn->channels[chan].call_counter = call_id; + rcu_assign_pointer(conn->channels[chan].call, call); sock_hold(&rx->sk); rxrpc_get_connection(conn); - write_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); spin_lock(&conn->params.peer->lock); hlist_add_head(&call->error_link, &conn->params.peer->error_targets); @@ -588,19 +578,19 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, return call; extant_call: - write_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); kmem_cache_free(rxrpc_call_jar, candidate); _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1); return call; aborted_call: - write_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); kmem_cache_free(rxrpc_call_jar, candidate); _leave(" = -ECONNABORTED"); return ERR_PTR(-ECONNABORTED); old_call: - write_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); kmem_cache_free(rxrpc_call_jar, candidate); _leave(" = -ECONNRESET [old]"); return ERR_PTR(-ECONNRESET); @@ -648,8 +638,7 @@ void rxrpc_release_call(struct rxrpc_call *call) write_unlock_bh(&rx->call_lock); /* free up the channel for reuse */ - write_lock_bh(&conn->lock); - write_lock(&call->state_lock); + write_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE && call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { @@ -657,10 +646,7 @@ void rxrpc_release_call(struct rxrpc_call *call) call->state = RXRPC_CALL_LOCALLY_ABORTED; call->local_abort = RX_CALL_DEAD; } - write_unlock(&call->state_lock); - - rb_erase(&call->conn_node, &conn->calls); - write_unlock_bh(&conn->lock); + write_unlock_bh(&call->state_lock); rxrpc_disconnect_call(call); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index f6ca8c5c4496..cee0f35bc1cf 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -31,15 +31,17 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, u32 abort_code) { struct rxrpc_call *call; - struct rb_node *p; + int i; _enter("{%d},%x", conn->debug_id, abort_code); - read_lock_bh(&conn->lock); + spin_lock(&conn->channel_lock); - for (p = rb_first(&conn->calls); p; p = rb_next(p)) { - call = rb_entry(p, struct rxrpc_call, conn_node); - write_lock(&call->state_lock); + for (i = 0; i < RXRPC_MAXCALLS; i++) { + call = rcu_dereference_protected( + conn->channels[i].call, + lockdep_is_held(&conn->channel_lock)); + write_lock_bh(&call->state_lock); if (call->state <= RXRPC_CALL_COMPLETE) { call->state = state; if (state == RXRPC_CALL_LOCALLY_ABORTED) { @@ -51,10 +53,10 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, } rxrpc_queue_call(call); } - write_unlock(&call->state_lock); + write_unlock_bh(&call->state_lock); } - read_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); _leave(""); } @@ -192,7 +194,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, if (ret < 0) return ret; - read_lock_bh(&conn->lock); + spin_lock(&conn->channel_lock); spin_lock(&conn->state_lock); if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { @@ -200,12 +202,12 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, for (loop = 0; loop < RXRPC_MAXCALLS; loop++) rxrpc_call_is_secure( 
rcu_dereference_protected( - conn->channels[loop], - lockdep_is_held(&conn->lock))); + conn->channels[loop].call, + lockdep_is_held(&conn->channel_lock))); } spin_unlock(&conn->state_lock); - read_unlock_bh(&conn->lock); + spin_unlock(&conn->channel_lock); return 0; default: diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 0165a629388b..ce83f3e44da2 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -46,10 +46,8 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) init_waitqueue_head(&conn->channel_wq); INIT_WORK(&conn->processor, &rxrpc_process_connection); INIT_LIST_HEAD(&conn->link); - conn->calls = RB_ROOT; skb_queue_head_init(&conn->rx_queue); conn->security = &rxrpc_no_security; - rwlock_init(&conn->lock); spin_lock_init(&conn->state_lock); atomic_set(&conn->usage, 1); conn->debug_id = atomic_inc_return(&rxrpc_debug_id); @@ -62,39 +60,6 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) return conn; } -/* - * add a call to a connection's call-by-ID tree - */ -static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn, - struct rxrpc_call *call) -{ - struct rxrpc_call *xcall; - struct rb_node *parent, **p; - u32 call_id; - - write_lock_bh(&conn->lock); - - call_id = call->call_id; - p = &conn->calls.rb_node; - parent = NULL; - while (*p) { - parent = *p; - xcall = rb_entry(parent, struct rxrpc_call, conn_node); - - if (call_id < xcall->call_id) - p = &(*p)->rb_left; - else if (call_id > xcall->call_id) - p = &(*p)->rb_right; - else - BUG(); - } - - rb_link_node(&call->conn_node, parent, p); - rb_insert_color(&call->conn_node, &conn->calls); - - write_unlock_bh(&conn->lock); -} - /* * Allocate a client connection. The caller must take care to clear any * padding bytes in *cp. @@ -277,12 +242,12 @@ found_channel: call->channel = chan; call->epoch = conn->proto.epoch; call->cid = conn->proto.cid | chan; - call->call_id = ++conn->call_counter; - rcu_assign_pointer(conn->channels[chan], call); + call->call_id = ++conn->channels[chan].call_counter; + conn->channels[chan].call_id = call->call_id; + rcu_assign_pointer(conn->channels[chan].call, call); _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id); - rxrpc_add_call_ID_to_conn(conn, call); spin_unlock(&conn->channel_lock); rxrpc_put_peer(cp->peer); cp->peer = NULL; @@ -326,7 +291,7 @@ found_extant_conn: spin_lock(&conn->channel_lock); for (chan = 0; chan < RXRPC_MAXCALLS; chan++) - if (!conn->channels[chan]) + if (!conn->channels[chan].call) goto found_channel; BUG(); @@ -531,28 +496,47 @@ found: /* * Disconnect a call and clear any channel it occupies when that call - * terminates. + * terminates. The caller must hold the channel_lock and must release the + * call's ref on the connection. */ -void rxrpc_disconnect_call(struct rxrpc_call *call) +void __rxrpc_disconnect_call(struct rxrpc_call *call) { struct rxrpc_connection *conn = call->conn; - unsigned chan = call->channel; + struct rxrpc_channel *chan = &conn->channels[call->channel]; _enter("%d,%d", conn->debug_id, call->channel); - spin_lock(&conn->channel_lock); + if (rcu_access_pointer(chan->call) == call) { + /* Save the result of the call so that we can repeat it if necessary + * through the channel, whilst disposing of the actual call record. 
+ */ + chan->last_result = call->local_abort; + smp_wmb(); + chan->last_call = chan->call_id; + chan->call_id = chan->call_counter; - if (rcu_access_pointer(conn->channels[chan]) == call) { - rcu_assign_pointer(conn->channels[chan], NULL); + rcu_assign_pointer(chan->call, NULL); atomic_inc(&conn->avail_chans); wake_up(&conn->channel_wq); } + _leave(""); +} + +/* + * Disconnect a call and clear any channel it occupies when that call + * terminates. + */ +void rxrpc_disconnect_call(struct rxrpc_call *call) +{ + struct rxrpc_connection *conn = call->conn; + + spin_lock(&conn->channel_lock); + __rxrpc_disconnect_call(call); spin_unlock(&conn->channel_lock); call->conn = NULL; rxrpc_put_connection(conn); - _leave(""); } /* @@ -591,7 +575,6 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu) _net("DESTROY CONN %d", conn->debug_id); - ASSERT(RB_EMPTY_ROOT(&conn->calls)); rxrpc_purge_queue(&conn->rx_queue); conn->security->clear(conn); diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 2a25ab425b6f..ced5f07444e5 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -137,7 +137,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) if (v == &rxrpc_connections) { seq_puts(seq, "Proto Local Remote " - " SvID ConnID Calls End Use State Key " + " SvID ConnID End Use State Key " " Serial ISerial\n" ); return 0; @@ -154,13 +154,12 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) ntohs(conn->params.peer->srx.transport.sin.sin_port)); seq_printf(seq, - "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" + "UDP %-22.22s %-22.22s %4x %08x %s %3u" " %s %08x %08x %08x\n", lbuff, rbuff, conn->params.service_id, conn->proto.cid, - conn->call_counter, rxrpc_conn_is_service(conn) ? "Svc" : "Clt", atomic_read(&conn->usage), rxrpc_conn_states[conn->state], diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 3acc7c1241d4..63afa9e9cc08 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -767,14 +767,10 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, resp.kvno = htonl(token->kad->kvno); resp.ticket_len = htonl(token->kad->ticket_len); - resp.encrypted.call_id[0] = - htonl(conn->channels[0] ? conn->channels[0]->call_id : 0); - resp.encrypted.call_id[1] = - htonl(conn->channels[1] ? conn->channels[1]->call_id : 0); - resp.encrypted.call_id[2] = - htonl(conn->channels[2] ? conn->channels[2]->call_id : 0); - resp.encrypted.call_id[3] = - htonl(conn->channels[3] ? 
conn->channels[3]->call_id : 0); + resp.encrypted.call_id[0] = htonl(conn->channels[0].call_counter); + resp.encrypted.call_id[1] = htonl(conn->channels[1].call_counter); + resp.encrypted.call_id[2] = htonl(conn->channels[2].call_counter); + resp.encrypted.call_id[3] = htonl(conn->channels[3].call_counter); /* calculate the response checksum and then do the encryption */ rxkad_calc_response_checksum(&resp); @@ -991,7 +987,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, void *ticket; u32 abort_code, version, kvno, ticket_len, level; __be32 csum; - int ret; + int ret, i; _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); @@ -1054,11 +1050,26 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, if (response.encrypted.checksum != csum) goto protocol_error_free; - if (ntohl(response.encrypted.call_id[0]) > INT_MAX || - ntohl(response.encrypted.call_id[1]) > INT_MAX || - ntohl(response.encrypted.call_id[2]) > INT_MAX || - ntohl(response.encrypted.call_id[3]) > INT_MAX) - goto protocol_error_free; + spin_lock(&conn->channel_lock); + for (i = 0; i < RXRPC_MAXCALLS; i++) { + struct rxrpc_call *call; + u32 call_id = ntohl(response.encrypted.call_id[i]); + + if (call_id > INT_MAX) + goto protocol_error_unlock; + + if (call_id < conn->channels[i].call_counter) + goto protocol_error_unlock; + if (call_id > conn->channels[i].call_counter) { + call = rcu_dereference_protected( + conn->channels[i].call, + lockdep_is_held(&conn->channel_lock)); + if (call && call->state < RXRPC_CALL_COMPLETE) + goto protocol_error_unlock; + conn->channels[i].call_counter = call_id; + } + } + spin_unlock(&conn->channel_lock); abort_code = RXKADOUTOFSEQUENCE; if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1) @@ -1083,6 +1094,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, _leave(" = 0"); return 0; +protocol_error_unlock: + spin_unlock(&conn->channel_lock); protocol_error_free: kfree(ticket); protocol_error: -- cgit v1.2.3-59-g8ed1b
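As an aside to the final patch above, the per-channel call number scheme can be illustrated with a small user-space C sketch. The names below are invented and only the idea mirrors the kernel change: each of the four channels keeps its own monotonically increasing counter, the client allocates the next ID from the channel it is using, and the server disregards anything at or below the counter it has already seen on that channel.

/* Minimal user-space sketch of per-channel call-number spaces, as described
 * in the "separate call number spaces" patch above.  Names are invented for
 * illustration; only the idea (one monotonic counter per channel) mirrors
 * the kernel change.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAXCALLS 4	/* four channels per connection, as in rxrpc */

struct channel {
	uint32_t call_counter;	/* highest call number used on this channel */
};

struct connection {
	struct channel channels[MAXCALLS];
};

/* Client side: each channel hands out its own monotonically increasing IDs. */
static uint32_t client_next_call_id(struct connection *conn, unsigned int chan)
{
	return ++conn->channels[chan].call_counter;
}

/* Server side: a call number at or below the channel's counter is old;
 * anything above it is a new call and advances the counter.
 */
static bool server_accept_call(struct connection *conn, unsigned int chan,
			       uint32_t call_id)
{
	struct channel *ch = &conn->channels[chan];

	if (call_id <= ch->call_counter)
		return false;	/* duplicate or terminated call - disregard */
	ch->call_counter = call_id;
	return true;
}

int main(void)
{
	struct connection client = { 0 }, server = { 0 };

	/* All four channels may legitimately be running call number 1 at the
	 * same time, because each lives in its own number space.
	 */
	for (unsigned int chan = 0; chan < MAXCALLS; chan++) {
		uint32_t id = client_next_call_id(&client, chan);

		printf("chan %u: call %u accepted=%d\n", chan, id,
		       server_accept_call(&server, chan, id));
	}

	/* Replaying call 1 on channel 0 is rejected; the next ID is accepted. */
	printf("replay: %d\n", server_accept_call(&server, 0, 1));
	printf("next:   %d\n", server_accept_call(&server, 0,
						  client_next_call_id(&client, 0)));
	return 0;
}

The sketch shows the situation the patch describes: four active calls, each with call number 1, one per channel, with stale or replayed call numbers filtered per channel rather than via a per-connection rb-tree.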