Diffstat (limited to 'net/rxrpc/ar-ack.c')
-rw-r--r--	net/rxrpc/ar-ack.c	102
1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 7cb7e45c8bad..9183da740600 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -161,7 +161,7 @@ cancel_timer:
try_to_del_timer_sync(&call->ack_timer);
read_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
@@ -193,7 +193,7 @@ static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
if (resend & 1) {
_debug("SET RESEND");
- set_bit(RXRPC_CALL_RESEND, &call->events);
+ set_bit(RXRPC_CALL_EV_RESEND, &call->events);
}
if (resend & 2) {
@@ -203,7 +203,7 @@ static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
} else {
_debug("KILL RESEND TIMER");
del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
}
read_unlock_bh(&call->state_lock);
@@ -555,7 +555,7 @@ inserted:
if (call->state < RXRPC_CALL_COMPLETE &&
call->rx_data_post == call->rx_first_oos) {
_debug("drain rx oos now");
- set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+ set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
}
read_unlock(&call->state_lock);
@@ -793,7 +793,7 @@ all_acked:
del_timer_sync(&call->resend_timer);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
if (call->acks_window)
rxrpc_zap_tx_window(call);
@@ -928,17 +928,17 @@ void rxrpc_process_call(struct work_struct *work)
iov[0].iov_len = sizeof(hdr);
/* deal with events of a final nature */
- if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_release_call(call);
- clear_bit(RXRPC_CALL_RELEASE, &call->events);
+ clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
}
- if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
int error;
- clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
- clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
error = call->conn->trans->peer->net_error;
_debug("post net error %d", error);
@@ -946,32 +946,32 @@ void rxrpc_process_call(struct work_struct *work)
if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
error, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
goto kill_ACKs;
}
- if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
- clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
_debug("post conn abort");
if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
call->conn->error, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
goto kill_ACKs;
}
- if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
hdr.type = RXRPC_PACKET_TYPE_BUSY;
- genbit = RXRPC_CALL_REJECT_BUSY;
+ genbit = RXRPC_CALL_EV_REJECT_BUSY;
goto send_message;
}
- if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
@@ -981,12 +981,12 @@ void rxrpc_process_call(struct work_struct *work)
data = htonl(call->abort_code);
iov[1].iov_base = &data;
iov[1].iov_len = sizeof(data);
- genbit = RXRPC_CALL_ABORT;
+ genbit = RXRPC_CALL_EV_ABORT;
goto send_message;
}
- if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
- genbit = RXRPC_CALL_ACK_FINAL;
+ if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
+ genbit = RXRPC_CALL_EV_ACK_FINAL;
ack.bufferSpace = htons(8);
ack.maxSkew = 0;
@@ -1012,12 +1012,12 @@ void rxrpc_process_call(struct work_struct *work)
goto send_ACK;
}
- if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
- (1 << RXRPC_CALL_RCVD_ABORT))
+ if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
+ (1 << RXRPC_CALL_EV_RCVD_ABORT))
) {
u32 mark;
- if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
+ if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
mark = RXRPC_SKB_MARK_REMOTE_ABORT;
else
mark = RXRPC_SKB_MARK_BUSY;
@@ -1027,22 +1027,22 @@ void rxrpc_process_call(struct work_struct *work)
if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
- clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
goto kill_ACKs;
}
- if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
_debug("do implicit ackall");
rxrpc_clear_tx_window(call);
}
- if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
write_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_TIMEOUT;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
write_unlock_bh(&call->state_lock);
@@ -1051,7 +1051,7 @@ void rxrpc_process_call(struct work_struct *work)
ETIME, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
goto kill_ACKs;
}
@@ -1072,13 +1072,13 @@ void rxrpc_process_call(struct work_struct *work)
}
/* handle resending */
- if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_resend_timer(call);
- if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_resend(call);
/* consider sending an ordinary ACK */
- if (test_bit(RXRPC_CALL_ACK, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
_debug("send ACK: window: %d - %d { %lx }",
call->rx_data_eaten, call->ackr_win_top,
call->ackr_window[0]);
@@ -1086,11 +1086,11 @@ void rxrpc_process_call(struct work_struct *work)
if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
/* ACK by sending reply DATA packet in this state */
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
goto maybe_reschedule;
}
- genbit = RXRPC_CALL_ACK;
+ genbit = RXRPC_CALL_EV_ACK;
acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
GFP_NOFS);
@@ -1153,7 +1153,7 @@ void rxrpc_process_call(struct work_struct *work)
/* handle completion of security negotiations on an incoming
* connection */
- if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
_debug("secured");
spin_lock_bh(&call->lock);
@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
_debug("securing");
write_lock(&call->conn->lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
_debug("not released");
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_move_tail(&call->accept_link,
@@ -1170,39 +1170,39 @@ void rxrpc_process_call(struct work_struct *work)
write_unlock(&call->conn->lock);
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+ set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
read_unlock(&call->state_lock);
}
spin_unlock_bh(&call->lock);
- if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
+ if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
goto maybe_reschedule;
}
/* post a notification of an acceptable connection to the app */
- if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
_debug("post accept");
if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
0, false) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+ clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
goto maybe_reschedule;
}
/* handle incoming call acceptance */
- if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
_debug("accepted");
ASSERTCMP(call->rx_data_post, ==, 0);
call->rx_data_post = 1;
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+ set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
read_unlock_bh(&call->state_lock);
}
/* drain the out of sequence received packet queue into the packet Rx
* queue */
- if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
while (call->rx_data_post == call->rx_first_oos)
if (rxrpc_drain_rx_oos_queue(call) < 0)
break;
@@ -1281,12 +1281,12 @@ send_message_2:
}
switch (genbit) {
- case RXRPC_CALL_ABORT:
+ case RXRPC_CALL_EV_ABORT:
clear_bit(genbit, &call->events);
- clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
goto kill_ACKs;
- case RXRPC_CALL_ACK_FINAL:
+ case RXRPC_CALL_EV_ACK_FINAL:
write_lock_bh(&call->state_lock);
if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
call->state = RXRPC_CALL_COMPLETE;
@@ -1311,9 +1311,9 @@ send_message_2:
kill_ACKs:
del_timer_sync(&call->ack_timer);
- if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
rxrpc_put_call(call);
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
maybe_reschedule:
if (call->events || !skb_queue_empty(&call->rx_queue)) {
@@ -1332,7 +1332,7 @@ maybe_reschedule:
read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
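
Note on the renamed constants: this patch only updates the users of the call
event bits in ar-ack.c; the definitions themselves live in
net/rxrpc/ar-internal.h. As a rough sketch of what the renamed set looks like
after the patch (only the identifiers below are taken from the hunks above;
the enum form, ordering and comments are assumptions, not the exact upstream
header):

	/* Sketch of the call event bits manipulated via set_bit()/test_bit()
	 * on call->events.  Illustrative only -- see net/rxrpc/ar-internal.h
	 * for the real definitions. */
	enum rxrpc_call_event {
		RXRPC_CALL_EV_RCVD_ACKALL,	/* ACKALL or reply received */
		RXRPC_CALL_EV_RCVD_BUSY,	/* busy packet received */
		RXRPC_CALL_EV_RCVD_ABORT,	/* abort packet received */
		RXRPC_CALL_EV_RCVD_ERROR,	/* network error received */
		RXRPC_CALL_EV_ACK_FINAL,	/* need to generate final ACK */
		RXRPC_CALL_EV_ACK,		/* need to generate ACK */
		RXRPC_CALL_EV_REJECT_BUSY,	/* need to generate busy message */
		RXRPC_CALL_EV_ABORT,		/* need to generate abort */
		RXRPC_CALL_EV_CONN_ABORT,	/* local connection abort generated */
		RXRPC_CALL_EV_RESEND_TIMER,	/* Tx resend timer expired */
		RXRPC_CALL_EV_RESEND,		/* Tx resend required */
		RXRPC_CALL_EV_DRAIN_RX_OOS,	/* drain the Rx out-of-sequence queue */
		RXRPC_CALL_EV_LIFE_TIMER,	/* call's lifetimer ran out */
		RXRPC_CALL_EV_ACCEPTED,		/* incoming call accepted by the app */
		RXRPC_CALL_EV_SECURED,		/* incoming call's connection now secure */
		RXRPC_CALL_EV_POST_ACCEPT,	/* need to post an "accept?" message */
		RXRPC_CALL_EV_RELEASE,		/* need to release the call's resources */
	};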