@@ -358,7 +358,7 @@ static void smc_destruct(struct sock *sk)
{
if (smc_sk_state(sk) != SMC_CLOSED)
return;
- if (!sock_flag(sk, SOCK_DEAD))
+ if (!smc_sock_flag(sk, SOCK_DEAD))
return;
}
@@ -1623,7 +1623,7 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = -rc;
out:
- if (!sock_flag(&smc->sk, SOCK_DEAD)) {
+ if (!smc_sock_flag(&smc->sk, SOCK_DEAD)) {
if (smc->sk.sk_err) {
smc->sk.sk_state_change(&smc->sk);
} else { /* allow polling before and after fallback decision */
@@ -388,4 +388,9 @@ static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
set_bit(flag, &sk->sk_flags);
}
+/* Read accessor paired with smc_sock_set_flag(); thin wrapper over
+ * sock_flag() so SMC code sites use one consistent, type-checked API.
+ * static inline (not a macro) to match the adjacent setter's style.
+ */
+static inline bool smc_sock_flag(const struct sock *sk, enum sock_flags flag)
+{
+	return sock_flag(sk, flag);
+}
+
#endif /* __SMC_H */
@@ -285,7 +285,7 @@ static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
/* new data included urgent business */
smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
conn->urg_state = SMC_URG_VALID;
- if (!sock_flag(&smc->sk, SOCK_URGINLINE))
+ if (!smc_sock_flag(&smc->sk, SOCK_URGINLINE))
/* we'll skip the urgent byte, so don't account for it */
(*diff_prod)--;
base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
@@ -202,7 +202,7 @@ int smc_close_active(struct smc_sock *smc)
int rc1 = 0;
timeout = current->flags & PF_EXITING ?
- 0 : sock_flag(sk, SOCK_LINGER) ?
+ 0 : smc_sock_flag(sk, SOCK_LINGER) ?
sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
old_state = smc_sk_state(sk);
@@ -395,7 +395,7 @@ static void smc_close_passive_work(struct work_struct *work)
case SMC_PEERCLOSEWAIT2:
if (!smc_cdc_rxed_any_close(conn))
break;
- if (sock_flag(sk, SOCK_DEAD) &&
+ if (smc_sock_flag(sk, SOCK_DEAD) &&
smc_close_sent_any_close(conn)) {
/* smc_release has already been called locally */
smc_sk_set_state(sk, SMC_CLOSED);
@@ -432,7 +432,7 @@ static void smc_close_passive_work(struct work_struct *work)
if (old_state != smc_sk_state(sk)) {
sk->sk_state_change(sk);
if ((smc_sk_state(sk) == SMC_CLOSED) &&
- (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
+ (smc_sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
smc_conn_free(conn);
if (smc->clcsock)
release_clcsock = true;
@@ -453,7 +453,7 @@ int smc_close_shutdown_write(struct smc_sock *smc)
int rc = 0;
timeout = current->flags & PF_EXITING ?
- 0 : sock_flag(sk, SOCK_LINGER) ?
+ 0 : smc_sock_flag(sk, SOCK_LINGER) ?
sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
old_state = smc_sk_state(sk);
@@ -70,7 +70,7 @@ static int smc_rx_update_consumer(struct smc_sock *smc,
if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
diff = smc_curs_comp(conn->rmb_desc->len, &cons,
&conn->urg_curs);
- if (sock_flag(sk, SOCK_URGINLINE)) {
+ if (smc_sock_flag(sk, SOCK_URGINLINE)) {
if (diff == 0) {
force = true;
rc = 1;
@@ -286,7 +286,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
struct sock *sk = &smc->sk;
int rc = 0;
- if (sock_flag(sk, SOCK_URGINLINE) ||
+ if (smc_sock_flag(sk, SOCK_URGINLINE) ||
!(conn->urg_state == SMC_URG_VALID) ||
conn->urg_state == SMC_URG_READ)
return -EINVAL;
@@ -408,7 +408,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
break;
}
if (smc_sk_state(sk) == SMC_CLOSED) {
- if (!sock_flag(sk, SOCK_DONE)) {
+ if (!smc_sock_flag(sk, SOCK_DONE)) {
/* This occurs when user tries to read
* from never connected socket.
*/
@@ -449,7 +449,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
if (splbytes)
smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
if (conn->urg_state == SMC_URG_VALID &&
- sock_flag(&smc->sk, SOCK_URGINLINE) &&
+ smc_sock_flag(&smc->sk, SOCK_URGINLINE) &&
readable > 1)
readable--; /* always stop at urgent Byte */
/* not more than what user space asked for */