@@ -1671,11 +1671,11 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
#define tcp_twsk_md5_key(twsk) NULL
#endif
-bool tcp_alloc_md5sig_pool(void);
+bool tcp_md5sig_pool_alloc(void);
bool tcp_md5sig_pool_ready(void);
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
-static inline void tcp_put_md5sig_pool(void)
+struct tcp_md5sig_pool *tcp_md5sig_pool_get(void);
+static inline void tcp_md5sig_pool_put(void)
{
local_bh_enable();
}
@@ -4257,7 +4257,7 @@ static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
static DEFINE_MUTEX(tcp_md5sig_mutex);
static bool tcp_md5sig_pool_populated = false;
-static void __tcp_alloc_md5sig_pool(void)
+static void __tcp_md5sig_pool_alloc(void)
{
struct crypto_ahash *hash;
int cpu;
@@ -4289,7 +4289,7 @@ static void __tcp_alloc_md5sig_pool(void)
per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
}
/* before setting tcp_md5sig_pool_populated, we must commit all writes
- * to memory. See smp_rmb() in tcp_get_md5sig_pool()
+ * to memory. See smp_rmb() in tcp_md5sig_pool_get()
*/
smp_wmb();
tcp_md5sig_pool_populated = true;
@@ -4305,13 +4305,13 @@ static void __tcp_alloc_md5sig_pool(void)
crypto_free_ahash(hash);
}
-bool tcp_alloc_md5sig_pool(void)
+bool tcp_md5sig_pool_alloc(void)
{
if (unlikely(!tcp_md5sig_pool_populated)) {
mutex_lock(&tcp_md5sig_mutex);
if (!tcp_md5sig_pool_populated) {
- __tcp_alloc_md5sig_pool();
+ __tcp_md5sig_pool_alloc();
if (tcp_md5sig_pool_populated)
static_branch_inc(&tcp_md5_needed);
}
@@ -4320,7 +4320,7 @@ bool tcp_alloc_md5sig_pool(void)
}
return tcp_md5sig_pool_populated;
}
-EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+EXPORT_SYMBOL(tcp_md5sig_pool_alloc);
bool tcp_md5sig_pool_ready(void)
{
@@ -4329,25 +4329,25 @@ bool tcp_md5sig_pool_ready(void)
EXPORT_SYMBOL(tcp_md5sig_pool_ready);
/**
- * tcp_get_md5sig_pool - get md5sig_pool for this user
+ * tcp_md5sig_pool_get - get md5sig_pool for this user
*
* We use percpu structure, so if we succeed, we exit with preemption
* and BH disabled, to make sure another thread or softirq handling
* wont try to get same context.
*/
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
+struct tcp_md5sig_pool *tcp_md5sig_pool_get(void)
{
local_bh_disable();
if (tcp_md5sig_pool_populated) {
- /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+ /* coupled with smp_wmb() in __tcp_md5sig_pool_alloc() */
smp_rmb();
return this_cpu_ptr(&tcp_md5sig_pool);
}
local_bh_enable();
return NULL;
}
-EXPORT_SYMBOL(tcp_get_md5sig_pool);
+EXPORT_SYMBOL(tcp_md5sig_pool_get);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
const struct sk_buff *skb, unsigned int header_len)
@@ -1294,7 +1294,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
- if (!tcp_alloc_md5sig_pool())
+ if (!tcp_md5sig_pool_alloc())
return -ENOMEM;
return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
@@ -1332,7 +1332,7 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
struct tcp_md5sig_pool *hp;
struct ahash_request *req;
- hp = tcp_get_md5sig_pool();
+ hp = tcp_md5sig_pool_get();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
@@ -1347,11 +1347,11 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
if (crypto_ahash_final(req))
goto clear_hash;
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
return 0;
clear_hash:
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
@@ -1375,7 +1375,7 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
daddr = iph->daddr;
}
- hp = tcp_get_md5sig_pool();
+ hp = tcp_md5sig_pool_get();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
@@ -1393,11 +1393,11 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
if (crypto_ahash_final(req))
goto clear_hash;
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
return 0;
clear_hash:
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
@@ -654,7 +654,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
- if (!tcp_alloc_md5sig_pool())
+ if (!tcp_md5sig_pool_alloc())
return -ENOMEM;
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
@@ -701,7 +701,7 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
struct tcp_md5sig_pool *hp;
struct ahash_request *req;
- hp = tcp_get_md5sig_pool();
+ hp = tcp_md5sig_pool_get();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
@@ -716,11 +716,11 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
if (crypto_ahash_final(req))
goto clear_hash;
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
return 0;
clear_hash:
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
@@ -745,7 +745,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
daddr = &ip6h->daddr;
}
- hp = tcp_get_md5sig_pool();
+ hp = tcp_md5sig_pool_get();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
@@ -763,11 +763,11 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
if (crypto_ahash_final(req))
goto clear_hash;
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
return 0;
clear_hash:
- tcp_put_md5sig_pool();
+ tcp_md5sig_pool_put();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
Use a common prefix for operations on struct tcp_md5sig_pool. Signed-off-by: Dmitry Safonov <dima@arista.com> --- include/net/tcp.h | 6 +++--- net/ipv4/tcp.c | 18 +++++++++--------- net/ipv4/tcp_ipv4.c | 14 +++++++------- net/ipv6/tcp_ipv6.c | 14 +++++++------- 4 files changed, 26 insertions(+), 26 deletions(-)